How to run code every x seconds inside while True - python

I need to execute code inside a while loop every x seconds without stopping the loop's work.
I have tried threading and lock combinations, but it is still not working. I am working with Python 3.7.4 and PyCharm 2019.2.
#!/usr/bin/env python3
import configparser
import logging
import threading
import time

import ts3

__all__ = ["notify_bot"]

logging.basicConfig(filename='ts3bot.log',
                    level=logging.INFO,
                    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
                    )
logging.getLogger().addHandler(logging.StreamHandler())

def notify_bot(ts3conn, config, lock):
    logging.info("Start Notify Bot ...")
    lock.acquire()
    ts3conn.exec_("servernotifyregister", event="server")
    lock.release()
    while True:
        event = ts3conn.wait_for_event()
        try:
            reasonid_ = event[0]["reasonid"]
        except KeyError:
            continue
        if reasonid_ == "0":
            logging.info("User joined Lobby:")
            logging.info(event[0])
            servergroups = event[0]['client_servergroups']
            guestname = event[0]['client_nickname']
            lock.acquire()
            if not set(servergroups):
                print(f"s1 {guestname}")
            else:
                print(f"s2 {guestname}")
            lock.release()
    return None

def keep_alive(ts3conn, lock):
    while True:
        logging.info("Send keep alive!")
        lock.acquire()
        ts3conn.send_keepalive()
        lock.release()
        time.sleep(5)

if __name__ == "__main__":
    logging.info("Start TS Bot ...")
    config = configparser.ConfigParser()
    config.sections()
    config.read("settings_test.ini")
    logging.info("Config loaded!")
    HOST = config['server']['url']
    PORT = config['server']['query_port']
    USER = config['server']['query_user']
    PASS = config['server']['query_pw']
    SID = config['server']['sid']
    NAME = config['bot']['name']
    logging.info("Connecting to query interface ...")
    URI = f"telnet://{USER}:{PASS}@{HOST}:{PORT}"
    try:
        with ts3.query.TS3ServerConnection(URI) as ts3conn:
            ts3conn.exec_("use", sid=SID)
            ts3conn.query("clientupdate", client_nickname="x123d")
            logging.info("Connected!")
            lock = threading.Lock()
            notify_thread = threading.Thread(target=notify_bot, args=(ts3conn, config, lock),
                                             daemon=True, name="notify")
            keep_alive_thread = threading.Thread(target=keep_alive, args=(ts3conn, lock),
                                                 daemon=True, name="keep_alive")
            notify_thread.start()
            keep_alive_thread.start()
            keep_alive_thread.join()
            notify_thread.join()
    except KeyboardInterrupt:
        logging.info(60 * "=")  # was logging.INFO, which is an int constant, not a function
        logging.info("TS Bot terminated by user!")
        logging.info(60 * "=")
After starting, it handles one person joining the server and then does nothing: it doesn't send the keep-alive and stops working entirely.

You can use the built-in time module.
You can read about it on the official Python website (https://docs.python.org/3/library/time.html).
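For example, a minimal sketch of the pattern the question asks for, with do_work and the 5-second interval as placeholders:
import threading
import time

def do_work():
    print("tick")  # placeholder for the periodic task

def run_every(seconds, fn):
    # run fn every `seconds` seconds in a background thread,
    # so the loop below keeps working uninterrupted
    def loop():
        while True:
            fn()
            time.sleep(seconds)
    threading.Thread(target=loop, daemon=True).start()

run_every(5, do_work)
while True:
    time.sleep(1)  # the main loop is never blocked by do_work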

Personally, for simple things, I find the _thread library easier. Here's a function that you can run in a thread, and an example of starting that thread:
import _thread
import time

def mythread(arg1):
    while True:
        time.sleep(arg1)
        do.whatever()  # placeholder for the real work

_thread.start_new_thread(mythread, (5,))
The important thing to note is the second argument I passed to the _thread.start_new_thread function. It must be a tuple, which is why there is a comma after the 5. Even if your function doesn't require any arguments, you have to pass a tuple.
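For instance, a sketch with a no-argument function (heartbeat is a placeholder name):
import _thread
import time

def heartbeat():
    while True:
        print("still alive")
        time.sleep(5)

# even with no arguments, the second argument must be a tuple: ()
_thread.start_new_thread(heartbeat, ())

while True:
    time.sleep(1)  # keep the main thread alive; _thread threads die with it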

I am using the time module and threading. I've made some changes and it seems to work:
#!/usr/bin/env python3
import configparser
import logging
import threading
import time

import ts3

logging.basicConfig(filename='ts3bot.log',
                    level=logging.INFO,
                    format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
                    )
logging.getLogger().addHandler(logging.StreamHandler())

def notify_bot(ts3conn):
    logging.info("Start Notify Bot ...")
    ts3conn.exec_("servernotifyregister", event="server")
    while True:
        event = ts3conn.wait_for_event()
        try:
            reasonid_ = event[0]["reasonid"]
        except KeyError:
            continue
        if reasonid_ == "0":
            logging.info("User joined Lobby:")
            logging.info(event[0])
            servergroups = event[0]['client_servergroups']
            guestname = event[0]['client_nickname']
            if not set(servergroups):
                print(f"s1 {guestname}")
            else:
                print(f"s2 {guestname}")
    return None

def keep_alive(ts3conn, time):
    while True:
        logging.info("Send keep alive!")
        ts3conn.send_keepalive()
        time.sleep(20)

if __name__ == "__main__":
    logging.info("Start TS Bot ...")
    config = configparser.ConfigParser()
    config.sections()
    config.read("settings_test.ini")
    logging.info("Config loaded!")
    HOST = config['server']['url']
    PORT = config['server']['query_port']
    USER = config['server']['query_user']
    PASS = config['server']['query_pw']
    SID = config['server']['sid']
    NAME = config['bot']['name']
    logging.info("Connecting to query interface ...")
    URI = f"telnet://{USER}:{PASS}@{HOST}:{PORT}"
    try:
        with ts3.query.TS3ServerConnection(URI) as ts3conn:
            ts3conn.exec_("use", sid=SID)
            ts3conn.query("clientupdate", client_nickname="x123d")
            logging.info("Connected!")
            notify_thread = threading.Thread(target=notify_bot, args=(ts3conn,),
                                             daemon=True, name="notify")
            keep_alive_thread = threading.Thread(target=keep_alive, args=(ts3conn, time),
                                                 daemon=True, name="keep_alive")
            notify_thread.start()
            keep_alive_thread.start()
            keep_alive_thread.join()
            notify_thread.join()
    except KeyboardInterrupt:
        logging.info(60 * "=")
        logging.info("TS Bot terminated by user!")
        logging.info(60 * "=")
It looks like ts3conn.send_keepalive() is causing the error: when I delete it, the code works fine; when I add it back, the code stops working after ts3conn.send_keepalive() is sent once.
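An aside on the title question, running something every x seconds without stopping the main loop: besides a sleeping thread, a self-rescheduling threading.Timer is a common pattern. A minimal sketch, with print standing in for a periodic call such as send_keepalive:
import threading

def every(interval, fn):
    # call fn, then re-arm a Timer so fn runs again interval seconds later
    def wrapper():
        fn()
        threading.Timer(interval, wrapper).start()
    wrapper()

every(20, lambda: print("keep alive"))
Note that in both bot versions above, two threads share one ts3conn; if the underlying query connection is not thread-safe, wait_for_event() and send_keepalive() can still interleave on the wire, which may be why the keep-alive appears to break the connection.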

Related

Using select on stdin for non-blocking input in Python

I want to create a process that has three subprocesses: two to handle websockets and one to get input from the terminal to pass to the other two sockets.
import sys
import time
import select
import asyncio
import threading
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing import Process

def managment_api():
    poller = select.poll()
    poller.register(sys.stdin, select.POLLIN)
    started = True
    count = 0
    while started:
        print("management api [{:^6}]".format(count))
        while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            _line = sys.stdin.readline()
            if _line:
                line = _line.strip().lower()
                if line == "exit" or line == "quit":
                    started = False
                    print("got exit message [{}]".format(started))
            else:
                pass
        count += 1
        time.sleep(1)

def process_main(loop):
    print("process_main BEGIN")
    loop.run_in_executor(None, managment_api())
    print("process_main END")

if __name__ == "__main__":
    # this doesn't work
    main = Process(target=managment_api, args=())
    main.name = "management api"
    main.start()
    main.join()
    # asyncio.run(managment_api())  # works, but management_api must be made async
When I make management_api an async function and call asyncio.run(management_api()), I can get input.
If I try to run the same code without async in a separate process, I get stuck in the "while sys.stdin in select.select(...)" part of the code. I've tried with threads, but that doesn't work either.
How can I run this code in a separate process and still get the input there?
I was able to solve the problem by first using fn = sys.stdin.fileno() to get the main process's stdin file descriptor and passing that as an argument to the subprocess, then reopening it there with sys.stdin = os.fdopen(fn).
import os
import sys
import time
import select
import asyncio
import threading
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing import Process

def managment_api(fn):
    sys.stdin = os.fdopen(fn)  # reopen the inherited stdin descriptor in this process
    poller = select.poll()
    poller.register(sys.stdin, select.POLLIN)
    started = True
    count = 0
    while started:
        print("management api [{:^6}]".format(count))
        while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            _line = sys.stdin.readline()
            if _line:
                line = _line.strip().lower()
                if line == "exit" or line == "quit":
                    started = False
                    print("got exit message [{}]".format(started))
            else:
                pass
        count += 1
        time.sleep(1)

def process_main(loop):
    print("process_main BEGIN")
    loop.run_in_executor(None, managment_api())
    print("process_main END")

if __name__ == "__main__":
    fn = sys.stdin.fileno()
    main = Process(target=managment_api, args=(fn,))
    main.name = "management api"
    main.start()
    main.join()
    # asyncio.run(managment_api())  # alternative: make management_api async
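A note on why the file-descriptor trick is needed: multiprocessing closes and replaces sys.stdin in child processes (it is reopened on os.devnull), so a child cannot read the terminal through its own sys.stdin; passing the parent's descriptor number and reopening it with os.fdopen(fn) gives the child a usable handle to the same terminal.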

Not receiving messages from Kafka Topic

I am receiving None when calling poll() in this program, but I get the messages when running kafka-console-consumer.bat from cmd; I can't figure out what exactly the problem is.
Execution starts from main.py:
from queue import Queue
from concurrent.futures import ThreadPoolExecutor
import time
import json

from kafka_message_consumer import KafkaMessageConsumer
from kafka_discovery_executor import KafkaDiscoveryExecutor

with open('kafka_properties.json') as f:
    kafka_properties = json.loads(f.read())

message_queue = Queue()
kafka_message_consumer = KafkaMessageConsumer(kafka_properties, message_queue)
kafka_discovery_executor = KafkaDiscoveryExecutor(message_queue, kafka_properties)

with ThreadPoolExecutor(max_workers=5) as executor:
    executor.submit(kafka_message_consumer.run())
    time.sleep(1)
    executor.submit(kafka_discovery_executor.run())
    time.sleep(1)
The KafkaDiscoveryExecutor class consumes messages from the shared queue and processes them.
This is kafka_message_consumer.py:
import logging

from confluent_kafka import Consumer

class KafkaMessageConsumer:

    def __init__(self, kafka_properties, message_queue):
        self.message_queue = message_queue
        self.logger = logging.getLogger('KafkaMessageConsumer')
        self.kafka_stream_consumer = None
        self.create_consumer(kafka_properties)

    def create_consumer(self, kafka_properties):
        """
        Create an instance of Kafka Consumer with the consumer configuration properties
        and subscribe to the defined topic(s).
        """
        consumer_config = dict()
        # Consumer configuration properties.
        consumer_config['bootstrap.servers'] = kafka_properties.get('bootstrap.servers')
        consumer_config['group.id'] = kafka_properties.get('group.id')
        consumer_config['enable.auto.commit'] = True
        consumer_config['auto.offset.reset'] = 'earliest'
        # For SSL security:
        # consumer_config['security.protocol'] = 'SASL_SSL'
        # consumer_config['sasl.mechanisms'] = 'PLAIN'
        # consumer_config['sasl.username'] = ''
        # consumer_config['sasl.password'] = ''
        # Create the consumer using consumer_config.
        self.kafka_stream_consumer = Consumer(consumer_config)
        # Subscribe to the specified topic(s).
        self.kafka_stream_consumer.subscribe(['mytopic'])

    def run(self):
        while True:
            msg = self.kafka_stream_consumer.poll(1.0)
            if msg is None:
                # No message available within timeout.
                print("Waiting for message or event/error in poll()")
                continue
            elif msg.error():
                print("Error: {}".format(msg.error()))
            else:
                # Consume the record: push the message into message_queue.
                try:
                    self.message_queue.put(msg)
                except Exception as e:
                    self.logger.critical("Error occurred in Kafka consumer: {}".format(e))
The specified topic has events, but I am getting None here, and the print statement inside if msg is None: is executing.
I am still not sure why the above code does not work as it should.
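One detail worth noting about the original main.py, independent of the changes below: executor.submit(kafka_message_consumer.run()) calls run() immediately in the main thread (where it loops forever) instead of handing the callable to the pool. submit expects the function object itself, as in this sketch:
executor.submit(kafka_message_consumer.run)    # pass the callable itself
executor.submit(kafka_discovery_executor.run)  # not the result of calling it
That alone would keep the discovery executor from ever being scheduled.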
Here are the changes I made to get it working:
- I used the threading module instead of concurrent.futures.
- I used daemon threads.
- I call threading.Thread.__init__() inside the constructors of the two classes (KafkaMessageConsumer, KafkaDiscoveryExecutor).
Here's main.py
from queue import Queue
import threading
import time
import json

from kafka_message_consumer import KafkaMessageConsumer
from kafka_discovery_executor import KafkaDiscoveryExecutor

def main():
    with open('kafka_properties.json') as f:
        kafka_properties = json.loads(f.read())
    message_queue = Queue()
    threads = [
        KafkaMessageConsumer(kafka_properties, message_queue),
        KafkaDiscoveryExecutor(message_queue, kafka_properties)
    ]
    for thread in threads:
        thread.start()
        time.sleep(1)
    for thread in threads:
        thread.join()
        time.sleep(1)

if __name__ == "__main__":
    main()
and kafka_message_consumer.py
import logging
import threading

from confluent_kafka import Consumer

class KafkaMessageConsumer(threading.Thread):
    daemon = True

    def __init__(self, kafka_properties, message_queue):
        threading.Thread.__init__(self)
        self.message_queue = message_queue
        self.logger = logging.getLogger('KafkaMessageConsumer')
        self.kafka_stream_consumer = None
        self.create_consumer(kafka_properties)

    def create_consumer(self, kafka_properties):
        """
        Create an instance of Kafka Consumer with the consumer configuration properties
        and subscribe to the defined topic(s).
        """
        consumer_config = dict()
        # Consumer configuration properties.
        consumer_config['bootstrap.servers'] = kafka_properties.get('bootstrap.servers')
        consumer_config['group.id'] = kafka_properties.get('group.id')
        consumer_config['enable.auto.commit'] = True
        consumer_config['auto.offset.reset'] = 'earliest'
        # Create the consumer using consumer_config.
        self.kafka_stream_consumer = Consumer(consumer_config)
        # Subscribe to the specified topic(s).
        self.kafka_stream_consumer.subscribe(['mytopic'])

    def run(self):
        while True:
            msg = self.kafka_stream_consumer.poll(1.0)
            if msg is None:
                # No message available within timeout.
                print("Waiting for message or event/error in poll()")
                continue
            elif msg.error():
                print("Error: {}".format(msg.error()))
            else:
                # Consume the record: push the message into message_queue.
                try:
                    self.message_queue.put(msg)
                except Exception as e:
                    self.logger.critical("Error occurred in Kafka consumer: {}".format(e))
        self.kafka_stream_consumer.close()  # note: unreachable while the loop above runs forever

TimeoutError when two computers communicate

I am using Python 3 to learn distributed programming.
There are two Python files: main.py, which distributes information, and worker.py, which manipulates the data.
Everything goes well when I run both files on one computer [server address = 127.0.0.1, port = 5000], but when I run the two files on separate computers they cannot connect to each other, and a TimeoutError is encountered.
I don't know why. One computer is a Win10 machine at my home; the other is a Linux cloud server that I bought.
The code works on one computer, but when I ran main.py on the Linux machine and worker.py (with the server set to the Linux machine's IP address) on Win10, worker.py hit a TimeoutError.
I know nothing about Linux; are there some security settings I need to open or close?
"""main.py"""
import queue
from multiprocessing.managers import BaseManager
import datetime
import time
TASK_QUEUE = queue.Queue()
RESULT_QUEUE = queue.Queue()
def get_task_queue():
"""set TASK_QUEUE as a function"""
global TASK_QUEUE
return TASK_QUEUE
def receive_result_queue():
"""set RESULT_QUEUE as a function"""
global RESULT_QUEUE
return RESULT_QUEUE
class QueueManager(BaseManager):
"""inherit BaseManager from multiprocessing.managers"""
pass
if __name__ == '__main__':
QueueManager.register('distribute_task_queue', callable=get_task_queue)
QueueManager.register('receive_result_queue', callable=receive_result_queue)
# bind port 5000, set verification code = 'abc'
MANAGER = QueueManager(address=('127.0.0.1', 5000), authkey=b'abc')
# start manager
MANAGER.start()
TASK = MANAGER.distribute_task_queue()
RESULT = MANAGER.receive_result_queue()
# put each line into manager`enter code here`
with open("C:/Users/dayia/Desktop/log.20170817") as f:
for line in f:
TASK.put(line)
# try receive result
while 1:
try:
r = RESULT.get(timeout=1)
if r[0] == r[1] and r[0] == "done":
break
else:
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),"line %s\'s length is %s" % (r[0], r[1]))
except queue.Empty:
print('result queue is empty.')
#
"""worker.py"""
import datetime
from multiprocessing.managers import BaseManager
import queue
import time
class QueueManager(BaseManager):
"""inherit BaseManager from multiprocessing.managers"""
pass
QueueManager.register('distribute_task_queue')
QueueManager.register('receive_result_queue')
server_addr = '127.0.0.1'
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'Connect to server %s...' % server_addr)
m = QueueManager(address=(server_addr, 5000), authkey=b'abc')
m.connect()
TASK = m.distribute_task_queue()
RESULT = m.receive_result_queue()
def parse_line(line):
return len(line)
C = 0
while not TASK.empty():
try:
n = TASK.get(timeout=1)
r = parse_line(n)
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), 'running line %s, length is %s' % (C+1, r))
C += 1
RESULT.put([r, C])
except queue.Empty:
print('task queue is empty.')
RESULT.put(["done", "done"])
enter code here
print('worker exit')
The address 127.0.0.1 very specifically refers to the same computer the code is running on (in network terms, 127.0.0.1 is the IP address of localhost). main.py binds the manager to ('127.0.0.1', 5000), so it only accepts connections that originate on the Linux machine itself, and worker.py as posted also connects to 127.0.0.1, i.e. to its own machine.
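A minimal sketch of the corresponding changes (the IP below is a documentation placeholder for the cloud server's public address):
# main.py, on the Linux server: listen on all interfaces, not just localhost
MANAGER = QueueManager(address=('0.0.0.0', 5000), authkey=b'abc')

# worker.py, on the Win10 machine: connect to the server's real IP
server_addr = '203.0.113.10'  # placeholder: your server's public IP
m = QueueManager(address=(server_addr, 5000), authkey=b'abc')
TCP port 5000 must also be allowed through the Linux server's firewall/security group, which may be the "security setting" the asker suspected.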

Imported module logging calls not showing up

I have been reading up on proper logging, and so far I like how it is going. All was fine until I tried logging from both a main file and a module I wrote. The main file is able to write to a file and the console, but the imported module displays nothing in either. If I had to take a guess, I assume I would have to configure the module's output separately, since I am configuring logging in code. The problem is that I am not sure how, or whether that is even the reason. I have tried my best to google this instead of asking, but here I am now. The source code is below; if you run it, you may have to change the import, since PyCharm does not like it when I import a file directly (change "from tests import speedtest" to "import speedtest"). The files are main.py and speedtest.py.
Main
import logging
from tests import speedtest
import time

# Logging configuration
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# creates a handler to deal with writing to the file
file_handler = logging.FileHandler("log.txt", mode="w")
file_handler.setFormatter(logFormatter)
# handler for writing to the console
console_handler = logging.StreamHandler()
console_handler.setFormatter(logFormatter)
# adds the handlers to this module's logger
logger.addHandler(file_handler)
logger.addHandler(console_handler)

# max speed provided
NOMINAL_SPEED = 50
# threshold in percentage; 60% seems to be a decent amount to expect
THRESHOLD = 60
# padding in percentage for severe warning
PAD = 10
# amount of time in between runs
INTERVAL = 300

class Main:
    """
    Main running class
    """
    def __init__(self):
        self.speedtest = speedtest.SpeedTest(share=True)
        self.threshold = THRESHOLD
        self.pad = PAD
        self.nominal = NOMINAL_SPEED
        self.done = False
        logger.debug("Starting main loop.")
        while not self.done:
            self.loop()
            time.sleep(INTERVAL)

    def loop(self):
        try:
            results = self.speedtest.run()
        except Exception as e:
            logger.error("Skipped running speed test this run. Will try again next time")
            return
        download = float(results["download"][:-7])
        upload = float(results["upload"][:-7])
        url = results["url"]
        host = results["host"]
        diff_download = (download / self.nominal) * 100
        logger.debug("Current download is {} Mbps upload is {} Mbps. Share url: {} host: {}".format(download, upload, url, host))
        if (((self.threshold - self.pad) / 100) * self.nominal) <= diff_download <= ((self.threshold / 100) * self.nominal):
            logger.info("Speed is currently at {}% nominal.".format(diff_download))
            self.warning()
        elif diff_download <= ((self.threshold - self.pad) / 100) * self.nominal:
            logger.info("Speed is currently at {}% nominal. This is a problem.".format(diff_download))
            self.critical()

    def warning(self):
        pass

    def critical(self):
        pass

if __name__ == "__main__":
    Main()
speedtest
import subprocess
import logging
import os

class SpeedTest:
    """
    Class to run speed test and return the results in an easy to use manner
    """
    def __init__(self, share=False):
        """
        Init method
        :param share: When set to True it will also return a url to the speed test image
        :return:
        """
        self.logger = logging.getLogger(__name__)
        self.logger.addHandler(logging.NullHandler())
        self._share = share
        if share is True:
            self.logger.debug("Share flag set to True")
            self.cmd = ["speedtest-cli", "--share"]
        else:
            self.logger.debug("Share not set to true. Ignoring share url")
            self.cmd = ["speedtest-cli"]

    def run(self):
        """
        Runs the speed test, returning a dict containing upload, download, ping, and share url if wanted.
        :return:
        """
        self.logger.debug("Starting speedtest!")
        # check_output returns the output in bytes, so we use decode() to turn it into a simple
        # string. Then we split the lines, giving us a list.
        try:
            stdout = subprocess.check_output(self.cmd).decode().splitlines()
        except subprocess.CalledProcessError as e:
            self.logger.error(e)
            raise e
        res = {}
        for i in stdout:
            if "Download:" in i:
                res["download"] = i[10:]
            if "Upload:" in i:
                res["upload"] = i[8:]
            if "Hosted" in i:
                res["host"] = i[2:]
            if self._share is True and "Share results:" in i:
                res["url"] = i[15:]
            else:
                res["url"] = None
        return res

    def ping(self, addr):
        """
        Pings an address and returns 1 if the connection can not be made or 0 if it succeeds
        :param addr: IPv4 address
        :return:
        """
        try:
            if os.name == "nt":
                self.logger.debug("Windows OS detected")
                self.logger.info("Pinging {}".format(addr))
                subprocess.check_output(["ping", "-n", "1", addr])
            elif os.name == "posix":
                self.logger.debug("Nix OS detected")
                subprocess.check_output(["ping", "-c", "1", addr])
        except subprocess.CalledProcessError:
            self.logger.warning("Returned non zero value. Is the internet working?")
            return 1
        return 0

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    for i in SpeedTest(share=True).run().items():
        print(i)
    print(SpeedTest().ping("8.8.8.0"))
In speedtest.py, when you call:
logging.getLogger(__name__)
it creates a logger object for speedtest.py, so you will have to configure it separately. If you want it to be the same logger as in main, just add:
self.speedtest.logger = logger
after you create the SpeedTest object in Main's constructor.
Another option is to pass __name__ as an argument to SpeedTest() and create the logger with that argument (I think this is the better option for you, since you write to the logger in the constructor).
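A third option, not from the answer above but standard logging practice: attach the handlers in main.py to the root logger instead of to logging.getLogger(__name__). Module loggers propagate their records to the root logger by default, so speedtest.py would then log to the same file and console without any per-module configuration. A minimal sketch of the changed lines in main.py:
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
root_logger = logging.getLogger()  # the root logger, rather than logging.getLogger(__name__)
root_logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler("log.txt", mode="w")
file_handler.setFormatter(logFormatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(logFormatter)
root_logger.addHandler(console_handler)
# speedtest.py keeps calling logging.getLogger(__name__); its records now propagate to root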

Python Threading Error when executing an MSSQL statement

I have a producer/consumer application where the producer reads a database and puts the results into a website. On success, the consumer then gets the id of the transaction and updates the db.
The program runs as required until it attempts to execute the update. It sometimes fails with the error 'HY000', 'The driver did not supply an error!'.
The code can comfortably write to a file without any issues.
What could I do to fix this? We need to update the db.
Thanks.
Notes: using Python 2.7 with pyodbc on MSSQL 2008.
Code below.
#!/usr/bin/env python
from urlparse import urlparse
from threading import Thread
import httplib, sys
import Queue
import urllib2
import urllib
from time import localtime, strftime
from ConfigParser import SafeConfigParser
from functions import Functions
from pyodbcclass import db_mssql

# user, password, urlini and getStatus come from the asker's own
# config/functions modules, which are not shown here
now = strftime("%y-%m-%d-%H%M%S", localtime())
k = db_mssql()
thread_list = []
thread_list2 = []
getFromDBQueue = Queue.Queue()
updateDBQueue = Queue.Queue()
number_of_consumer_threads = 3

def putURL():
    querySql = "select distinct top 3 id,url from tblURL where processed=0 order by id asc"
    re = k.query2(querySql)
    if re:
        for r in re:
            id = r.id
            params = urllib.urlencode({'user': user, 'password': password})
            ourl = urlini + "?%s" % params
            urlplusid = {'url': ourl.strip(), 'id': id}
            getFromDBQueue.put(urlplusid)

def getURL(thread_id):
    while 1:
        try:
            URL_toget = getFromDBQueue.get(block=False)
            url2 = URL_toget['url']
            msgid2 = URL_toget['id']
        except Queue.Empty:
            print "thread exiting, id: " + str(thread_id) + "++getFromDB++"
            sys.exit()
        status, url = getStatus(url2)
        if status == 200:
            updateDBQueue.put(msgid2)
        print(status)

def updateDB(thread_id):
    while 1:
        try:
            id2 = updateDBQueue.get(block=False)
            if id2:
                params = ['true', id2]
                sqlupdate = "UPDATE tblURL SET processed=? WHERE id=?"
                k.execute3(sqlupdate, params)
        except Queue.Empty:
            print "thread exiting, id: " + str(thread_id) + "**update**"
            sys.exit()

# fill the queue with work and block until we are done filling the queue
producer_thread = Thread(target=putURL)
producer_thread.start()
producer_thread.join()

# we can now start consumers
for i in range(number_of_consumer_threads):
    getfromDB = Thread(target=getURL, args=(i,))
    getfromDB.start()
    thread_list.append(getfromDB)

for i in range(number_of_consumer_threads):
    update = Thread(target=updateDB, args=(i,))
    update.start()
    thread_list2.append(update)

for thread in thread_list:
    thread.join()

for thread2 in thread_list2:
    thread2.join()
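No answer is recorded here, but one detail stands out: all three updateDB threads share the single module-level connection created by k = db_mssql(). pyodbc declares DB-API threadsafety level 1, meaning threads may share the module but not a connection, and sharing one connection across threads is a plausible source of an intermittent 'HY000', 'The driver did not supply an error!'. A hedged sketch of the usual remedy, giving each consumer thread its own connection (the connection string is a placeholder, and db_mssql is the asker's own wrapper, replaced here with plain pyodbc):
import pyodbc

CONN_STR = "DRIVER={SQL Server};SERVER=myserver;DATABASE=mydb;UID=user;PWD=pass"  # placeholder

def updateDB(thread_id):
    conn = pyodbc.connect(CONN_STR)  # private connection for this thread
    cursor = conn.cursor()
    while 1:
        try:
            id2 = updateDBQueue.get(block=False)
            if id2:
                cursor.execute("UPDATE tblURL SET processed=? WHERE id=?", ('true', id2))
                conn.commit()
        except Queue.Empty:
            conn.close()
            print "thread exiting, id: " + str(thread_id) + "**update**"
            sys.exit()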
