I'm trying to have Task Scheduler run an executable on windows startup. The executable is a simple python script that reads a list of IPs from a .txt, pings them, and repeats after a set interval, like a basic heartbeat.
The executable was created using pyinstaller successfully, and runs perfectly when used manually.
However, when I have Task Scheduler run the same executable from the same directory, it runs without reading the .txt file and immediately closes.
The following is the code,
import os
import time
import smtplib
from email.message import EmailMessage #allows for the email system to work
from datetime import datetime #allows the text files to have the date & time
from win10toast import ToastNotifier #allows for desktop notificaitons to appear on windows devices
import schedule #automatically schedules when the script executes
# Scans the IPs in the txt file
def notif():
    """Ping every device listed in sydvlan.txt and log the results.

    Reads "name:IP" pairs (colon separated) from sydvlan.txt, pings each
    IP four times, and writes one UP/DOWN line per device to a
    timestamped HeartbeatResults_*.txt log.  A Windows toast notification
    is shown for every device that fails to respond.

    NOTE(review): both .txt paths are relative, so the process's working
    directory must contain sydvlan.txt.  When launched by Task Scheduler
    set the "Start in" directory (or switch to absolute paths), otherwise
    the open() below raises and the exe exits immediately.
    """
    with open(r'sydvlan.txt') as file:
        dump = file.read()  # reads the lines of the sydvlan.txt file
        dump = dump.splitlines()

    # Creates a new log file and makes the title the current date and time.
    cdString = datetime.now().strftime("%d_%m_%Y %H_%M")
    # `with` guarantees the log is flushed/closed even if a ping raises.
    with open(r'HeartbeatResults_{0}.txt'.format(cdString), 'w') as report:
        for line in dump:
            lineList = line.split(":")
            lineText = lineList[0]    # device name (text before the colon)
            IP = lineList[1].strip()  # IP address (text after the colon)
            print("Currently Pinging {} on {}".format(lineText, IP))
            print("------------------"*3)
            # Get Date and Time at time of ping.
            currentDate = datetime.now()
            cdString = currentDate.strftime("%d_%m_%Y %H:%M:%S")
            # Pings the device; Windows ping, 4 echo requests.
            response = os.popen(f"ping {IP} -n 4").read()
            print("------------------"*3)

            # BUG FIX: the original conditions were
            #     if "Received >= 1" and "Approximate" in response:
            #     if "Received = 0" or "unreachable" in response:
            # A bare non-empty string literal is always truthy, so the
            # first reduced to ("Approximate" in response) and the second
            # was ALWAYS true (toast fired for every device).  Test each
            # substring against `response` explicitly instead.
            # NOTE(review): substrings match English-locale ping output —
            # confirm on non-English Windows installs.
            success = ("Received = 0" not in response
                       and "unreachable" not in response)
            if success:
                report.write("UP {0} Successful {1}".format(lineText, cdString) + "\n")
            else:
                report.write("DOWN {0} UnSuccessful {1}".format(lineText, cdString) + "\n")
                # Desktop toast so staff notice the failed device.
                #email_alert("Issue with {0}".format(lineText, cdString), "The Hearbeat Notification System Works :)")
                toaster = ToastNotifier()
                toaster.show_toast("Issue with {0}: {1} on {2}".format(lineText, IP, cdString), "Please Fix Now", duration=10, icon_path='Warning.ico')
                time.sleep(1)
        report.write("Hearbeat Protocol Complete" + "\n")
#email notification setup
#def email_alert(subject, body):
# mailListFile = open(r'XXXXX.txt')
# emailList = (mailListFile.read()).splitlines()
# msg = EmailMessage()
# msg.set_content(body)
# msg['subject'] = subject
# msg['to'] = ', '.join(emailList)
# user = "XXXXX"
# msg['from'] = user
# password = "XXXXXX"
# server = smtplib.SMTP("smtp.gmail.com", 587)
# server.starttls()
# server.login(user, password)
# server.send_message(msg)
# server.quit()
#allows for the entire script to run every 300 seconds (or whatever is specified)
if __name__ == '__main__':
    # Run one heartbeat pass immediately on startup.
    notif()
    # BUG FIX: the original placed schedule.every(5).minutes.do(notif)
    # AFTER the infinite `while True` loop, so that line was unreachable
    # and the job was never registered -- run_pending() had nothing to
    # run and notif() only ever executed once.  Register the job BEFORE
    # entering the loop.
    schedule.every(5).minutes.do(notif)
    # Allows the entire script to run every 300 seconds (or whatever is
    # specified above).
    while True:
        schedule.run_pending()
        time.sleep(1)
The only argument I used when creating the executable was --onefile
Thank you for your time.
Your filename r'sydvlan.txt' is a relative path, so its location depends on the current working directory when the program is invoked.
Either try to use absolute paths, e.g. r'C:\path\to\file\sydvlan.txt' (do this for r'HeartbeatResults_{0}.txt', too), or set the "Start in (optional)" parameter in the Windows Task Scheduler to the path where your txt file is located.
Related
Hello, I need help. I wrote a script in Python that pings four IP addresses from a dictionary, and it works in a loop. When an IP does not respond, my script runs another script on a different machine (which sends an SMS message). This works well, but I would like the following behaviour: if a ping gets no response, send me one message with the start time, then check the host again after 60 seconds; if there is still no response, do not send another SMS message. Then, when the host recovers, send me a message that this IP address is available again at that time (datetime.now) and go back to checking every 60 seconds. What is the best way to do this?
My actual code:
import os
import time
# Devices to monitor: id -> {IP address, display name}.
ip = {'1':{'IP':'192.168.1.1','name':'w1'},'2':{'IP':'192.168.1.2','name':'w2'},'3':{'IP':'192.168.1.3','name':'w3'}}

# Poll every device forever, sleeping 60 s between full passes.
while True:
    for cmtsid in ip:
        cmts = ip[cmtsid]['name']
        ip4 = ip[cmtsid]['IP']
        # os.system returns the command's exit status; 0 means the host
        # answered all five pings.
        response = os.system('ping -c 5 %s' % ip4)
        if response == 0:
            print(cmts, 'Available')
        else:
            # BUG FIX: os.system() does NOT raise when the command fails;
            # it returns a non-zero exit status, so the original
            # try/except Exception around it could never catch an ssh
            # failure.  Inspect the return code and log failures instead.
            # NOTE(review): 'xxxx#1.1.1.1' looks like a mangled
            # 'user@host' ssh target -- confirm the real address.
            status = os.system('ssh xxxx#1.1.1.1 python /home/user/script.py %s' % cmts)
            if status != 0:
                with open("log.txt", "a") as f:
                    f.write('sms alert for %s failed with status %s\n' % (cmts, status))
    time.sleep(60)
Implementing an email client (to a Yahoo server) in Python using tkinter. Very basic functionality: browse folders, list messages in the selected folder, and create, forward, reply to, or delete a message. At present it is too slow (it takes too much time to see changes made remotely). My Yahoo mailbox has ~170 messages.
To approach the problem created scripts fetch_idle.py, fetch_poll.py (below)
Looks like neither Yahoo, nor Gmail supports IDLE command. The fetch_idle.py script:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Fetch unseen messages in a mailbox. Use imap_tools. It turned out
that neither yahoo, nor gmail support IDLE command"""
import os
import ssl
from imap_tools import MailBox, A
import conf
if __name__ == '__main__':
    # Resolve target server settings from the command line via conf.
    args = conf.parser.parse_args()
    host, port, env_var = conf.config[args.host]
    if args.verbose > 0:
        print(host, port, env_var)
    # Credentials come from the environment (kept for parity; unused below).
    user, pass_ = os.getenv('USER_NAME_EMAIL'), os.getenv(env_var)
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    # ctx.options &= ~ssl.OP_NO_SSLv3
    with MailBox(host=host, port=port, ssl_context=ctx) as mbox:
        # Neither Yahoo nor Gmail appears to support the IMAP IDLE command.
        data = mbox.idle.wait(timeout=60)
        if not data:
            print('no updates in 60 sec')
        else:
            # Something arrived: report every unseen message.
            for msg in mbox.fetch(A(seen=False)):
                print(msg.date, msg.subject)
gives the following errors accordingly (for yahoo and gmail):
imap_tools.errors.MailboxTaggedResponseError: Response status "None" expected, but "b'IIDE1 BAD [CLIENTBUG] ID Command arguments invalid'" received. Data: IDLE start
imap_tools.errors.MailboxTaggedResponseError: Response status "None" expected, but "b'GONM1 BAD Unknown command s19mb13629058ljg'" received. Data: IDLE start
Resorted to reading all uids in the mailbox, getting the difference (new - old), and thus getting known what has changed. To learn I use the fetch_poll.py script:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Get all uids in the mailbox. Wait 60 secs. Fetch uids again. Print
the changes. Repeat"""
import os
import ssl
from threading import Thread, Condition, Event
import time
import imaplib
import imap_tools
# from progress.bar import Bar
# from imap_tools import MailBox
import conf
# Module-level state shared between the poller thread and the main thread.
# BUG FIX: the original `all_uids = deleted_uids = new_uids = set()` bound
# all three names to the SAME set object, so any in-place mutation of one
# would silently show up in the other two.  Give each its own set.
all_uids = set()
deleted_uids = set()
new_uids = set()
POLL_INTERVAL = 10  # seconds between mailbox polls
request_to_terminate = Event()  # set by the main thread to stop the poller
def fetch_uids(mbox, cv):
    """Return the uid of every message in the current folder.

    Re-selects the current folder first so the fetch sees a fresh
    server-side view.  On an IMAP or fetch error an empty list is
    returned instead of raising.  (`cv` is accepted for call-site
    symmetry with the poller but is not used here.)
    """
    # Re-select the folder to refresh the message listing.
    mbox.folder.set(mbox.folder.get())
    result = []
    try:
        for message in mbox.fetch(headers_only=1, bulk=1):
            result.append(int(message.uid))
    except (imaplib.IMAP4.error, imap_tools.errors.MailboxFetchError):
        result = []
    return result
def update_uids(mbox, cv):
    """Poller thread body: diff the mailbox uids every POLL_INTERVAL secs.

    Rebinds the module-level all_uids / deleted_uids / new_uids while
    holding the condition `cv`, and notifies a waiter whenever anything
    changed.  Returns when request_to_terminate is set.
    """
    global all_uids, deleted_uids, new_uids
    while not request_to_terminate.is_set():
        with cv:
            start = time.perf_counter()
            current = set(fetch_uids(mbox, cv))
            elapsed = time.perf_counter() - start
            print(f'Fetching {len(current)} uids took {elapsed} secs')
            # Delta against the previous snapshot.
            new_uids = current - all_uids
            deleted_uids = current ^ all_uids & all_uids - current | all_uids - current if False else all_uids - current
            all_uids = current
            if deleted_uids or new_uids:
                cv.notify()  # wake the main thread to report the change
        time.sleep(POLL_INTERVAL)
if __name__ == '__main__':
    # Condition guarding the module-level uid-delta sets.
    cv = Condition()
    args = conf.parser.parse_args()
    host, port, env_var = conf.config[args.host]
    user, pass_ = os.getenv('USER_NAME_EMAIL'), os.getenv(env_var)
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    ctx.options &= ~ssl.OP_NO_SSLv3
    with imap_tools.MailBox(host=host, port=port, ssl_context=ctx) as mbox:
        mbox.login(user, pass_, initial_folder='INBOX')
        all_uids = set()
        uids_delta = set()
        # Background poller rebinds new_uids / deleted_uids and notifies
        # us through cv whenever the mailbox changes.
        update_thread = Thread(target=update_uids, args=(mbox, cv),
                               daemon=True)
        update_thread.start()
        while True:
            try:
                with cv:
                    # Block until the poller reports some delta.
                    while not (deleted_uids or new_uids):
                        cv.wait()
                    if deleted_uids:
                        print(f'deleted_uids = {deleted_uids}')
                    if new_uids:
                        print(f'new_uids = {new_uids}')
                    # Reset the deltas so the next wait blocks again.
                    deleted_uids = set()
                    new_uids = set()
            except KeyboardInterrupt:
                ans = input('Add marker/terminate [M/t] ?')
                if ans not in ('', 'm', 'M'):
                    # Terminate: stop the poller cleanly and exit.
                    request_to_terminate.set()
                    update_thread.join()
                    break
                # Otherwise treat it as "add marker" and keep waiting.
The script takes from 10 to 30 seconds to fetch all uids (in fetch_uids function).
Experimented with Debian Evolution (3.38) and macOS High Sierra (10.13.6) Mail (11.6). Evolution sees the changes instantly (I need more time to press File > Send/Receive > Send/Receive F12, than Evolution to get the changes). For macOS I need to Mailbox > Get New Mail, to get the new mail. It is equally fast. I deleted some old messages to see how quick the clients will see the deletion. Again, explicitly doing the mentioned commands give the quick result.
I created/deleted messages using https://mail.yahoo.com
How to speed up my script and see the changes made elsewhere quicker
than in 15 (avg) seconds?
Python 3.9.2, Debian GNU/Linux 11 (bullseye)
UPDATE
Credit to @Max, who suggested the solution in the comments. The new, much faster version of fetch_uids() takes only 2 seconds instead of 15:
def fetch_uids(mbox, cv):
    """Return all message uids in the current folder (fast path).

    Uses mbox.uids('ALL'), which is far faster than fetching headers.
    Returns [] on an IMAP/fetch error.  (`cv` is accepted for call-site
    symmetry but is not used here.)
    """
    # Re-select the folder to refresh the server-side view.
    mbox.folder.set(mbox.folder.get())
    try:
        # BUG FIX: the original returned map(int, ...) -- a lazy,
        # single-use iterator.  Besides being a different type from the
        # `[]` error fallback, its int() conversions only run when the
        # CALLER iterates, i.e. outside this try/except, so a bad uid
        # would raise where nobody catches it.  Materialize a list here.
        uids = [int(u) for u in mbox.uids('ALL')]
    except (imaplib.IMAP4.error, imap_tools.errors.MailboxFetchError):
        uids = []
    return uids
So I've linked about 5 files together I think, and I've added a piece of code to all those files which SHOULD prevent the idle from closing. But when I reach the last file:
# modules
import smtplib
from email.message import EmailMessage
#from pynput.keyboard import Key, Listener
# Collect credentials and a pasted message body interactively, then
# email it to the fixed recipient over SMTP-with-SSL.
from getpass import getpass  # stdlib: reads a password without echoing it

ans1 = input("Your gmail address: ")
# BUG FIX: the prompt promises the password is "(Not shown)", but the
# original used input(), which echoes every character.  getpass() hides it.
ans0 = getpass("Your gmail password(Not shown): ")
ans = input("Name of game: ")
print("Enter/Paste your code. Ctrl-D to send it.")

# Read lines until EOF (Ctrl-D, or Ctrl-Z+Enter on Windows).
contents = []
while True:
    try:
        line = input()
    except EOFError:
        break
    contents.append(line)

# content
sender = ans1
# BUG FIX: 'rockzombie005#gmail.com' is not a valid address -- the '@'
# had been mangled into '#', so SMTP delivery could never succeed.
reciever = "rockzombie005@gmail.com"
password = ans0
msg_body = "\n".join(contents)

# action: build the message and send it over implicit-TLS SMTP (port 465).
msg = EmailMessage()
msg['subject'] = ans
msg['from'] = sender
msg['to'] = reciever
msg.set_content(msg_body)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
    smtp.login(sender, password)
    smtp.send_message(msg)
print("Program sent successfully!")

# Keep the console window open until Enter (or EOF) so the success
# message stays visible when launched by double-click / from IDLE.
try:
    input()
except EOFError:
    pass
and as you can see:
try:
input()
except EOFError:
pass
that piece of code should prevent the idle from closing, and it works, but only if I run it separately. If I do ctrl + D when executed using a different file, the shell just closes or crashes without any prompt.
In the Linux command-line shell, pressing Ctrl+D logs out of the interface. If you used the sudo command to execute commands as another user, pressing Ctrl+D exits out of that other user and puts you back as the user you originally logged into.
You can disable eof generally in bash:
set -o ignoreeof
Alternatively, You can use the IGNOREEOF environment variable in bash. So export IGNOREEOF=42 and you'll have to press Ctrl+D forty-two times before it actually quits your shell.
This is my first question here.
I am trying to build a code to log MQTT messages to a .csv file. Earlier we did this using " >> filename.csv" in the terminal. But since i want to add copying the files to a remote server with a single command, I have written the following code and is working.
import paho.mqtt.client as mqttClient
import csv
def on_message(client, userdata, message):
    """paho-mqtt callback: decode one message and append it to the CSV.

    Stores the decoded payload in the module-level `recieved_data`,
    writes it as a single-column CSV row, and flushes so the row reaches
    disk immediately.  Relies on the module-level `csv_writer` and
    `my_data_file` created at startup.
    """
    global recieved_data
    payload = message.payload.decode("utf-8")
    recieved_data = str(payload)
    print("message received ", recieved_data)
    csv_writer.writerow([recieved_data])
    # Flush per message so a crash loses as little data as possible.
    my_data_file.flush()
# Interactive setup: broker address, topic, and output file name.
hostname = input ("Enter host IP : ")
topic_name = input("Enter topic name : ")
file_name = input ("Enter file name : ")

# BUG FIX: open the log file and install the callback BEFORE the network
# loop starts and the subscription is made.  The original assigned
# client.on_message inside a `while True:` busy loop, which (a) burned a
# full CPU core re-assigning the same attribute -- starving the network
# thread and dropping messages -- and (b) left a window where messages
# could arrive before the callback and the file existed.
my_data_file = open(file_name, 'w')
csv_writer = csv.writer(my_data_file, delimiter=',')

client = mqttClient.Client("Python_1")
client.on_message = on_message
client.connect(hostname)
client.subscribe(topic_name)  # Topic name
# Service the network in this (otherwise idle) thread instead of
# loop_start() plus a busy wait.
client.loop_forever()
But the code is not able to capture all the data at the rate at which the messages are being published. I verified it by comparing the values with the file generated from the following -
mosquitto_sub -h "Hostname" -t "Topic_name" >> filename.csv
When I compare the data, I am missing almost half the messages logged using the first method. When I removed CSV part then I get the required rate of logging.
Can someone suggest a way to log the received messages at the required rate?
Any suggestions would be helpful.
I have found a way to do this in a faster way. I used the subprocess module to execute the following command, which saves the stdout output to a .txt file.
$ mosquitto_sub -t "topic_name" >> filename.txt
The following code does this.
import subprocess
import os
# Ask for the output file and topic, then delegate the actual logging to
# mosquitto_sub, redirecting its stdout straight into the file.
filename = input ("Enter file name : ")
topic_name = input ("Enter topic name: ")
output_file = open(filename, 'w')
try:
    # Start logging messages; the child keeps its own handle to the file,
    # so closing ours immediately afterwards is safe.
    a = subprocess.Popen(["mosquitto_sub", "-t", topic_name], stdout=output_file)
finally:
    output_file.close()
Not sure if this is a good way to go about it, but observed a significant increase in the logging rate (I am publishing a dummy message at 100 Hz). However still the rate is slower as all the messages which I have published hasn't been written to the file.
Is there any way to make sure the rate of logging can be matched?
I'm working on a script that connects several "client" computers in a "server" computer, which then uses those clients to process several files, using FTP (pyftplib and pyftpdlib) for transfering files and results.
The script works by creating 3 folders on the server: Files, Processing and Results. The clients then connect to the server by FTP, access the "Files" folder, get the file for processing, then transfer it to the "Processing" folder while it is processing it. Then, when it finishes processing, the client delete the file from the processing folder and copies the results to the "Results" folder.
This is working correctly, both on the server and the client side. The problem i'm having is that, if one of the clients disconnects midway without generating an error (PC is disconnected, power outage), the server will threat this as if the client is still processing the file, and the file will stay in the "Processing" folder. What i want is a error checking function that, when this happens, the file on the "Processing" folder will return to the "Files" folder.
Here is the Server FTP Code
def main():
    """Configure and run the FTP server that coordinates the clients."""
    # One shared 'client' account with full permissions, plus anonymous
    # read-only access rooted at the current directory.
    authorizer = DummyAuthorizer()
    authorizer.add_user('client', 'password', '.', perm='elradfmwM')
    authorizer.add_anonymous(os.getcwd())

    handler = FTPHandler
    handler.authorizer = authorizer
    handler.banner = "FTP Server."

    # Listen on every interface; cap total and per-IP connection counts.
    server = FTPServer(('', port), handler)
    server.max_cons = 256
    server.max_cons_per_ip = 50
    server.serve_forever()


if __name__ == '__main__':
    main()
And here is the Client FTP code:
# Client work loop: claim one job file from the server, process it, and
# upload the result.  (Python 2 code -- print statements.)  The
# Files -> Processing -> Results folder moves implement the job queue.
while True:
    ftp = ftplib.FTP()
    ftp.connect(arguments.host_ip, arguments.host_port)
    ftp.login("client", "password")
    print ftp.getwelcome()
    ftp.retrlines('LIST')
    # Download the shared output template and job config from the root.
    ftp.retrbinary('RETR Output.txt', open('Output.txt', 'wb').write)
    ftp.retrbinary('RETR dicionario.json', open('dicionario.json', 'wb').write)
    with open('dicionario.json') as json_file:
        json_data = json.load(json_file)
        receptor_file = json_data['--receptor']
    print 'Retrieving receptor file ' + receptor_file
    ftp.retrbinary('RETR ' + receptor_file, open(receptor_file, 'wb').write)
    # Claim the first pending job from Files/ ...
    ftp.cwd('Files')
    ftp.retrlines('LIST')
    filename = ftp.nlst()[0]
    print 'Getting ' + filename
    ftp.retrbinary('RETR ' + filename, open(filename, 'wb').write)
    with open("Output.txt", "a") as input_file:
        input_file.write('ligand = %s' %filename)
        input_file.close()  # NOTE(review): redundant -- `with` already closes it
    # ... and move it to Processing/ so other clients skip it.
    ftp.delete(filename)
    ftp.cwd('../Processing')
    ftp.storbinary('STOR ' + filename, open(filename, 'rb'))
    ftp.quit()
    print "Processing"
    # Placeholder from the original post -- not valid Python as written.
    return_code = subprocess.call(calls the program for processing files)
    if return_code == 0:
        print """Done!"""
        # Success: upload the result and clear the Processing/ entry.
        ftp.connect(arguments.host_ip, arguments.host_port)
        ftp.login("client", "password")
        ftp.cwd('Results')
        ftp.storbinary('STOR ' + os.path.splitext(filename)[0] + '_out.pdbqt', open (os.path.splitext(filename)[0] + '_out.pdbqt'))
        ftp.cwd('../Processing')
        ftp.delete(filename)
        ftp.quit()
    else:
        print """Something is technically wrong..."""
        # Failure: requeue the job by putting it back into Files/.
        ftp.connect(arguments.host_ip, arguments.host_port)
        ftp.login("client", "password")
        ftp.cwd('Files')
        ftp.storbinary('STOR ' + filename, open(filename, 'rb'))
        ftp.cwd('../Processing')
        ftp.delete(filename)
        ftp.quit()
Thanks for the help!
So, after half a month of fiddling with this code, I finally made it work when a client cancels the connection.
First i had to make a way for the server to identify each client. Instead of making them login with only one user, i created specific users for each connection, with 2 different functions:
def handler_generation(size=9, chars=string.ascii_uppercase + string.digits):
    """Return a random credential string of `size` uppercase letters/digits."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
This generates a 9 character login and password
Then i created a custom handler in pyftpdlib, and used the on_login function:
class MyHandler(FTPHandler):
    """FTPHandler that hands each connecting client its own credentials."""

    def on_login(self, username):
        """After a login with the shared 'client' account, generate a
        unique 9-character login/password pair, register it with the
        global authorizer, and publish it in Credentials.txt for the
        client to download.  Logins with per-client accounts are ignored.
        """
        if username == "client":
            user_login = handler_generation()
            user_password = handler_generation()
            global authorizer
            authorizer.add_user(user_login, user_password, '.', perm='elradfmwM')
            # BUG FIX: the file was opened without a context manager, so
            # an exception between open() and close() leaked the handle
            # (and could leave a partial file); `with` closes it reliably.
            with open("Credentials.txt", 'w') as credentials:
                credentials.write(user_login)
                credentials.write("\n")
                credentials.write(user_password)
So, when the Client connects with the "client" login, the server generates a 9 character login and password, and sends it to the client in the "Credentials.txt" file. In the client-side, it would do this:
# Bootstrap: log in once with the shared account, download the freshly
# generated credentials, then reconnect with the per-client login.
ftp.login("client", "password")
ftp.retrbinary('RETR Credentials.txt', open('Credentials.txt', 'wb').write)
ftp.quit()
with open('Credentials.txt') as credential_file:
    lines = credential_file.readlines()
    credential_login = lines[0].split("\n")[0]      # line 1: generated login
    credential_password = lines[1].split("\n")[0]   # line 2: generated password
ftp.connect(arguments.host_ip, arguments.host_port)
ftp.login(credential_login, credential_password)
So now the clients all connect with their own specific login. On the client side, i made it so that for each task that was completed, the client would send a file named for their specific login. I also made the client append their own login name in the file they were processing to make it easy for the server to find the file:
# Prefix the working file with this client's login so the server can
# attribute it (and requeue it if this client disconnects uncleanly).
ftp.rename(filename, credential_login + filename)
Then, i used another function of the handler class, the on_disconnect:
def on_disconnect(self):
    """Called by pyftpdlib when a client connection ends.

    If a per-client user disconnected without leaving its completion
    marker file (a file named after its 9-character login), the file it
    was working on is moved from Processing/ back to Files/ so another
    client can pick it up, and the login prefix is stripped from the name.
    """
    if self.username == "client":
        # The shared bootstrap account never owns work; nothing to clean up.
        pass
    else:
        if os.path.isfile(self.username):
            # Marker file present: the client finished cleanly.
            pass
        else:
            # No marker: the client died mid-task.  Requeue its file(s).
            for fname in os.listdir("Processing"):
                if fname.startswith(self.username):
                    shutil.move("Processing/" + fname, "Files")
                    # Strip the 9-character login prefix from the name.
                    os.rename("Files/" + fname, "Files/" + fname[9::])
        print self.remote_ip, self.remote_port,self.username, "disconnected"
        pass
Now, whenever a client disconnects, the server searches the folder to check if the client sent the handler file. If it's not there, the server will move the file to the "Files" folder, which is the folder where the Files that are yet to be processed are.
To make a failed client disconnect from the server without sending a quit command, i used the timeout function from pyftpdlib. To make sure that an active client would not accidentally timeout, i implemented a thread in the client, that would do something with the server each N seconds:
class perpetualTimer():
    """Repeatedly invoke a callback every `t` seconds via threading.Timer.

    Each firing re-arms a fresh one-shot Timer, so the callback keeps
    running until cancel() is called.  Used here to touch the FTP server
    periodically so an active client never hits the idle timeout.
    """
    def __init__(self,t,hFunction):
        self.t=t                    # interval between calls, in seconds
        self.hFunction = hFunction  # zero-argument callback to invoke
        self.thread = Timer(self.t,self.handle_function)
    def handle_function(self):
        # Run the callback, then immediately schedule the next firing.
        self.hFunction()
        self.thread = Timer(self.t,self.handle_function)
        self.thread.start()
    def start(self):
        # Arm the first timer; later firings re-arm themselves.
        self.thread.start()
    def cancel(self):
        # Stop the currently pending timer; no further callbacks fire.
        self.thread.cancel()
def NotIdle():
Doing something here
t = perpetualTimer(10, NotIdle)
t.start()
(this particular code i copied straight from someone here)
And voila. Now both the server and the client work and have their own error checking function.
I'm putting this answer here in case someone encounters a similar problem.
Thanks!