Does anyone know of a way, or a resource I can look at, to check the status of all the Windows tasks I have in the Task Scheduler? I would like to see whether each task failed or succeeded, and I would like to do this in Python.
I have looked a little at the win32com.client module. I can see which tasks exist, but I can't find the status of completed jobs.
import win32com.client
scheduler = win32com.client.Dispatch("Schedule.Service")
scheduler.Connect()
tasks = scheduler.GetRunningTasks(1)
names = [tasks.Item(i+1).Name for i in range(tasks.Count)]
print names
The following uses the Task Scheduler API to print basic information for all registered tasks, including the last run time and result.
import win32com.client

TASK_ENUM_HIDDEN = 1
TASK_STATE = {0: 'Unknown',
              1: 'Disabled',
              2: 'Queued',
              3: 'Ready',
              4: 'Running'}

scheduler = win32com.client.Dispatch('Schedule.Service')
scheduler.Connect()

n = 0
folders = [scheduler.GetFolder('\\')]
while folders:
    folder = folders.pop(0)
    folders += list(folder.GetFolders(0))
    tasks = list(folder.GetTasks(TASK_ENUM_HIDDEN))
    n += len(tasks)
    for task in tasks:
        settings = task.Definition.Settings
        print('Path       : %s' % task.Path)
        print('Hidden     : %s' % settings.Hidden)
        print('State      : %s' % TASK_STATE[task.State])
        print('Last Run   : %s' % task.LastRunTime)
        print('Last Result: %s\n' % task.LastTaskResult)

print('Listed %d tasks.' % n)
This starts with only the root folder in the list. Each pass through the loop pops a folder, pushes all of its subfolders, and lists the tasks in that folder. It continues until the list of folders is empty.
COM Interfaces
ITaskService
ITaskFolder
IRegisteredTask
ITaskDefinition
ITaskSettings
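If you only need the outcome of one specific task rather than a listing of everything, the same API can fetch a single registered task by path. A minimal sketch, assuming a task named 'MyTask' registered directly under the root folder (that name is a hypothetical example; a LastTaskResult of 0 normally means the last run succeeded):

import win32com.client

scheduler = win32com.client.Dispatch('Schedule.Service')
scheduler.Connect()

# Look up a single registered task by folder and name.
# 'MyTask' is a placeholder; use the name/path shown in Task Scheduler.
root = scheduler.GetFolder('\\')
task = root.GetTask('MyTask')

print('Last Run   : %s' % task.LastRunTime)
print('Last Result: %s' % task.LastTaskResult)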
Alternatively, here's a walk_tasks generator that's modeled on the standard library's os.walk.
import os
import pywintypes
import win32com.client

TASK_ENUM_HIDDEN = 1
TASK_STATE = {
    0: 'Unknown',
    1: 'Disabled',
    2: 'Queued',
    3: 'Ready',
    4: 'Running'
}

def walk_tasks(top, topdown=True, onerror=None, include_hidden=True,
               serverName=None, user=None, domain=None, password=None):
    scheduler = win32com.client.Dispatch('Schedule.Service')
    scheduler.Connect(serverName, user, domain, password)
    if isinstance(top, bytes):
        if hasattr(os, 'fsdecode'):
            top = os.fsdecode(top)
        else:
            top = top.decode('mbcs')
    if u'/' in top:
        top = top.replace(u'/', u'\\')
    include_hidden = TASK_ENUM_HIDDEN if include_hidden else 0
    try:
        top = scheduler.GetFolder(top)
    except pywintypes.com_error as error:
        if onerror is not None:
            onerror(error)
        return
    for entry in _walk_tasks_internal(top, topdown, onerror, include_hidden):
        yield entry
def _walk_tasks_internal(top, topdown, onerror, flags):
    try:
        folders = list(top.GetFolders(0))
        tasks = list(top.GetTasks(flags))
    except pywintypes.com_error as error:
        if onerror is not None:
            onerror(error)
        return
    if not topdown:
        for d in folders:
            for entry in _walk_tasks_internal(d, topdown, onerror, flags):
                yield entry
    yield top, folders, tasks
    if topdown:
        for d in folders:
            for entry in _walk_tasks_internal(d, topdown, onerror, flags):
                yield entry
Example
if __name__ == '__main__':
    n = 0
    for folder, subfolders, tasks in walk_tasks('/'):
        n += len(tasks)
        for task in tasks:
            settings = task.Definition.Settings
            print('Path       : %s' % task.Path)
            print('Hidden     : %s' % settings.Hidden)
            print('State      : %s' % TASK_STATE[task.State])
            print('Last Run   : %s' % task.LastRunTime)
            print('Last Result: %s\n' % task.LastTaskResult)
    print('Listed %d tasks.' % n)
The Task Scheduler can also be driven from the command line using schtasks and at:
schtasks: https://technet.microsoft.com/en-us/library/cc772785%28v=ws.10%29.aspx
at: https://technet.microsoft.com/en-us/library/cc755618%28v=ws.10%29.aspx
You can run schtasks /query from Python using subprocess.check_output; see
Running windows shell commands with python
https://technet.microsoft.com/en-us/library/cc722006.aspx
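For example, a minimal sketch of calling schtasks /query from Python; the 'TaskName' and 'Last Result' column names are assumptions based on the English CSV header, so check the header your system actually emits:

import csv
import subprocess

# /fo CSV and /v give verbose, parseable output; column names depend on locale.
output = subprocess.check_output(['schtasks', '/query', '/fo', 'CSV', '/v'],
                                 universal_newlines=True)
for row in csv.DictReader(output.splitlines()):
    print('%s -> %s' % (row.get('TaskName'), row.get('Last Result')))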
The tasklist command lists all running programs and services; in PowerShell you can use Get-Process instead.
https://superuser.com/questions/914782/how-do-you-list-all-processes-on-the-command-line-in-windows
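Similarly, a rough sketch of listing running processes from Python by parsing tasklist's CSV output (the 'Image Name' and 'PID' headers are again assumed from the English output):

import csv
import subprocess

# /fo CSV prints a header row followed by one row per process.
output = subprocess.check_output(['tasklist', '/fo', 'CSV'], universal_newlines=True)
for row in csv.DictReader(output.splitlines()):
    print('%s (PID %s)' % (row.get('Image Name'), row.get('PID')))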
Related
I want to get the name of the running application given its process name, e.g. pycharm64.exe --> PyCharm or chrome.exe --> Google Chrome.
Here's what I have achieved so far:
import time

import win32gui
import win32process
import wmi

c = wmi.WMI()

def get_process_name(hwnd):
    """Get application filename given hwnd."""
    try:
        _, pid = win32process.GetWindowThreadProcessId(hwnd)
        for p in c.query('SELECT Name FROM Win32_Process WHERE ProcessId = %s' % str(pid)):
            exe = p.Name
            # print(GetFileVersionInfo(exe))
            break
        return exe
    except:
        return None

while True:
    time.sleep(3)
    print(get_process_name(win32gui.GetForegroundWindow()))
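If by 'name of the running application' you mean the friendly display name (PyCharm, Google Chrome), one common approach is to read the FileDescription string from the executable's version resource with pywin32. This sketch assumes you also query ExecutablePath (not just Name) from Win32_Process so you have a full path to pass in; the helper and the example path are hypothetical:

import win32api

def get_display_name(exe_path):
    """Return the FileDescription (e.g. 'PyCharm') of an executable, or None."""
    try:
        # Pick the first language/codepage pair declared in the version resource.
        lang, codepage = win32api.GetFileVersionInfo(exe_path, '\\VarFileInfo\\Translation')[0]
        key = u'\\StringFileInfo\\%04x%04x\\FileDescription' % (lang, codepage)
        return win32api.GetFileVersionInfo(exe_path, key)
    except Exception:
        return None

# Example (the path is hypothetical):
# print(get_display_name(r'C:\Program Files\JetBrains\PyCharm\bin\pycharm64.exe'))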
I am trying to import the snmpSessionBaseClass python module in a script I am running, but I do not have the module installed and I can't seem to find where to download it. Does anyone know the pip or yum command to download and install this module? Thanks!
import netsnmp
sys.path.insert(1, os.path.join(sys.path[0], os.pardir))
from snmpSessionBaseClass import add_common_options, get_common_options, verify_host, get_data
from pynag.Plugins import PluginHelper,ok,critical
The following code needs to be added to a file called snmpSessionBaseClass.py, and that file needs to be placed in a directory that is on Python's path.
#!/usr/bin/env python
# Copyright (C) 2016 rsmuc <rsmuc#mailbox.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with health_monitoring_plugins. If not, see <http://www.gnu.org/licenses/>.
import pynag
import netsnmp
import os
import sys
dev_null = os.open(os.devnull, os.O_WRONLY)
tmp_stdout = os.dup(sys.stdout.fileno())
def dev_null_wrapper(func, *a, **kwargs):
    """
    Temporarily swap stdout with /dev/null, and execute the given function while stdout goes to /dev/null.
    This is useful because netsnmp writes to stdout and disturbs the Icinga result in some cases.
    """
    os.dup2(dev_null, sys.stdout.fileno())
    return_object = func(*a, **kwargs)
    sys.stdout.flush()
    os.dup2(tmp_stdout, sys.stdout.fileno())
    return return_object
def add_common_options(helper):
    # Define the common command line parameters
    helper.parser.add_option('-H', help="Hostname or ip address", dest="hostname")
    helper.parser.add_option('-C', '--community', dest='community', help='SNMP community of the SNMP service on target host.', default='public')
    helper.parser.add_option('-V', '--snmpversion', dest='version', help='SNMP version. (1 or 2)', default=2, type='int')

def get_common_options(helper):
    # get the common options
    host = helper.options.hostname
    version = helper.options.version
    community = helper.options.community
    return host, version, community
def verify_host(host, helper):
    if host == "" or host is None:
        helper.exit(summary="Hostname must be specified"
                    , exit_code=pynag.Plugins.unknown
                    , perfdata='')
    netsnmp_session = dev_null_wrapper(netsnmp.Session,
                                       DestHost=helper.options.hostname,
                                       Community=helper.options.community,
                                       Version=helper.options.version)
    try:
        # Works around lacking error handling in netsnmp package.
        if netsnmp_session.sess_ptr == 0:
            helper.exit(summary="SNMP connection failed"
                        , exit_code=pynag.Plugins.unknown
                        , perfdata='')
    except ValueError as error:
        helper.exit(summary=str(error)
                    , exit_code=pynag.Plugins.unknown
                    , perfdata='')
# make a snmp get, if it fails (or returns nothing) exit the plugin
def get_data(session, oid, helper, empty_allowed=False):
    var = netsnmp.Varbind(oid)
    varl = netsnmp.VarList(var)
    data = session.get(varl)
    value = data[0]
    if value is None:
        helper.exit(summary="snmpget failed - no data for host "
                    + session.DestHost + " OID: " + oid
                    , exit_code=pynag.Plugins.unknown
                    , perfdata='')
    if not empty_allowed and not value:
        helper.exit(summary="snmpget failed - no data for host "
                    + session.DestHost + " OID: " + oid
                    , exit_code=pynag.Plugins.unknown
                    , perfdata='')
    return value
# make a snmp get, but do not exit the plugin, if it returns nothing
# be careful! This function does not exit the plugin, if snmp get fails!
def attempt_get_data(session, oid):
    var = netsnmp.Varbind(oid)
    varl = netsnmp.VarList(var)
    data = session.get(varl)
    value = data[0]
    return value
# make a snmp walk, if it fails (or returns nothing) exit the plugin
def walk_data(session, oid, helper):
    tag = []
    var = netsnmp.Varbind(oid)
    varl = netsnmp.VarList(var)
    data = list(session.walk(varl))
    if len(data) == 0:
        helper.exit(summary="snmpwalk failed - no data for host " + session.DestHost
                    + " OID: " + oid
                    , exit_code=pynag.Plugins.unknown
                    , perfdata='')
    for x in range(0, len(data)):
        tag.append(varl[x].tag)
    return data, tag

# make a snmp walk, but do not exit the plugin, if it returns nothing
# be careful! This function does not exit the plugin, if snmp walk fails!
def attempt_walk_data(session, oid):
    tag = []
    var = netsnmp.Varbind(oid)
    varl = netsnmp.VarList(var)
    data = list(session.walk(varl))
    for x in range(0, len(data)):
        tag.append(varl[x].tag)
    return data, tag
def state_summary(value, name, state_list, helper, ok_value='ok', info=None):
    """
    Always add the status to the long output, and if the status is not ok (or ok_value),
    we show it in the summary and set the status to critical
    """
    # translate the value (integer) we receive to a human readable value (e.g. ok, critical etc.) with the given state_list
    state_value = state_list[int(value)]
    summary_output = ''
    long_output = ''
    if not info:
        info = ''
    if state_value != ok_value:
        summary_output += ('%s status: %s %s ' % (name, state_value, info))
        helper.status(pynag.Plugins.critical)
    long_output += ('%s status: %s %s\n' % (name, state_value, info))
    return (summary_output, long_output)

def add_output(summary_output, long_output, helper):
    """
    if the summary output is empty, we don't add it as summary, otherwise we would have empty spaces (e.g.: '. . . . .') in our summary report
    """
    if summary_output != '':
        helper.add_summary(summary_output)
    helper.add_long_output(long_output)
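For context, here is a minimal sketch of how a check script might use these helpers once snmpSessionBaseClass.py is importable; the OID (sysDescr.0) is only a placeholder example, and the exact PluginHelper calls are assumptions based on how the helpers above use pynag:

#!/usr/bin/env python
# Hypothetical usage sketch for the helpers above; the OID is a placeholder.
import netsnmp
from pynag.Plugins import PluginHelper
from snmpSessionBaseClass import add_common_options, get_common_options, verify_host, get_data

helper = PluginHelper()
add_common_options(helper)
helper.parse_arguments()
host, version, community = get_common_options(helper)

verify_host(host, helper)
session = netsnmp.Session(DestHost=host, Community=community, Version=version)

# sysDescr.0 is used only as an example OID.
value = get_data(session, '.1.3.6.1.2.1.1.1.0', helper)
helper.add_summary('sysDescr: %s' % value)
helper.exit()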
I am trying to write a Python script that scans a folder, collects updated SQL scripts, and then automatically pulls data for each SQL script. In the code, a while loop scans for new SQL files and sends them to the data-pull function. I am having trouble understanding how to build a dynamic queue with the while loop while also having multiple processes run the tasks in the queue.
The following code has the problem that each while-loop iteration works on a long job before it moves to the next iteration and collects other jobs to fill the vacant processors.
Update:
Thanks to #pbacterio for catching the bug; the error message is now gone. After changing the code, the script can take all the job scripts during one iteration and distribute them to four processors. However, it still gets hung up by a long job before going to the next iteration to scan and submit newly added job scripts. Any idea how to restructure the code?
I finally figured out the solution; see the answer below. It turned out that what I was looking for is:
the_queue = Queue()
the_pool = Pool(4, worker_main,(the_queue,))
For those who stumble on a similar idea, the following is the whole architecture of this automation script, which converts a shared drive into a 'server for SQL pulling' or any other job-queue 'server'.
a. The python script auto_data_pull.py as shown in the answer. You need to add your own job function.
b. A 'batch script' with following:
start C:\Anaconda2\python.exe C:\Users\bin\auto_data_pull.py
c. Add a scheduled task, triggered at computer startup, that runs the 'batch script'.
That's all. It works.
Python Code:
from glob import glob
import os, time
import sys
import csv
import re
import subprocess
import pandas as PD
import pypyodbc
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
    for func, args in iter(input.get, 'STOP'):
        result = compute(func, args)
        output.put(result)

#
# Function used to compute result
#
def compute(func, args):
    result = func(args)
    return '%s says that %s%s = %s' % \
        (current_process().name, func.__name__, args, result)
def query_sql(sql_file): #test func
    #jsl file processing and SQL querying, data table will be saved to csv.
    fo_name = os.path.splitext(sql_file)[0] + '.csv'
    fo = open(fo_name, 'w')
    print sql_file
    fo.write("sql_file {0} is done\n".format(sql_file))
    return "Query is done for {0}\n".format(sql_file)
def check_files(path):
    """
    arguments -- root path to monitor
    returns -- dictionary of {file: timestamp, ...}
    """
    sql_query_dirs = glob(path + "/*/IDABox/")
    files_dict = {}
    for sql_query_dir in sql_query_dirs:
        for root, dirs, filenames in os.walk(sql_query_dir):
            [files_dict.update({(root + filename): os.path.getmtime(root + filename)}) for
             filename in filenames if filename.endswith('.jsl')]
    return files_dict
##### working in single thread
def single_thread():
    path = "Y:/"
    before = check_files(path)
    sql_queue = []
    while True:
        time.sleep(3)
        after = check_files(path)
        added = [f for f in after if not f in before]
        deleted = [f for f in before if not f in after]
        overlapped = list(set(list(after)) & set(list(before)))
        updated = [f for f in overlapped if before[f] < after[f]]
        before = after
        sql_queue = added + updated
        # print sql_queue
        for sql_file in sql_queue:
            try:
                query_sql(sql_file)
            except:
                pass
##### not working in queue
def multiple_thread():
    NUMBER_OF_PROCESSES = 4
    path = "Y:/"
    sql_queue = []
    before = check_files(path) # get the current dictionary of sql_files
    task_queue = Queue()
    done_queue = Queue()
    while True: #while loop to check the changes of the files
        time.sleep(5)
        after = check_files(path)
        added = [f for f in after if not f in before]
        deleted = [f for f in before if not f in after]
        overlapped = list(set(list(after)) & set(list(before)))
        updated = [f for f in overlapped if before[f] < after[f]]
        before = after
        sql_queue = added + updated
        TASKS = [(query_sql, sql_file) for sql_file in sql_queue]
        # Create queues
        # submit task
        for task in TASKS:
            task_queue.put(task)
        for i in range(NUMBER_OF_PROCESSES):
            p = Process(target=worker, args=(task_queue, done_queue)).start()
            # try:
            #     p = Process(target=worker, args=(task_queue))
            #     p.start()
            # except:
            #     pass
        # Get and print results
        print 'Unordered results:'
        for i in range(len(TASKS)):
            print '\t', done_queue.get()
        # Tell child processes to stop
        for i in range(NUMBER_OF_PROCESSES):
            task_queue.put('STOP')

# single_thread()
if __name__ == '__main__':
    # freeze_support()
    multiple_thread()
Reference:
monitor file changes with python script: http://timgolden.me.uk/python/win32_how_do_i/watch_directory_for_changes.html
Multiprocessing:
https://docs.python.org/2/library/multiprocessing.html
Where did you define sql_file in multiple_thread(), in
multiprocessing.Process(target=query_sql, args=(sql_file)).start()?
You have not defined sql_file in that method; moreover, you used that variable in a for loop, so its scope is confined to the for loop.
Try replacing this:
result = func(*args)
by this:
result = func(args)
I have figured this out. Thank you for the responses; they inspired the idea.
Now the script can run a while loop to monitor the folder for newly updated/added SQL scripts and then distribute the data pulling to multiple threads. The solution comes from queue.get() and queue.put(). I assume the queue object takes care of the communication by itself.
This is the final code --
from glob import glob
import os, time
import sys
import pypyodbc
from multiprocessing import Process, Queue, Event, Pool, current_process, freeze_support

def query_sql(sql_file): #test func
    #jsl file processing and SQL querying, data table will be saved to csv.
    fo_name = os.path.splitext(sql_file)[0] + '.csv'
    fo = open(fo_name, 'w')
    print sql_file
    fo.write("sql_file {0} is done\n".format(sql_file))
    return "Query is done for {0}\n".format(sql_file)
def check_files(path):
    """
    arguments -- root path to monitor
    returns -- dictionary of {file: timestamp, ...}
    """
    sql_query_dirs = glob(path + "/*/IDABox/")
    files_dict = {}
    try:
        for sql_query_dir in sql_query_dirs:
            for root, dirs, filenames in os.walk(sql_query_dir):
                [files_dict.update({(root + filename): os.path.getmtime(root + filename)}) for
                 filename in filenames if filename.endswith('.jsl')]
    except:
        pass
    return files_dict

def worker_main(queue):
    print os.getpid(), "working"
    while True:
        item = queue.get(True)
        query_sql(item)
def main():
    the_queue = Queue()
    the_pool = Pool(4, worker_main, (the_queue,))
    path = "Y:/"
    before = check_files(path) # get the current dictionary of sql_files
    while True: #while loop to check the changes of the files
        time.sleep(5)
        sql_queue = []
        after = check_files(path)
        added = [f for f in after if not f in before]
        deleted = [f for f in before if not f in after]
        overlapped = list(set(list(after)) & set(list(before)))
        updated = [f for f in overlapped if before[f] < after[f]]
        before = after
        sql_queue = added + updated
        if sql_queue:
            for jsl_file in sql_queue:
                try:
                    the_queue.put(jsl_file)
                except:
                    print "{0} failed with error {1}. \n".format(jsl_file, str(sys.exc_info()[0]))
                    pass
        else:
            pass

if __name__ == "__main__":
    main()
I'm currently doing some work with multithreading and I'm trying to figure out why my program isn't working as intended.
def input_watcher():
    while True:
        input_file = os.path.abspath(raw_input('Input file name: '))
        compiler = raw_input('Choose compiler: ')
        if os.path.isfile(input_file):
            obj = FileObject(input_file, compiler)
            with file_lock:
                files.append(obj)
            print 'Adding %s with %s as compiler' % (obj.file_name, obj.compiler)
        else:
            print 'File does not exists'
This is running in one thread and it works fine until I start adding the second file object.
This is the output from the console:
Input file name: C:\Users\Victor\Dropbox\Private\multiFile\main.py
Choose compiler: aImport
Adding main.py with aImport as compiler
Input file name: main.py updated
C:\Users\Victor\Dropbox\Private\multiFile\main.py
Choose compiler: Input file name: Input file name: Input file name: Input file name:
The 'Input file name' prompt keeps popping up the second I add the second filename and it asks for a compiler. The program keeps printing 'Input file name:' until it crashes.
I have other code running in a different thread. I don't think it has anything to do with the error, but tell me if you need to see it and I will post it.
The full code:
import multiprocessing
import threading
import os
import time
file_lock = threading.Lock()
update_interval = 0.1
class FileMethods(object):
    def a_import(self):
        self.mod_check()

class FileObject(FileMethods):
    def __init__(self, full_name, compiler):
        self.full_name = os.path.abspath(full_name)
        self.file_name = os.path.basename(self.full_name)
        self.path_name = os.path.dirname(self.full_name)
        name, exstention = os.path.splitext(full_name)
        self.concat_name = name + '-concat' + exstention
        self.compiler = compiler
        self.compiler_methods = {'aImport': self.a_import}
        self.last_updated = os.path.getatime(self.full_name)
        self.subfiles = []
        self.last_subfiles_mod = {}

    def exists(self):
        return os.path.isfile(self.full_name)

    def mod_check(self):
        if self.last_updated < os.path.getmtime(self.full_name):
            self.last_updated = os.path.getmtime(self.full_name)
            print '%s updated' % self.file_name
            return True
        else:
            return False

    def sub_mod_check(self):
        for s in self.subfiles:
            if self.last_subfiles_mod.get(s) < os.path.getmtime(s):
                self.last_subfiles_mod[s] = os.path.getmtime(s)
                return True
        return False
files = []

def input_watcher():
    while True:
        input_file = os.path.abspath(raw_input('Input file name: '))
        compiler = raw_input('Choose compiler: ')
        if os.path.isfile(input_file):
            obj = FileObject(input_file, compiler)
            with file_lock:
                files.append(obj)
            print 'Adding %s with %s as compiler' % (obj.file_name, obj.compiler)
        else:
            print 'File does not exists'

def file_manipulation():
    if __name__ == '__main__':
        for f in files:
            p = multiprocessing.Process(target=f.compiler_methods.get(f.compiler)())
            p.start()
            #f.compiler_methods.get(f.compiler)()

def file_watcher():
    while True:
        with file_lock:
            file_manipulation()
        time.sleep(update_interval)

iw = threading.Thread(target=input_watcher)
fw = threading.Thread(target=file_watcher)

iw.start()
fw.start()
This is happening because you're not using an if __name__ == "__main__": guard, while also using multiprocessing.Process on Windows. Windows needs to re-import your module in the child processes it spawns, which means it will keep creating new threads to handle inputs and watch files. This, of course, is a recipe for disaster. Do this to fix the issue:
if __name__ == "__main__":
    iw = threading.Thread(target=input_watcher)
    fw = threading.Thread(target=file_watcher)

    iw.start()
    fw.start()
See the "Safe importing of the main module" section in the multiprocessing docs for more info.
I also have a feeling file_watcher isn't really doing what you want it to (it will keep re-spawning processes for files you've already processed), but that's not really related to the original question.
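For what it's worth, here is a rough sketch of one way to stop re-spawning processes for files that were already handled, by remembering which full names have been processed. It is meant as a drop-in replacement for file_manipulation in the script above (so it relies on the module-level files list and the multiprocessing import already there); the processed set and the skip logic are my assumption about the intended behaviour, not part of the original code:

processed = set()

def file_manipulation():
    # Only spawn work for FileObjects that have not been handled yet.
    for f in files:
        if f.full_name in processed:
            continue
        processed.add(f.full_name)
        method = f.compiler_methods.get(f.compiler)
        if method is not None:
            # Pass the bound method itself; calling it here would run it in this process.
            p = multiprocessing.Process(target=method)
            p.start()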
I am very new to Python and am trying to work on this script, which receives data from multiple FTP sites and downloads yesterday's data, according to the date directory, to my local folder. If the retrieval fails on a given day, the script does not update that day's records and simply moves on to the next day. I want to sync the files so that even if a day is missed, its new files are still synced to the local folder. I have looked at rsync, but I need your help working it into the script. This is my script:
MAX_CHILDREN = 16
ftp_site_prog = "/usr/local/bin/ftp_site.py"

class SpawnQ:
    def __init__(self, max_pids):
        self.max_pids = max_pids
        self.queue = []
        tmp = re.split("/", ftp_site_prog)
        self.my_name = tmp[-1]

    def addQ(self, site_id):
        self.queue.append(site_id)
        return

    def runQ(self):
        while (len(self.queue) != 0):
            # Check how many sessions are running
            cmd = """ps -ef | grep "%s" | grep -v grep""" % self.my_name
            num_pids = 0
            for line in os.popen(cmd).readlines():
                num_pids = num_pids + 1
            if (num_pids < self.max_pids):
                site_id = self.queue.pop()
                # print site_id
                # print "Forking........"
                fpid = os.fork()
                if fpid:
                    # print "Created child: ", fpid
                    os.waitpid(fpid, os.WNOHANG)
                else:
                    # print "This is the Child"
                    # Exec the ftp_site
                    arg_string = "%s" % site_id
                    args = [arg_string]
                    os.execvp(ftp_site_prog, (ftp_site_prog,) + tuple(args))
                    # how to call rsync on my py script?
                    # os.system("rsync -ftp_site_prog, (ftp_site_prog,)+ tuple(args))
                    sys.exit(0)
            else:
                # print "Waiting for a spare process...."
                time.sleep(10)
        return
# Get a list of the sites
db_obj = nprint.celDb()
site_list = db_obj.get_all_site_ids()
myQ = SpawnQ(MAX_CHILDREN)
for site_id in site_list:
    myQ.addQ(site_id)
myQ.runQ()

# Wait until we only have the parent left
# Check how many sessions are running
tmp = re.split("/", ftp_site_prog)
ftp_name = tmp[-1]
cmd = """ps -ef | grep "%s" | grep -v grep""" % ftp_name
num_pids = MAX_CHILDREN
while (num_pids > 0):
    num_pids = 0
    for line in os.popen(cmd).readlines():
        num_pids = num_pids + 1
    time.sleep(60)

today = datetime.date.today()
daydelta = datetime.timedelta(days=1)
yesterday = today - daydelta
Much of this can be accomplished with the ftplib module for the retrieval of files from standard FTP servers. If you are dealing with SFTP servers you can use the paramiko library.
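As a hedged sketch of the ftplib approach: the following downloads yesterday's date-named directory from a single FTP site and skips files that already exist locally, so a missed day can simply be re-run later. The host, credentials, local path, and the YYYYMMDD directory layout are all assumptions.

import datetime
import ftplib
import os

def sync_day(host, user, password, day, local_root):
    """Download every file in the remote YYYYMMDD directory that is not already local."""
    remote_dir = day.strftime('%Y%m%d')          # assumed date-directory naming
    local_dir = os.path.join(local_root, remote_dir)
    if not os.path.isdir(local_dir):
        os.makedirs(local_dir)
    ftp = ftplib.FTP(host)
    ftp.login(user, password)
    ftp.cwd(remote_dir)
    for name in ftp.nlst():
        target = os.path.join(local_dir, name)
        if os.path.exists(target):               # already synced on an earlier run
            continue
        with open(target, 'wb') as fo:
            ftp.retrbinary('RETR ' + name, fo.write)
    ftp.quit()

if __name__ == '__main__':
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    # host, credentials, and local path below are placeholders
    sync_day('ftp.example.com', 'user', 'secret', yesterday, '/data/ftp_mirror')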