I've written a Nautilus extension which reads pictures' metadata (by executing exiftool), but when I open folders with many files, it really slows down the file manager and hangs until it finishes reading the files' data.
Is there a way to make Nautilus keep working while it runs my extension? Perhaps the Exif data could appear gradually in the columns while I go on with my work.
#!/usr/bin/python
# Requires:
# nautilus-python
# exiftool
# gconf-python
# Version 0.15
import gobject
import nautilus
from subprocess import Popen, PIPE
from urllib import unquote
import gconf

def getexiftool(filename):
    options = '-fast2 -f -m -q -q -s3 -ExifIFD:DateTimeOriginal -IFD0:Software -ExifIFD:Flash -Composite:ImageSize -IFD0:Model'
    # '-Nikon:ShutterCount' cannot be used together with the -fast2 option
    exiftool = Popen(['/usr/bin/exiftool'] + options.split() + [filename], stdout=PIPE, stderr=PIPE)
    output, errors = exiftool.communicate()
    return output.split('\n')

class ColumnExtension(nautilus.ColumnProvider, nautilus.InfoProvider, gobject.GObject):
    def __init__(self):
        pass

    def get_columns(self):
        return (
            nautilus.Column("NautilusPython::ExifIFD:DateTimeOriginal", "ExifIFD:DateTimeOriginal", "Date (ExifIFD)", "Date the picture was taken"),
            nautilus.Column("NautilusPython::IFD0:Software", "IFD0:Software", "Software (IFD0)", "Software used"),
            nautilus.Column("NautilusPython::ExifIFD:Flash", "ExifIFD:Flash", "Flash (ExifIFD)", "Flash mode"),
            nautilus.Column("NautilusPython::Composite:ImageSize", "Composite:ImageSize", "Resolution (Exif)", "Image resolution"),
            nautilus.Column("NautilusPython::IFD0:Model", "IFD0:Model", "Camera (IFD0)", "Camera model"),
            #nautilus.Column("NautilusPython::Nikon:ShutterCount", "Nikon:ShutterCount", "Shutter count (Nikon)", "Number of shots the camera had taken as of this file"),
            nautilus.Column("NautilusPython::Mp", "Mp", "Megapixels (Exif)", "Image size in megapixels"),
        )

    def update_file_info_full(self, provider, handle, closure, file):
        client = gconf.client_get_default()
        if not client.get_bool('/apps/nautilus/nautilus-metadata/enable'):
            client.set_bool('/apps/nautilus/nautilus-metadata/enable', 0)
            return
        if file.get_uri_scheme() != 'file':
            return
        if file.get_mime_type() in ('image/jpeg', 'image/png', 'image/gif', 'image/bmp', 'image/x-nikon-nef', 'image/x-xcf', 'image/vnd.adobe.photoshop'):
            gobject.timeout_add_seconds(1, self.update_exif, provider, handle, closure, file)
            return nautilus.OperationResult.IN_PROGRESS
        file.add_string_attribute('ExifIFD:DateTimeOriginal', '')
        file.add_string_attribute('IFD0:Software', '')
        file.add_string_attribute('ExifIFD:Flash', '')
        file.add_string_attribute('Composite:ImageSize', '')
        file.add_string_attribute('IFD0:Model', '')
        file.add_string_attribute('Nikon:ShutterCount', '')
        file.add_string_attribute('Mp', '')
        return nautilus.OperationResult.COMPLETE

    def update_exif(self, provider, handle, closure, file):
        filename = unquote(file.get_uri()[7:])
        data = getexiftool(filename)
        file.add_string_attribute('ExifIFD:DateTimeOriginal', data[0].replace(':', '-', 2))
        file.add_string_attribute('IFD0:Software', data[1])
        file.add_string_attribute('ExifIFD:Flash', data[2])
        file.add_string_attribute('Composite:ImageSize', data[3])
        file.add_string_attribute('IFD0:Model', data[4])
        #file.add_string_attribute('Nikon:ShutterCount', data[5])
        width, height = data[3].split('x')
        mp = float(width) * float(height) / 1000000
        mp = "%.2f" % mp
        file.add_string_attribute('Mp', str(mp) + ' Mp')
        nautilus.info_provider_update_complete_invoke(closure, provider, handle, nautilus.OperationResult.COMPLETE)
        return False
That happens because you are implementing update_file_info, which is part of the asynchronous IO system of Nautilus. It therefore blocks Nautilus if the operations are not fast enough.
In your case it is exacerbated because you are calling an external program, and that is an expensive operation. Notice that update_file_info is called once per file. If you have 100 files, the external program will be called 100 times, and Nautilus will have to wait for each call before processing the next one.
Since nautilus-python 0.7, update_file_info_full and cancel_update are available, which allow you to program asynchronous calls. You can check the nautilus-python 0.7 documentation for more details.
It is worth mentioning that this was a limitation of nautilus-python only, which previously did not expose those methods that were already available in C.
EDIT: Added a couple of examples.
The trick is to make the process as fast as possible, or to make it asynchronous.
Example 1: Invoking an external program
Using a simplified version of your code, we make it asynchronous using GObject.timeout_add_seconds in update_file_info_full.
from gi.repository import Nautilus, GObject
from urllib import unquote
from subprocess import Popen, PIPE

def getexiftool(filename):
    options = '-fast2 -f -m -q -q -s3 -ExifIFD:DateTimeOriginal'
    exiftool = Popen(['/usr/bin/exiftool'] + options.split() + [filename],
                     stdout=PIPE, stderr=PIPE)
    output, errors = exiftool.communicate()
    return output.split('\n')

class MyExtension(Nautilus.ColumnProvider, Nautilus.InfoProvider, GObject.GObject):
    def __init__(self):
        pass

    def get_columns(self):
        return (
            Nautilus.Column(name='MyExif::DateTime',
                            attribute='Exif:Image:DateTime',
                            label='Date Original',
                            description='Date time original'),
        )

    def update_file_info_full(self, provider, handle, closure, file_info):
        if file_info.get_uri_scheme() != 'file':
            return
        attr = ''
        if file_info.get_mime_type() in ('image/jpeg', 'image/png'):
            GObject.timeout_add_seconds(1, self.update_exif,
                                        provider, handle, closure, file_info)
            return Nautilus.OperationResult.IN_PROGRESS
        file_info.add_string_attribute('Exif:Image:DateTime', attr)
        return Nautilus.OperationResult.COMPLETE

    def update_exif(self, provider, handle, closure, file_info):
        filename = unquote(file_info.get_uri()[7:])
        try:
            data = getexiftool(filename)
            attr = data[0]
        except Exception:
            attr = ''
        file_info.add_string_attribute('Exif:Image:DateTime', attr)
        Nautilus.info_provider_update_complete_invoke(closure, provider,
                                                      handle, Nautilus.OperationResult.COMPLETE)
        return False
The code above will not block Nautilus, and if the column 'Date Original' is available in the column view, the JPEG and PNG images will first show an 'unknown' value, and then slowly be updated (the subprocess is called after 1 second).
Example 2: Using a library
Rather than invoking an external program, it could be better to use a library, as in the example below:
from gi.repository import Nautilus, GObject
from urllib import unquote
import pyexiv2

class MyExtension(Nautilus.ColumnProvider, Nautilus.InfoProvider, GObject.GObject):
    def __init__(self):
        pass

    def get_columns(self):
        return (
            Nautilus.Column(name='MyExif::DateTime',
                            attribute='Exif:Image:DateTime',
                            label='Date Original',
                            description='Date time original'),
        )

    def update_file_info_full(self, provider, handle, closure, file_info):
        if file_info.get_uri_scheme() != 'file':
            return
        filename = unquote(file_info.get_uri()[7:])
        attr = ''
        if file_info.get_mime_type() in ('image/jpeg', 'image/png'):
            metadata = pyexiv2.ImageMetadata(filename)
            metadata.read()
            try:
                tag = metadata['Exif.Image.DateTime'].value
                attr = tag.strftime('%Y-%m-%d %H:%M')
            except KeyError:
                attr = ''
        file_info.add_string_attribute('Exif:Image:DateTime', attr)
        return Nautilus.OperationResult.COMPLETE
Eventually, if the routine is slow, you will need to make it asynchronous (maybe using something better than GObject.timeout_add_seconds).
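One possibility (a hedged sketch I have not tested; it assumes Nautilus extension objects may be safely updated from a GLib idle callback) is to replace the two methods of MyExtension from Example 1 with a worker thread plus GObject.idle_add, so even the 1-second batching disappears:

import threading

    def update_file_info_full(self, provider, handle, closure, file_info):
        if file_info.get_uri_scheme() != 'file':
            return Nautilus.OperationResult.COMPLETE
        # The worker does the slow read; IN_PROGRESS tells Nautilus not to wait.
        threading.Thread(target=self.fetch_exif,
                         args=(provider, handle, closure, file_info)).start()
        return Nautilus.OperationResult.IN_PROGRESS

    def fetch_exif(self, provider, handle, closure, file_info):
        filename = unquote(file_info.get_uri()[7:])
        try:
            attr = getexiftool(filename)[0]  # the slow call runs off the main loop
        except Exception:
            attr = ''
        def done():
            file_info.add_string_attribute('Exif:Image:DateTime', attr)
            Nautilus.info_provider_update_complete_invoke(
                closure, provider, handle, Nautilus.OperationResult.COMPLETE)
            return False  # run the idle callback only once
        GObject.idle_add(done)  # hand the result back to the main loop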
Last but not least, in my examples I used GObject Introspection (typical for Nautilus 3), but it is easy to change them to use the module nautilus directly.
The above solution is only partly correct.
Between state changes for file_info metadata, you should call file_info.invalidate_extension_info() to notify Nautilus of the change.
Failing to do this can leave 'unknown' showing in your columns.
file_info.add_string_attribute('video_width', video_width)
file_info.add_string_attribute('video_height', video_height)
file_info.add_string_attribute('name_suggestion', name_suggestion)
file_info.invalidate_extension_info()
Nautilus.info_provider_update_complete_invoke(closure, provider, handle, Nautilus.OperationResult.COMPLETE)
Full working example here:
Fully working example
API Documentation
Thanks to Dave! I was looking for a solution to the 'unknown' text in the column for ages.
file_info.invalidate_extension_info()
Fixed the issue for me right away :)
Per the API documentation:
https://projects-old.gnome.org/nautilus-python/documentation/html/class-nautilus-python-file-info.html#method-nautilus-python-file-info--invalidate-extension-info
Nautilus.FileInfo.invalidate_extension_info
def invalidate_extension_info()
Invalidates the information Nautilus has about this file, which causes it to request new information from its Nautilus.InfoProvider providers.
Related
Excuse the unhelpful variable names and unnecessarily bloated code, but I just quickly whipped this together and haven't had time to optimise or tidy up yet.
I wrote this program to dump all the images my friend and I had sent to each other using a webcam photo sharing service ( 321cheese.com ) by parsing a message log for the URLs. The problem is that my multithreading doesn't seem to work.
At the bottom of my code, you'll see my commented-out non-multithreaded download method, which consistently produces the correct results (121 photos in this case). But when I try to send this action to a new thread, the program sometimes downloads 112 photos, sometimes 90, sometimes 115, and so on, but never the correct number.
Why would this create a problem? Should I limit the number of simultaneous threads (and how)?
import urllib
import thread

def getName(input):
    l = input.split(".com/")
    m = l[1]
    return m

def parseMessages():
    theFile = open('messages.html', 'r')
    theLines = theFile.readlines()
    theFile.close()
    theNewFile = open('new321.txt', 'w')
    for z in theLines:
        if "321cheese" in z:
            theNewFile.write(z)
    theNewFile.close()

def downloadImage(inputURL):
    urllib.urlretrieve(inputURL, "./grabNew/" + d)

parseMessages()

f = open('new321.txt', 'r')
lines = f.readlines()
f.close()

g = open('output.txt', 'w')

for x in lines:
    a = x.split("<a href=\"")
    b = a[1].split("\"")
    c = b[0]
    if ".png" in c:
        d = getName(c)
        g.write(c + "\n")
        thread.start_new_thread(downloadImage, (c,))
        ##downloadImage(c)

g.close()
There are multiple issues in your code.
The main issue is the use of the global name d across multiple threads. To fix it, pass the name explicitly as an argument to downloadImage().
The easy way (code-wise) to limit the number of concurrent downloads is to use concurrent.futures (available on Python 2 as the futures backport) or multiprocessing.Pool:
#!/usr/bin/env python
import os
import urllib
from multiprocessing import Pool
from posixpath import basename
from urllib import unquote
from urlparse import urlsplit

download_dir = "grabNew"

def url2filename(url):
    return basename(unquote(urlsplit(url).path).decode('utf-8'))

def download_image(url):
    filename = None
    try:
        filename = os.path.join(download_dir, url2filename(url))
        return urllib.urlretrieve(url, filename), None
    except Exception as e:
        return (filename, None), e

def main():
    pool = Pool(processes=10)
    # get_urls() is assumed to yield the image URLs to fetch
    for (filename, headers), error in pool.imap_unordered(download_image, get_urls()):
        pass # do something with the downloaded file or handle an error

if __name__ == "__main__":
    main()
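Since concurrent.futures was mentioned above but not shown, here is a minimal sketch of the thread-based variant (assuming the same download_image() and get_urls() helpers; on Python 2 this needs the futures backport):

from concurrent.futures import ThreadPoolExecutor

def main():
    # At most 10 downloads run at once; map() yields results in input order.
    with ThreadPoolExecutor(max_workers=10) as executor:
        for (filename, headers), error in executor.map(download_image, get_urls()):
            pass # do something with the downloaded file or handle an error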
Did you make sure your parsing is working correctly?
Also, you are launching too many threads.
And finally... threads in Python are FAKE! Use the multiprocessing module if you want real parallelism. But since the images probably all come from the same server, if you open a hundred connections to it at the same time, its firewall will probably start dropping your connections.
I have a log file being written by another process which I want to watch for changes. Each time a change occurs I'd like to read the new data in to do some processing on it.
What's the best way to do this? I was hoping there'd be some sort of hook from the PyWin32 library. I've found the win32file.FindNextChangeNotification function but have no idea how to ask it to watch a specific file.
If anyone's done anything like this I'd be really grateful to hear how...
[Edit] I should have mentioned that I was after a solution that doesn't require polling.
[Edit] Curses! It seems this doesn't work over a mapped network drive. I'm guessing windows doesn't 'hear' any updates to the file the way it does on a local disk.
Did you try using Watchdog?
Python API library and shell utilities to monitor file system events.
Directory monitoring made easy with:
A cross-platform API.
A shell tool to run commands in response to directory changes.
Get started quickly with a simple example in Quickstart...
If polling is good enough for you, I'd just watch if the "modified time" file stat changes. To read it:
os.stat(filename).st_mtime
(Also note that the Windows native change event solution does not work in all circumstances, e.g. on network drives.)
import os

class Monkey(object):
    def __init__(self):
        self._cached_stamp = 0
        self.filename = '/path/to/file'

    def ook(self):
        stamp = os.stat(self.filename).st_mtime
        if stamp != self._cached_stamp:
            self._cached_stamp = stamp
            # File has changed, so do something...
If you want a multiplatform solution, then check QFileSystemWatcher.
Here is some example code (not sanitized):

from PyQt4 import QtCore

@QtCore.pyqtSlot(str)
def directory_changed(path):
    print('Directory Changed!!!')

@QtCore.pyqtSlot(str)
def file_changed(path):
    print('File Changed!!!')

fs_watcher = QtCore.QFileSystemWatcher(['/path/to/files_1', '/path/to/files_2', '/path/to/files_3'])
fs_watcher.connect(fs_watcher, QtCore.SIGNAL('directoryChanged(QString)'), directory_changed)
fs_watcher.connect(fs_watcher, QtCore.SIGNAL('fileChanged(QString)'), file_changed)
This will not work on Windows (except maybe with Cygwin?), but for Unix users, you can use the "fcntl" system call. Here is an example in Python. It's mostly the same code if you need to write it in C (same function names).

import time
import fcntl
import os
import signal

FNAME = "/HOME/TOTO/FILETOWATCH"

def handler(signum, frame):
    print "File %s modified" % (FNAME,)

signal.signal(signal.SIGIO, handler)
fd = os.open(FNAME, os.O_RDONLY)
fcntl.fcntl(fd, fcntl.F_SETSIG, 0)
fcntl.fcntl(fd, fcntl.F_NOTIFY,
            fcntl.DN_MODIFY | fcntl.DN_CREATE | fcntl.DN_MULTISHOT)

while True:
    time.sleep(10000)
Check out pyinotify.
inotify replaces dnotify (from an earlier answer) in newer Linux kernels and allows file-level rather than directory-level monitoring.
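For reference, a minimal sketch of watching a single file with pyinotify might look like this (untested; the path is a placeholder):

import pyinotify

class Handler(pyinotify.ProcessEvent):
    def process_IN_MODIFY(self, event):
        # Called by the notifier loop on every modification.
        print('File modified: %s' % event.pathname)

wm = pyinotify.WatchManager()
wm.add_watch('/path/to/file.log', pyinotify.IN_MODIFY)
pyinotify.Notifier(wm, Handler()).loop()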
For watching a single file with polling, and minimal dependencies, here is a fully fleshed-out example, based on the answer from Deestan (above):
import os
import sys
import time

class Watcher(object):
    running = True
    refresh_delay_secs = 1

    # Constructor
    def __init__(self, watch_file, call_func_on_change=None, *args, **kwargs):
        self._cached_stamp = 0
        self.filename = watch_file
        self.call_func_on_change = call_func_on_change
        self.args = args
        self.kwargs = kwargs

    # Look for changes
    def look(self):
        stamp = os.stat(self.filename).st_mtime
        if stamp != self._cached_stamp:
            self._cached_stamp = stamp
            # File has changed, so do something...
            print('File changed')
            if self.call_func_on_change is not None:
                self.call_func_on_change(*self.args, **self.kwargs)

    # Keep watching in a loop
    def watch(self):
        while self.running:
            try:
                # Look for changes
                time.sleep(self.refresh_delay_secs)
                self.look()
            except KeyboardInterrupt:
                print('\nDone')
                break
            except FileNotFoundError:
                # Action on file not found
                pass
            except:
                print('Unhandled error: %s' % sys.exc_info()[0])

# Call this function each time a change happens
def custom_action(text):
    print(text)

watch_file = 'my_file.txt'

# watcher = Watcher(watch_file)  # simple
watcher = Watcher(watch_file, custom_action, text='yes, changed')  # also call custom action function
watcher.watch()  # start the watch going
Well, after a bit of hacking on Tim Golden's script, I have the following, which seems to work quite well:
import os
import win32file
import win32con

path_to_watch = "." # look at the current directory
file_to_watch = "test.txt" # look for changes to a file called test.txt

def ProcessNewData(newData):
    print "Text added: %s" % newData

# Set up the bits we'll need for output
ACTIONS = {
    1 : "Created",
    2 : "Deleted",
    3 : "Updated",
    4 : "Renamed from something",
    5 : "Renamed to something"
}

FILE_LIST_DIRECTORY = 0x0001
hDir = win32file.CreateFile(
    path_to_watch,
    FILE_LIST_DIRECTORY,
    win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
    None,
    win32con.OPEN_EXISTING,
    win32con.FILE_FLAG_BACKUP_SEMANTICS,
    None
)

# Open the file we're interested in
a = open(file_to_watch, "r")

# Throw away any existing log data
a.read()

# Wait for new data and call ProcessNewData for each new chunk that's written
while 1:
    # Wait for a change to occur
    results = win32file.ReadDirectoryChangesW(
        hDir,
        1024,
        False,
        win32con.FILE_NOTIFY_CHANGE_LAST_WRITE,
        None,
        None
    )
    # For each change, check to see if it's updating the file we're interested in
    for action, file in results:
        full_filename = os.path.join(path_to_watch, file)
        #print file, ACTIONS.get(action, "Unknown")
        if file == file_to_watch:
            newText = a.read()
            if newText != "":
                ProcessNewData(newText)
It could probably do with a load more error checking, but for simply watching a log file and doing some processing on it before spitting it out to the screen, this works well.
Thanks everyone for your input - great stuff!
Check my answer to a similar question. You could try the same loop in Python. This page suggests:
import time

while 1:
    where = file.tell()
    line = file.readline()
    if not line:
        time.sleep(1)
        file.seek(where)
    else:
        print line, # already has newline
Also see the question tail() a file with Python.
This is another modification of Tim Golden's script that runs on Unix-type systems and adds a simple watcher for file modification by using a dict (file => mtime).
usage: whateverName.py path_to_dir_to_watch
#!/usr/bin/env python
import os, sys, time

def files_to_timestamp(path):
    files = [os.path.join(path, f) for f in os.listdir(path)]
    return dict([(f, os.path.getmtime(f)) for f in files])

if __name__ == "__main__":
    path_to_watch = sys.argv[1]
    print('Watching {}..'.format(path_to_watch))

    before = files_to_timestamp(path_to_watch)

    while 1:
        time.sleep(2)
        after = files_to_timestamp(path_to_watch)

        added = [f for f in after.keys() if not f in before.keys()]
        removed = [f for f in before.keys() if not f in after.keys()]
        modified = []

        for f in before.keys():
            if not f in removed:
                if os.path.getmtime(f) != before.get(f):
                    modified.append(f)

        if added: print('Added: {}'.format(', '.join(added)))
        if removed: print('Removed: {}'.format(', '.join(removed)))
        if modified: print('Modified: {}'.format(', '.join(modified)))

        before = after
Here is a simplified version of Kender's code that appears to do the same trick and does not read the entire file:
# Check file for new data.

import time

f = open(r'c:\temp\test.txt', 'r')

while True:
    line = f.readline()
    if not line:
        time.sleep(1)
        print 'Nothing New'
    else:
        print 'Call Function: ', line
Well, since you are using Python, you can just open a file and keep reading lines from it.
f = open('file.log')
If the line read is not empty, you process it.
line = f.readline()
if line:
    # Do what you want with the line
You may be missing that it is ok to keep calling readline at the EOF. It will just keep returning an empty string in this case. And when something is appended to the log file, the reading will continue from where it stopped, as you need.
If you are looking for a solution that uses events, or a particular library, please specify this in your question. Otherwise, I think this solution is just fine.
The simplest solution for me is using watchdog's tool watchmedo.
From https://pypi.python.org/pypi/watchdog I now have a process that looks up the sql files in a directory and executes them if necessary.
watchmedo shell-command \
--patterns="*.sql" \
--recursive \
--command='~/Desktop/load_files_into_mysql_database.sh' \
.
As you can see in Tim Golden's article, pointed out by Horst Gutmann, WIN32 is relatively complex and watches directories, not a single file.
I'd like to suggest you look into IronPython, which is a .NET Python implementation. With IronPython you can use all the .NET functionality, including System.IO.FileSystemWatcher, which handles single files with a simple event interface; see the sketch below.
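A minimal sketch of the idea (untested; the path and filename are placeholders):

import clr
clr.AddReference('System')
from System.IO import FileSystemWatcher, NotifyFilters

def on_changed(sender, event):
    # event.FullPath is the file that triggered the notification.
    print('Changed: %s' % event.FullPath)

watcher = FileSystemWatcher()
watcher.Path = r'C:\logs'              # directory to watch (placeholder)
watcher.Filter = 'test.txt'            # only this file
watcher.NotifyFilter = NotifyFilters.LastWrite
watcher.Changed += on_changed          # subscribe to the .NET Changed event
watcher.EnableRaisingEvents = True     # start watching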
This is an example of checking a file for changes. It may not be the best way of doing it, but it sure is a short way.
It's a handy tool for restarting an application when changes have been made to the source. I made this when playing with pygame so I could see effects take place immediately after saving a file.
When used in pygame, make sure the stuff in the 'while' loop is placed in your game loop, a.k.a. update or whatever. Otherwise your application will get stuck in an infinite loop and you will not see your game updating.
import os

file_size_stored = os.stat('neuron.py').st_size

while True:
    try:
        file_size_current = os.stat('neuron.py').st_size
        if file_size_stored != file_size_current:
            restart_program()
    except:
        pass
In case you wanted the restart code, which I found on the web, here it is. (Not relevant to the question, though it could come in handy.)

import os
import sys

def restart_program(): # restart application
    python = sys.executable
    os.execl(python, python, *sys.argv)
Have fun making electrons do what you want them to do.
It seems that no one has posted fswatch. It is a cross-platform file system watcher. Just install it, run it, and follow the prompts.
I've used it with Python and Go programs and it just works.
import os
import threading
import win32file
import win32con

ACTIONS = {
    1 : "Created",
    2 : "Deleted",
    3 : "Updated",
    4 : "Renamed from something",
    5 : "Renamed to something"
}

FILE_LIST_DIRECTORY = 0x0001

class myThread(threading.Thread):
    def __init__(self, threadID, fileName, directory, origin):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.fileName = fileName
        self.daemon = True
        self.dir = directory
        self.originalFile = origin

    def run(self):
        startMonitor(self.fileName, self.dir, self.originalFile)

def startMonitor(fileMonitoring, dirPath, originalFile):
    hDir = win32file.CreateFile(
        dirPath,
        FILE_LIST_DIRECTORY,
        win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
        None,
        win32con.OPEN_EXISTING,
        win32con.FILE_FLAG_BACKUP_SEMANTICS,
        None
    )
    # Wait for new data and call ProcessNewData for each new chunk that's
    # written
    while 1:
        # Wait for a change to occur
        results = win32file.ReadDirectoryChangesW(
            hDir,
            1024,
            False,
            win32con.FILE_NOTIFY_CHANGE_LAST_WRITE,
            None,
            None
        )
        # For each change, check to see if it's updating the file we're
        # interested in
        for action, file_M in results:
            full_filename = os.path.join(dirPath, file_M)
            #print file, ACTIONS.get(action, "Unknown")
            if len(full_filename) == len(fileMonitoring) and action == 3:
                #copy to main file
                ...
Since I have it installed globally, my favorite approach is to use nodemon. If your source code is in src, and your entry point is src/app.py, then it's as easy as:
nodemon -w 'src/**' -e py,html --exec python src/app.py
... where -e py,html lets you control what file types to watch for changes.
Here's an example geared toward watching input files that write no more than one line per second but usually a lot less. The goal is to append the last line (most recent write) to the specified output file. I've copied this from one of my projects and just deleted all the irrelevant lines. You'll have to fill in or change the missing symbols.
from PyQt5.QtCore import QFileSystemWatcher, QSettings, QThread
from PyQt5.QtWidgets import QMainWindow
from ui_main_window import Ui_MainWindow # Qt Creator gen'd

class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        Ui_MainWindow.__init__(self)
        self._fileWatcher = QFileSystemWatcher()
        self._fileWatcher.fileChanged.connect(self.fileChanged)

    def fileChanged(self, filepath):
        QThread.msleep(300) # Reqd on some machines, give chance for write to complete
        # ^^ About to test this, may need more sophisticated solution
        with open(filepath) as file:
            lastLine = list(file)[-1]
        destPath = self._filemap[filepath]['dest file'] # self._filemap comes from elsewhere in the project
        with open(destPath, 'a') as out_file: # a = append
            out_file.writelines([lastLine])
Of course, the encompassing QMainWindow class is not strictly required, i.e. you can use QFileSystemWatcher alone.
Just to put this out there since no one mentioned it: there's a Python module in the standard library named filecmp which has a cmp() function that compares two files.
Just make sure you don't do from filecmp import cmp, so as not to shadow the built-in cmp() function in Python 2.x. That's okay in Python 3.x, though, since there's no built-in cmp() function anymore.
Anyway, this is what its use looks like:
import filecmp
filecmp.cmp(path_to_file_1, path_to_file_2, shallow=True)
The argument shallow defaults to True. If True, only the metadata of the files (as reported by os.stat) is compared; if False, the contents of the files are compared.
Maybe this information will be useful to someone.
watchfiles (https://github.com/samuelcolvin/watchfiles) is a Python API and CLI that uses the Notify (https://github.com/notify-rs/notify) library written in Rust.
The Rust implementation currently (2022-10-09) supports:
Linux / Android: inotify
macOS: FSEvents or kqueue, see features
Windows: ReadDirectoryChangesW
FreeBSD / NetBSD / OpenBSD / DragonflyBSD: kqueue
All platforms: polling
Binaries available on PyPI (https://pypi.org/project/watchfiles/) and conda-forge (https://github.com/conda-forge/watchfiles-feedstock).
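A minimal usage sketch (the path is a placeholder):

from watchfiles import watch

# watch() blocks, then yields a set of (change_type, path) tuples per batch of changes.
for changes in watch('/path/to/dir'):
    print(changes)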
You can also use a simple library called repyt, here is an example:
repyt ./app.py
Related to @4Oh4's solution; a smooth change for a list of files to watch:
import os
import sys
import time

class Watcher(object):
    running = True
    refresh_delay_secs = 1

    # Constructor
    def __init__(self, watch_files, call_func_on_change=None, *args, **kwargs):
        self._cached_stamp = 0
        self._cached_stamp_files = {}
        self.filenames = watch_files
        self.call_func_on_change = call_func_on_change
        self.args = args
        self.kwargs = kwargs

    # Look for changes
    def look(self):
        for file in self.filenames:
            stamp = os.stat(file).st_mtime
            if not file in self._cached_stamp_files:
                self._cached_stamp_files[file] = 0
            if stamp != self._cached_stamp_files[file]:
                self._cached_stamp_files[file] = stamp
                # File has changed, so do something...
                file_to_read = open(file, 'r')
                value = file_to_read.read()
                print("value from file", value)
                file_to_read.seek(0)
                if self.call_func_on_change is not None:
                    self.call_func_on_change(*self.args, **self.kwargs)

    # Keep watching in a loop
    def watch(self):
        while self.running:
            try:
                # Look for changes
                time.sleep(self.refresh_delay_secs)
                self.look()
            except KeyboardInterrupt:
                print('\nDone')
                break
            except FileNotFoundError:
                # Action on file not found
                pass
            except Exception as e:
                print(e)
                print('Unhandled error: %s' % sys.exc_info()[0])

# Call this function each time a change happens
def custom_action(text):
    print(text)
    # pass

watch_files = ['/Users/mexekanez/my_file.txt', '/Users/mexekanez/my_file1.txt']

# watcher = Watcher(watch_file)  # simple

if __name__ == "__main__":
    watcher = Watcher(watch_files, custom_action, text='yes, changed')  # also call custom action function
    watcher.watch()  # start the watch going
The best and simplest solution is to use pygtail:
https://pypi.python.org/pypi/pygtail
from pygtail import Pygtail
import sys

while True:
    for line in Pygtail("some.log"):
        sys.stdout.write(line)
import inotify.adapters
from datetime import datetime

LOG_FILE = '/var/log/mysql/server_audit.log'

def main():
    start_time = datetime.now()
    while True:
        i = inotify.adapters.Inotify()
        i.add_watch(LOG_FILE)
        for event in i.event_gen(yield_nones=False):
            break
        del i
        with open(LOG_FILE, 'r') as f:
            for line in f:
                entry = line.split(',')
                entry_time = datetime.strptime(entry[0],
                                               '%Y%m%d %H:%M:%S')
                if entry_time > start_time:
                    start_time = entry_time
                    print(entry)

if __name__ == '__main__':
    main()
The easiest solution would be to grab two snapshots of the same file after an interval and compare them. You could try something like this:
import time

# app_name is assumed to be defined elsewhere
while True:
    # Capture two snapshots of the file, a certain interval of time apart
    print("Looking for changes in " + app_name.capitalize() + " models.py\nPress 'CTRL + C' to stop the program")
    with open(app_name.capitalize() + '/filename', 'r+') as app_models_file:
        filename_content = app_models_file.read()
    time.sleep(5)
    with open(app_name.capitalize() + '/filename', 'r+') as app_models_file_1:
        filename_content_1 = app_models_file_1.read()
    # Compare the two snapshots
    if filename_content == filename_content_1:
        pass
    else:
        print("You made a change in " + app_name.capitalize() + " filename.\n")
        cmd = str(input("Do something with the file?(y/n):"))
        if cmd == 'y':
            pass # Do something
        elif cmd == 'n':
            pass # pass or do something
        else:
            print("Invalid input")
If you're using Windows, create this POLL.CMD file:
@echo off
:top
xcopy /m /y %1 %2 | find /v "File(s) copied"
timeout /T 1 > nul
goto :top
Then you can type "poll dir1 dir2" and it will copy all the files from dir1 to dir2 and check for updates once per second.
The "find" is optional, just to make the console less noisy.
This is not recursive. Maybe you could make it recursive using /e on the xcopy.
I don't know of any Windows-specific function. You could try getting the MD5 hash of the file every second/minute/hour (depending on how fast you need it) and comparing it to the last hash. When it differs, you know the file has been changed, and you can read out the newest lines.
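A minimal sketch of that idea (the filename and interval are placeholders):

import hashlib
import time

def md5_of(path):
    # Hash the whole file; fine for small logs, wasteful for big ones.
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

last = md5_of('test.log')
while True:
    time.sleep(1)
    current = md5_of('test.log')
    if current != last:
        last = current
        print('File changed')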
I'd try something like this.
try:
    f = open(filePath)
except IOError:
    print "No such file: %s" % filePath
    raw_input("Press Enter to close window")

try:
    lines = f.readlines()
    while True:
        line = f.readline()
        try:
            if not line:
                time.sleep(1)
            else:
                functionThatAnalisesTheLine(line)
        except Exception, e:
            # handle the exception somehow (for example, log the trace) and raise the same exception again
            raw_input("Press Enter to close window")
            raise e
finally:
    f.close()
The loop checks whether there are new lines since the last time the file was read; if there are, each one is read and passed to the functionThatAnalisesTheLine function. If not, the script waits 1 second and retries.
I have a setup.py script which needs to probe the compiler for certain things, like support for TR1 or the presence of windows.h (to add the NOMINMAX define). I do these checks by creating a simple program and trying to compile it with Distutils' compiler class. The presence or lack of errors is my answer.
This works well, but it means that the compiler's ugly error messages get printed to the console. Is there a way to suppress error messages when the compile function is called manually?
Here is my function which tries to compile the program, and which now DOES eliminate the error messages by piping the error stream to a file (I answered my own question):
def see_if_compiles(program, include_dirs, define_macros):
    """ Try to compile the passed in program and report if it compiles successfully or not. """
    from distutils.ccompiler import new_compiler, CompileError
    from shutil import rmtree
    import tempfile
    import sys
    import os

    try:
        tmpdir = tempfile.mkdtemp()
    except AttributeError:
        # Python 2.2 doesn't have mkdtemp().
        tmpdir = "compile_check_tempdir"
        try:
            os.mkdir(tmpdir)
        except OSError:
            print "Can't create temporary directory. Aborting."
            sys.exit()

    old = os.getcwd()
    os.chdir(tmpdir)

    # Write the program
    f = open('compiletest.cpp', 'w')
    f.write(program)
    f.close()

    # redirect the error stream to keep ugly compiler error messages off the command line
    devnull = open('errors.txt', 'w')
    oldstderr = os.dup(sys.stderr.fileno())
    os.dup2(devnull.fileno(), sys.stderr.fileno())

    try:
        c = new_compiler()
        for macro in define_macros:
            c.define_macro(name=macro[0], value=macro[1])
        c.compile([f.name], include_dirs=include_dirs)
        success = True
    except CompileError:
        success = False

    # undo the error stream redirect
    os.dup2(oldstderr, sys.stderr.fileno())
    devnull.close()

    os.chdir(old)
    rmtree(tmpdir)
    return success
Here is a function which uses the above to check for the presence of a header.
def check_for_header(header, include_dirs, define_macros):
    """Check for the existence of a header file by creating a small program which includes it and see if it compiles."""
    program = "#include <%s>\n" % header
    sys.stdout.write("Checking for <%s>... " % header)
    success = see_if_compiles(program, include_dirs, define_macros)
    if success:
        sys.stdout.write("OK\n")
    else:
        sys.stdout.write("Not found\n")
    return success
Zac's comment spurred me to look a bit more and I found that Mercurial's setup.py script has a working method for his approach. You can't just assign the stream because the change won't get inherited by the compiler process, but apparently Python has our good friend dup2() in the form of os.dup2(). That allows the same OS-level stream shenanigans that we all know and love, which do get inherited to child processes.
Mercurial's function redirects to /dev/null, but to keep Windows compatibility I just redirect to a file then delete it.
Quoth Mercurial:
# simplified version of distutils.ccompiler.CCompiler.has_function
# that actually removes its temporary files.
def hasfunction(cc, funcname):
    tmpdir = tempfile.mkdtemp(prefix='hg-install-')
    devnull = oldstderr = None
    try:
        try:
            fname = os.path.join(tmpdir, 'funcname.c')
            f = open(fname, 'w')
            f.write('int main(void) {\n')
            f.write(' %s();\n' % funcname)
            f.write('}\n')
            f.close()
            # Redirect stderr to /dev/null to hide any error messages
            # from the compiler.
            # This will have to be changed if we ever have to check
            # for a function on Windows.
            devnull = open('/dev/null', 'w')
            oldstderr = os.dup(sys.stderr.fileno())
            os.dup2(devnull.fileno(), sys.stderr.fileno())
            objects = cc.compile([fname], output_dir=tmpdir)
            cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
        except:
            return False
        return True
    finally:
        if oldstderr is not None:
            os.dup2(oldstderr, sys.stderr.fileno())
        if devnull is not None:
            devnull.close()
        shutil.rmtree(tmpdir)
Here's a context manager that I wrote recently and found useful, because I was having the same problem with distutils.ccompiler.CCompiler.has_function while working on pymssql. I was going to use your approach (nice, thanks for sharing!), but then I thought it could be done with less code, and would be more general and flexible, if I used a context manager. Here's what I came up with:
import contextlib
import os

@contextlib.contextmanager
def stdchannel_redirected(stdchannel, dest_filename):
    """
    A context manager to temporarily redirect stdout or stderr

    e.g.:

    with stdchannel_redirected(sys.stderr, os.devnull):
        if compiler.has_function('clock_gettime', libraries=['rt']):
            libraries.append('rt')
    """
    oldstdchannel = dest_file = None
    try:
        oldstdchannel = os.dup(stdchannel.fileno())
        dest_file = open(dest_filename, 'w')
        os.dup2(dest_file.fileno(), stdchannel.fileno())
        yield
    finally:
        if oldstdchannel is not None:
            os.dup2(oldstdchannel, stdchannel.fileno())
        if dest_file is not None:
            dest_file.close()
The context for why I created this is at this blog post. Pretty much the same as yours I think.
This uses code that I borrowed from you to do the redirection (e.g.: os.dup2, etc.), but I wrapped it in a context manager so it's more general and reusable.
I use it like this in a setup.py:

with stdchannel_redirected(sys.stderr, os.devnull):
    if compiler.has_function('clock_gettime', libraries=['rt']):
        libraries.append('rt')
@Adam: I just want to point out that there is a /dev/null equivalent on Windows. It's 'NUL', but good practice is to get it from os.devnull.
I'm pretty new to programming and Python, so disregard this if it's a stupid suggestion, but can't you just reroute the error messages to a text file instead of the screen/interactive window/whatever?
I'm pretty sure I read somewhere you can do something like:

error = open('yourerrorlog.txt', 'w')
sys.stderr = error

Again, sorry, I'm probably repeating something you already know, but if the problem is that you WANT the errors when it's called by another function (automated) and no errors when it's run manually, can't you just add a keyword argument like compile(arg1, arg2, manual=True), and then under your "except:" you add:

if manual == False: print errors to console/interactive window
else: print to error

Then, when it's called by the program and not manually, you just call it with compile(arg1, arg2, manual=False) so that it redirects to the file.
Does running in quiet mode help at all? setup.py -q build
Not a direct answer to your question, but related to your use case: there is a config command in distutils that's designed to be subclassed and used to check for C features. It's not documented yet; you have to read the source. A sketch is shown below.
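For instance, a hedged sketch (untested; method names taken from distutils.command.config's source) of subclassing it to probe for a header:

from distutils.command.config import config

class probe(config):
    def run(self):
        # check_header(), try_compile() and friends come from the config base class.
        if self.check_header('windows.h'):
            print('windows.h found')

Registered with setup(cmdclass={'probe': probe}), this would run via python setup.py probe.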
The Python module tempfile contains both NamedTemporaryFile and TemporaryFile. The documentation for the former says
Whether the name can be used to open the file a second time, while the named temporary file is still open, varies across platforms (it can be so used on Unix; it cannot on Windows NT or later)
What is the point of the file having a name if I can't use that name? If I want the useful (for me) behaviour of Unix on Windows, I've got to make a copy of the code and rip out all the bits that say if _os.name == 'nt' and the like.
What gives? Surely this is useful for something, since it was deliberately coded this way, but what is that something?
It states that you can't open it a second time while it is still open. You can still use the name otherwise; just be sure to pass delete=False when creating the NamedTemporaryFile, so that it persists after it is closed.
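A minimal sketch of that pattern (the suffix and contents are placeholders; cleanup is manual because of delete=False):

import os
import tempfile

with tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False) as tmp:
    tmp.write('hello')
    path = tmp.name  # the name stays valid after close because delete=False

# The file can now be reopened by name, on Windows too.
with open(path) as f:
    assert f.read() == 'hello'

os.remove(path)  # we are responsible for deleting it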
I use this:
import os, tempfile, gc

class TemporaryFile:
    def __init__(self, name, io, delete):
        self.name = name
        self.__io = io
        self.__delete = delete

    def __getattr__(self, k):
        return getattr(self.__io, k)

    def __del__(self):
        if self.__delete:
            try:
                os.unlink(self.name)
            except FileNotFoundError:
                pass

def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix='', prefix='tmp', dir=None, delete=True):
    if not dir:
        dir = tempfile.gettempdir()
    name = os.path.join(dir, prefix + os.urandom(32).hex() + suffix)
    if mode is None:
        return TemporaryFile(name, None, delete)
    fh = open(name, "w+b", bufsize)
    if mode != "w+b":
        fh.close()
        fh = open(name, mode)
    return TemporaryFile(name, fh, delete)
def test_ntf_txt():
    x = NamedTemporaryFile("w")
    x.write("hello")
    x.close()
    assert os.path.exists(x.name)
    with open(x.name) as f:
        assert f.read() == "hello"

def test_ntf_name():
    x = NamedTemporaryFile(suffix="s", prefix="p")
    assert os.path.basename(x.name)[0] == 'p'
    assert os.path.basename(x.name)[-1] == 's'
    x.write(b"hello")
    x.seek(0)
    assert x.read() == b"hello"

def test_ntf_del():
    x = NamedTemporaryFile(suffix="s", prefix="p")
    assert os.path.exists(x.name)
    name = x.name
    del x
    gc.collect()
    assert not os.path.exists(name)

def test_ntf_mode_none():
    x = NamedTemporaryFile(suffix="s", prefix="p", mode=None)
    assert not os.path.exists(x.name)
    name = x.name
    f = open(name, "w")
    f.close()
    assert os.path.exists(name)
    del x
    gc.collect()
    assert not os.path.exists(name)
This works on all platforms; you can close it, open it up again, and so on.
The mode=None feature is what you want: you can ask for a temp file with mode=None, which gives you a UUID-style temp name with the dir/suffix/prefix that you want. The tests above show usage.
It's basically the same as NamedTemporaryFile, except the file gets auto-deleted when the returned object is garbage collected, not on close.
You don't want to "rip out all the bits...". It's coded like that for a reason. It says you can't open it a SECOND time while it's still open. Don't. Just use it once, and throw it away (after all, it is a temporary file). If you want a permanent file, create your own.
"Surely this is useful for something, since it was deliberately coded this way, but what is that something". Well, I've used it to write emails to (in a binary format) before copying them to a location where our Exchange Server picks them up & sends them. I'm sure there are lots of other use cases.
I'm pretty sure the Python library writers didn't just decide to make NamedTemporaryFile behave differently on Windows for laughs. All those _os.name == 'nt' tests will be there because of platform differences between Windows and Unix. So my inference from that documentation is that on Windows a file opened the way NamedTemporaryFile opens it cannot be opened again while NamedTemporaryFile still has it open, and that this is due to the way Windows works.