I'm trying to identify a good way to watch for the appearance of a file using Python's asyncio library. This is what I've come up with so far:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Watches for the appearance of a file."""
import argparse
import asyncio
import os.path
@asyncio.coroutine
def watch_for_file(file_path, interval=1):
while True:
if not os.path.exists(file_path):
print("{} not found yet.".format(file_path))
yield from asyncio.sleep(interval)
else:
print("{} found!".format(file_path))
break
def make_cli_parser():
cli_parser = argparse.ArgumentParser(description=__doc__)
cli_parser.add_argument('file_path')
return cli_parser
def main(argv=None):
cli_parser = make_cli_parser()
args = cli_parser.parse_args(argv)
loop = asyncio.get_event_loop()
loop.run_until_complete(watch_for_file(args.file_path))
if __name__ == '__main__':
main()
I saved this as watch_for_file.py, and can run it with
python3 watch_for_file.py testfile
In another shell session, I issue
touch testfile
to end the loop.
Is there a more elegant solution than using this infinite loop and yield from asyncio.sleep()?
Well, there are nicer, platform-specific ways of being notified when a file is created. Gerrat linked to one for Windows in his comment, and pyinotify can be used on Linux. Those platform-specific approaches can probably be plugged into asyncio, but you'd end up writing a whole bunch of code to make it work in a platform-independent way, which probably isn't worth the effort just to check for the appearance of a single file. If you need more sophisticated filesystem watching in addition to this, it might be worth pursuing, though. It looks like pyinotify could be extended with a subclass of its Notifier class that plugs into the asyncio event loop (there are already classes for tornado and asyncore), for example.
For your simple use-case, I think your infinite loop approach to polling is fine, but you could also just schedule callbacks with the event loop, if you wanted:
def watch_for_file(file_path, interval=1, loop=None):
if not loop: loop = asyncio.get_event_loop()
if not os.path.exists(file_path):
print("{} not found yet.".format(file_path))
loop.call_later(interval, watch_for_file, file_path, interval, loop)
else:
print("{} found!".format(file_path))
loop.stop()
def main(argv=None):
cli_parser = make_cli_parser()
args = cli_parser.parse_args(argv)
loop = asyncio.get_event_loop()
loop.call_soon(watch_for_file, args.file_path)
loop.run_forever()
I'm not sure this is much more elegant than the infinite loop, though.
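For what it's worth, on Python 3.5+ the same polling coroutine can be written with async/await instead of the @asyncio.coroutine / yield from style; a minimal sketch:
import asyncio
import os.path
async def watch_for_file(file_path, interval=1):
    # Poll until the file shows up, yielding to the event loop between checks.
    while not os.path.exists(file_path):
        print("{} not found yet.".format(file_path))
        await asyncio.sleep(interval)
    print("{} found!".format(file_path))
# On Python 3.7+ this can be driven with asyncio.run(watch_for_file("testfile")).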
Edit:
Just for fun, I wrote a solution using pyinotify:
import pyinotify
import asyncio
import argparse
import os.path
class AsyncioNotifier(pyinotify.Notifier):
"""
Notifier subclass that plugs into the asyncio event loop.
"""
def __init__(self, watch_manager, loop, callback=None,
default_proc_fun=None, read_freq=0, threshold=0, timeout=None):
self.loop = loop
self.handle_read_callback = callback
pyinotify.Notifier.__init__(self, watch_manager, default_proc_fun, read_freq,
threshold, timeout)
loop.add_reader(self._fd, self.handle_read)
def handle_read(self, *args, **kwargs):
self.read_events()
self.process_events()
if self.handle_read_callback is not None:
self.handle_read_callback(self)
class EventHandler(pyinotify.ProcessEvent):
def my_init(self, file=None, loop=None):
if not file:
raise ValueError("file keyword argument must be provided")
self.loop = loop if loop else asyncio.get_event_loop()
self.filename = file
def process_IN_CREATE(self, event):
print("Creating:", event.pathname)
if os.path.basename(event.pathname) == self.filename:
print("Found it!")
self.loop.stop()
def make_cli_parser():
cli_parser = argparse.ArgumentParser(description=__doc__)
cli_parser.add_argument('file_path')
return cli_parser
def main(argv=None):
cli_parser = make_cli_parser()
args = cli_parser.parse_args(argv)
loop = asyncio.get_event_loop()
# set up pyinotify stuff
wm = pyinotify.WatchManager()
mask = pyinotify.IN_CREATE # watched events
dir_, filename = os.path.split(args.file_path)
if not dir_:
dir_ = "."
wm.add_watch(dir_, mask)
handler = EventHandler(file=filename, loop=loop)
    notifier = AsyncioNotifier(wm, loop, default_proc_fun=handler)
loop.run_forever()
if __name__ == '__main__':
main()
By the way, Butter (https://pypi.python.org/pypi/butter) has support for asyncio out of the box.
import asyncio
from butter.inotify import IN_ALL_EVENTS
from butter.asyncio.inotify import Inotify_async
@asyncio.coroutine
def watcher(loop):
inotify = Inotify_async(loop=loop)
print(inotify)
wd = inotify.watch('/tmp', IN_ALL_EVENTS)
for i in range(5):
event = yield from inotify.get_event()
print(event)
inotify.ignore(wd)
print('done')
event = yield from inotify.get_event()
print(event)
inotify.close()
print(inotify)
loop = asyncio.get_event_loop()
task = loop.create_task(watcher(loop))
loop.run_until_complete(task)
Butter is really cool. Another alternative is minotaur, which is similar but only implements inotify:
async def main():
with Inotify(blocking=False) as n:
n.add_watch('.', Mask.CREATE | Mask.DELETE | Mask.MOVE)
async for evt in n:
print(evt)
Related
I am making a Discord bot that will grab a JSON using requests from time to time and then send the relevant information to a specific channel.
I have the following classes:
Helper, which is the Discord bot itself; it runs async from the start, inside an asyncio.gather;
tasker, which controls the interval and calls the class that does the requests. It runs in a different thread so it doesn't block the async Helper while it waits;
getInfo, which does the requests, stores the info, and should talk to Helper.
I am having two problems right now:
While tasker is on a different thread, every time I try to talk to Helper via getInfo it gives me the errors RuntimeError: no running event loop and RuntimeWarning: coroutine 'getInfo.discordmsg' was never awaited.
If I don't run it on a different thread, however, it does work with TestStatus: 1, but it makes Helper get stuck and stop running with TestStatus: 2.
Anyway, here is the code
import requests
import asyncio
import discord
from discord.ext import commands, tasks
from datetime import datetime, timedelta
import threading
class Helper(discord.Client):
async def on_ready(self):
global discordbot, taskervar
servername = 'ServerName'
discordbot = self
self.servidores = dict()
self.canais = dict()
for i in range(len(self.guilds)):
self.servidores[self.guilds[i].name] = {}
self.servidores[self.guilds[i].name]['guild']=self.guilds[i]
servidor = self.guilds[i]
for k in range(len(servidor.channels)):
canal = servidor.channels[k]
self.canais[str(canal.name)] = canal
if 'bottalk' not in self.canais.keys():
newchan = await self.servidores[self.guilds[i].name]['guild'].create_text_channel('bottalk')
self.canais[str(newchan.name)] = newchan
self.servidores[self.guilds[i].name]['canais'] = self.canais
self.bottalk = self.get_channel(self.servidores[servername]['canais']['bottalk'].id)
await self.msg("Bot online: " + converteHora(datetime.now(),True))
print(f'{self.user} has connected to Discord!')
taskervar.startprocess()
async def msg(self, msg):
await self.bottalk.send(msg)
async def on_message(self, message):
if message.author == self.user:
return
else:
print(message)
class tasker:
def __init__(self):
global discordbot, taskervar
print('Tasker start')
taskervar = self
self.waiter = threading.Event()
self.lastupdate = datetime.now()
self.nextupdate = datetime.now()
self.thread = threading.Thread(target=self.requests)
def startprocess(self):
if not self.thread.is_alive():
self.waiter = threading.Event()
self.interval = 60*5
self.thread = threading.Thread(target=self.requests)
self.thread.start()
def requests(self):
while not self.waiter.is_set():
getInfo()
self.lastupdate = datetime.now()
self.nextupdate = datetime.now()+timedelta(seconds=self.interval)
self.waiter.wait(self.interval)
def stopprocess(self):
self.waiter.set()
class getInfo:
def __init__(self):
global discordbot, taskervar
self.requests()
async def discordmsg(self,msg):
await discordbot.msg(msg)
def requests(self):
jsondata = {"TestStatus": 1}
if jsondata['TestStatus'] == 1:
print('here')
asyncio.create_task(self.discordmsg("SOMETHING WENT WRONG"))
taskervar.stopprocess()
return
elif jsondata['TestStatus'] == 2:
print('test')
hora = converteHora(datetime.now(),True)
asyncio.create_task(self.discordmsg(str("Everything is fine but not now: " + hora )))
print('test2')
def converteHora(dateUTC, current=False):
if current:
response = (dateUTC.strftime("%d/%m/%Y, %H:%M:%S"))
else:
response = (dateutil.parser.isoparse(dateUTC)-timedelta(hours=3)).strftime("%d/%m/%Y, %H:%M:%S")
return response
async def main():
TOKEN = 'TOKEN GOES HERE'
tasker()
await asyncio.gather(
await Helper().start(TOKEN)
)
if __name__ == '__main__':
asyncio.run(main())
Your primary problem is that you don't give your secondary thread access to the asyncio event loop. You can't just await and/or create_task a coroutine on a global object (one of many reasons to avoid using global objects in the first place). Here is how you could modify your code to accomplish that:
class tasker:
def __init__(self):
# ...
self.loop = asyncio.get_running_loop()
# ...
class getInfo:
#...
def requests(self):
# replace the create_tasks calls with this.
        asyncio.run_coroutine_threadsafe(self.discordmsg("your message"), taskervar.loop)  # pass the coroutine object (call discordmsg), not the bare function
This uses your global variables because I don't want to rewrite your entire program, but I still strongly recommend avoiding them and considering a rewrite yourself.
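For reference, here is a minimal, self-contained sketch of the run_coroutine_threadsafe pattern (names are illustrative, not your bot's code): the worker thread hands a coroutine to the loop running in the main thread and can wait on the returned Future.
import asyncio
import threading
async def send_message(msg):
    print("sending:", msg)
def worker(loop):
    # Called from a plain thread: schedule the coroutine on the given loop
    # and wait for its result via the returned concurrent.futures.Future.
    future = asyncio.run_coroutine_threadsafe(send_message("hello"), loop)
    future.result(timeout=5)
async def main():
    loop = asyncio.get_running_loop()
    t = threading.Thread(target=worker, args=(loop,))
    t.start()
    await asyncio.sleep(1)  # keep the loop running while the thread schedules work
    t.join()
asyncio.run(main())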
All that being said, I suspect you will still have this bug:
If I dont run it on a different thread, however, it does work on the TestStatus: 1 but it makes Helper get stuck and stop running with TestStatus: 2
I can't tell what would cause this issue, and I'm running into trouble reproducing it on my machine. Your code is pretty hard to read and is missing some details needed to reproduce it. I would imagine that is part of the reason why you didn't get an answer in the first place. I'm sure you're aware of this article, but it might be worth a revisit for better practices in sharing code: https://stackoverflow.com/help/minimal-reproducible-example
Hey, I'm learning the psutil package and I want to know how to display the current CPU usage while a function is in progress. I suppose I need some threading or something like this, but how do I do it? Thank you for any answers.
import psutil
import random
def iHateThis():
tab = []
for i in range(100000):
tab.append(random.randint(1, 10000))
tab.sort()
return tab;
while(True):
currentProcess = psutil.Process()
print(currentProcess.cpu_percent(interval=1))
You can use threading to run iHateThis or to run the function with cpu_percent(). I chose the second version: I will run cpu_percent() in a thread.
Because it uses while True, the thread would run forever and there would be no nice way to stop it, so I use a global variable running (checked with while running) to have a way to stop this loop.
import threading
import psutil
def display_cpu():
global running
running = True
currentProcess = psutil.Process()
# start loop
while running:
print(currentProcess.cpu_percent(interval=1))
def start():
global t
# create thread and start it
t = threading.Thread(target=display_cpu)
t.start()
def stop():
global running
global t
# use `running` to stop loop in thread so thread will end
running = False
# wait for thread's end
t.join()
Now I can use it to start and stop the thread which displays the CPU usage. Because I may have to stop the process using Ctrl+C, which raises an error, I use try/finally to stop the thread even if there is an error.
def i_hate_this():
tab = []
for i in range(1000000):
tab.append(random.randint(1, 10000))
tab.sort()
return tab
# ---
start()
try:
result = i_hate_this()
finally: # stop thread even if I press Ctrl+C
stop()
Full code:
import random
import threading
import psutil
def display_cpu():
global running
running = True
currentProcess = psutil.Process()
# start loop
while running:
print(currentProcess.cpu_percent(interval=1))
def start():
global t
# create thread and start it
t = threading.Thread(target=display_cpu)
t.start()
def stop():
global running
global t
# use `running` to stop loop in thread so thread will end
running = False
# wait for thread's end
t.join()
# ---
def i_hate_this():
tab = []
for i in range(1000000):
tab.append(random.randint(1, 10000))
tab.sort()
return tab
# ---
start()
try:
result = i_hate_this()
finally: # stop thread even if I press Ctrl+C
stop()
BTW: this can be converted to a class which inherits from Thread, and then the running variable can be hidden inside the class.
import psutil
import random
import threading
class DisplayCPU(threading.Thread):
def run(self):
self.running = True
currentProcess = psutil.Process()
while self.running:
print(currentProcess.cpu_percent(interval=1))
def stop(self):
self.running = False
# ----
def i_hate_this():
tab = []
for i in range(1000000):
tab.append(random.randint(1, 10000))
tab.sort()
return tab
# ---
display_cpu = DisplayCPU()
display_cpu.start()
try:
result = i_hate_this()
finally: # stop thread even when I press Ctrl+C
display_cpu.stop()
It could also be converted to a context manager, so you could run it as
with display_cpu():
i_hate_this()
but I skip this part.
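For completeness, here is a sketch of what that context manager could look like (assuming the DisplayCPU class from above):
from contextlib import contextmanager
@contextmanager
def display_cpu():
    monitor = DisplayCPU()
    monitor.start()
    try:
        yield monitor
    finally:
        monitor.stop()  # stop the thread even if the body raises
        monitor.join()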
You can do this with the multiprocessing library. multiprocessing.Process is a class that represents a separate process (not a thread); it is initiated with a target function and a name, and can be started at any time with .start().
import multiprocessing
import psutil
import random
def iHateThis():
tab = []
for i in range(100000):
tab.append(random.randint(1, 10000))
tab.sort()
return tab;
hate = multiprocessing.Process(name='hate', target=iHateThis)
hate.start()
while(True):
currentProcess = psutil.Process()
print(currentProcess.cpu_percent(interval=1))
I don't think you need to use the psutil Process class, as I think it is intended for monitoring a specific process. Using the code snippet from @furas (the accepted answer), you can do it with a thread like this:
def run(self):
    # use self.running so the stop() method from the accepted answer still works
    self.running = True
    while self.running:
        psutil.cpu_percent(interval=1)
it works the same as the accepted answer in the following case:
_monitor.start()
try:
for i in range(50):
time.sleep(0.2)
finally:
_monitor.stop()
If you don't want to code it yourself, I put it in a public repo in case it can be of any help for someone: https://github.com/GTimothee/monitor
Let's approach the problem differently and propose a decorator that measures CPU utilization while the decorated function runs:
import multiprocessing as mp
import time
import psutil
from functools import partial, wraps
def log_cpu_usage(func=None, msg_prefix: str = None):
    """
    Decorator that measures the CPU usage of a function while it runs and
    returns it together with the function's result.
    """
    debug = True
    if not debug:
        return func
    if func is None:
        # allow usage both as @log_cpu_usage and @log_cpu_usage(msg_prefix=...)
        return partial(log_cpu_usage, msg_prefix=msg_prefix)
    def new_func(data: mp.Queue, *args, **kwargs):
        result = func(*args, **kwargs)
        data.put(result)
    @wraps(func)
    def trace_execution(*args, **kwargs):
        manager = mp.Queue()  # to pass the return value back from the subprocess
        worker_process = mp.Process(target=new_func, args=(manager, *args), kwargs=kwargs)
        worker_process.start()
        p = psutil.Process(worker_process.pid)
        cpu_percents = []
        while worker_process.is_alive():  # while the subprocess is running
            cpu_percents.append(p.cpu_percent() / psutil.cpu_count())
            time.sleep(0.01)
        worker_process.join()
        ret_values = manager.get()
        return sum(cpu_percents) / len(cpu_percents), ret_values
    return trace_execution
@log_cpu_usage
def iHateThis():
    pass
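With the fix above (the decorator now returns trace_execution), calling the decorated function returns an (average_cpu_percent, result) tuple, assuming the function runs long enough for at least one CPU sample to be taken:
avg_cpu, result = iHateThis()
print("average CPU while running: {:.1f}%".format(avg_cpu))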
I'm trying to light a 5mm LED while a function is running. When this function (more details about this below) is finished and has returned a value I would like to break the while loop.
Current code for while loop:
pins = [3,5,8,15,16]
def piBoard():
finished = 0
while finished!=10:
        for pin in pins:
GPIO.output(
pin, GPIO.HIGH
)
time.sleep(0.1)
GPIO.output(
pin, GPIO.LOW
)
finished+=1
Now in the above example I just run the while loop until the count is equal to 10, which is not best practice. I would like the while loop to break once my next function has returned a value.
The function whose return value should break my while loop:
def myFunction():
Thread(target = piBoard().start()
// Trying to recognize the song
return the song which is recognized
Thanks, - K.
It sounds to me like you want to write a class that extends Thread and implements __enter__ and __exit__ methods to make it work in the with statement. Simple to implement, simple syntax, works pretty well. The class will look like this:
import threading
class Blinky(threading.Thread):
def __init__(self):
super().__init__()
self.daemon = True
self._finished = False
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self):
# turn light on
while not self._finished:
time.sleep(.5)
# turn light off
def stop(self):
self._finished = True
Then, to run your function, you simply put:
with Blinky():
my_function()
The light should turn on once the with statement is reached and turn off up to a half second after the context of the with is exited.
Put True in the while condition, and inside the loop add an if statement that checks whether your function has returned a value; if it has, break.
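A minimal sketch of that idea (names are illustrative): run the recognizer in a thread and let the blink loop check a shared result variable:
import threading
import time
result = None
def recognize_song():
    global result
    time.sleep(3)  # stand-in for the real recognition work
    result = "some song"
threading.Thread(target=recognize_song).start()
while True:
    if result is not None:  # the function has returned a value
        break
    # toggle the LEDs here
    time.sleep(0.1)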
You need some kind of inter-thread communication. threading.Event is about as simple as you can get.
import threading
song_recognized_event = threading.Event()
In your song recognizer, call set() once the song is recognized.
In your LED loop, check isSet() occasionally while toggling LEDs.
while not song_recognized_event.isSet():
# toggle LEDs
Run clear() to reset it.
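Putting those pieces together, a small self-contained sketch (the LED toggling is replaced with a print and the recognizer is simulated with a sleep):
import threading
import time
song_recognized_event = threading.Event()
def recognizer():
    time.sleep(3)                 # pretend to recognize the song
    song_recognized_event.set()   # signal the LED loop to stop
threading.Thread(target=recognizer).start()
while not song_recognized_event.is_set():
    print("blink")                # toggle LEDs here
    time.sleep(0.1)
song_recognized_event.clear()     # reset for the next run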
If you are open to using threads, you can achieve this with a ThreadPoolExecutor. Here's the example code:
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
WORK_FINISHED = False
def piBoard():
    while not WORK_FINISHED:
        # Do some stuff
        # Drink some coffee
        time.sleep(1)  # placeholder body so the loop is valid Python
def myFunction():
time.sleep(5)
global WORK_FINISHED
    WORK_FINISHED = True  # update the global status flag
return something
if __name__ == '__main__':
futures = []
MAX_WORKERS = 5 #max number of threads you want to create
with ThreadPoolExecutor(MAX_WORKERS) as executor:
executor.submit(piBoard)
# submit your function to worker thread
futures.append(executor.submit(myFunction))
# if you need to get return value from `myFunction`
for fut in as_completed(futures):
res = fut.result()
Hope this helps.
Using a decorator and asyncio, inspired by @Eric Ed Lohmar:
import asyncio
def Blink():
from functools import wraps
async def _blink():
while True:
print("OFF")
await asyncio.sleep(.5)
print("ON")
await asyncio.sleep(.5)
def Blink_decorator(func):
        @wraps(func)
async def wrapper(*args,**kwargs):
asyncio.ensure_future(_blink())
await func(*args,**kwargs)
return wrapper
return Blink_decorator
@Blink()
async def longTask():
print("Mission Start")
await asyncio.sleep(3)
print("Mission End")
def main():
loop = asyncio.get_event_loop()
    loop.run_until_complete(longTask())

main()
I'm trying to get the clipboard content using a Python script on my Mac Lion.
I'm searching for an event or something similar, because if I use a loop, my application spends all its time watching the clipboard.
Any ideas?
Have you thought about using an endless loop and "sleeping" between tries?
I used pyperclip for a simple PoC and it worked like a charm, on both Windows and Linux.
import time
import sys
import os
import pyperclip
recent_value = ""
while True:
tmp_value = pyperclip.paste()
if tmp_value != recent_value:
recent_value = tmp_value
print("Value changed: %s" % str(recent_value)[:20])
time.sleep(0.1)
Instead of the print, do whatever you want.
Here is a complete multithreading example.
import time
import threading
import pyperclip
def is_url_but_not_bitly(url):
if url.startswith("http://") and not "bit.ly" in url:
return True
return False
def print_to_stdout(clipboard_content):
print ("Found url: %s" % str(clipboard_content))
class ClipboardWatcher(threading.Thread):
def __init__(self, predicate, callback, pause=5.):
super(ClipboardWatcher, self).__init__()
self._predicate = predicate
self._callback = callback
self._pause = pause
self._stopping = False
def run(self):
recent_value = ""
while not self._stopping:
tmp_value = pyperclip.paste()
if tmp_value != recent_value:
recent_value = tmp_value
if self._predicate(recent_value):
self._callback(recent_value)
time.sleep(self._pause)
def stop(self):
self._stopping = True
def main():
watcher = ClipboardWatcher(is_url_but_not_bitly,
print_to_stdout,
5.)
watcher.start()
while True:
try:
print("Waiting for changed clipboard...")
time.sleep(10)
except KeyboardInterrupt:
watcher.stop()
break
if __name__ == "__main__":
main()
I create a subclass of threading.Thread, override the methods run and __init__ and create an instance of this class. By calling watcher.start() (not run()!), you start the thread.
To safely stop the thread, I wait for <Ctrl>-C (keyboard interrupt) and tell the thread to stop itself.
In the initialization of the class, you also have a parameter pause to control how long to wait between tries.
Use the class ClipboardWatcher like in my example, replace the callback with what you do, e.g., lambda x: bitly(x, username, password).
Looking at pyperclip, the meat of it on macOS is:
import os
def macSetClipboard(text):
outf = os.popen('pbcopy', 'w')
outf.write(text)
outf.close()
def macGetClipboard():
outf = os.popen('pbpaste', 'r')
content = outf.read()
outf.close()
return content
These work for me; how do you get on?
I don't quite follow your comment about being in a loop.
EDIT: Added a horrid polling example that shows how changeCount() bumps up on each copy to the pasteboard. It's still not what the OP wants, as there seems to be no event or notification for modifications to the NSPasteboard.
from LaunchServices import *
from AppKit import *
import os
from threading import Timer
def poll_clipboard():
pasteboard = NSPasteboard.generalPasteboard()
    print(pasteboard.changeCount())
def main():
while True:
t = Timer(1, poll_clipboard)
t.start()
t.join()
if __name__ == "__main__":
main()
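If polling is acceptable, changeCount() can at least avoid comparing the full clipboard contents on every pass; here is a sketch using pyobjc's AppKit bindings that only reads the pasteboard when the count changes:
import time
from AppKit import NSPasteboard, NSPasteboardTypeString
pasteboard = NSPasteboard.generalPasteboard()
last_count = pasteboard.changeCount()
while True:
    count = pasteboard.changeCount()
    if count != last_count:  # something new was copied
        last_count = count
        print(pasteboard.stringForType_(NSPasteboardTypeString))
    time.sleep(0.5)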
simple!
import os
def macSetClipboard(text):
outf = os.popen('pbcopy', 'w')
outf.write(text)
outf.close()
def macGetClipboard():
outf = os.popen('pbpaste', 'r')
content = outf.read()
outf.close()
return content
current_clipboard = macGetClipboard()
while True:
clipboard = macGetClipboard()
if clipboard != current_clipboard:
print(clipboard)
macSetClipboard("my new string")
print(macGetClipboard())
break
I originally posted my answer on a duplicate: Run a python code when copying text with specific keyword.
Here is the answer I came up with.
import clipboard
import asyncio
# Example function.
async def your_function():
print("Running...")
async def wait4update(value):
while True:
if clipboard.paste() != value : # If the clipboard changed.
return
async def main():
value = clipboard.paste() # Set the default value.
while True :
update = asyncio.create_task(wait4update(value))
await update
value = clipboard.paste() # Change the value.
asyncio.create_task(your_function()) #Start your function.
asyncio.run(main())