I am trying to populate the lists self.images and self.servers with the functions self.refresh_image_list and self.refresh_server_list using multiprocessing, so that they kick off asynchronously when the object is created. I am using shared lists so that the child process's copy updates the original object's lists.
However, I am getting a pickling error, so I am pretty stuck.
class Account():

    def __init__(self, username, api, pipe_object):
        manager = Manager()
        self.images = manager.list()
        self.servers = manager.list()
        self.images_timestamp = None
        self.servers_timestamp = None

        #needed a dictionary instead of
        #list/tuple. This works best for
        #the generator.
        self.regions = {
            "DFW" : pyrax.connect_to_cloudservers("DFW"),
            "ORD" : pyrax.connect_to_cloudservers("ORD"),
            "SYD" : pyrax.connect_to_cloudservers("SYD")
        }

        p1 = Process(target = self.refresh_server_list, args=())
        p2 = Process(target = self.refresh_image_list, args=())
        p1.start()
        p2.start()
        p1.join()
        p2.join()

    flavors = None

    #multiprocessing shares lists only for __init__
    #after __init__, we want to break the share
    unshare_lists = False

    def refresh_server_list(self):
        if self.unshare_lists:
            self.servers = []
        self.servers_timestamp = time.strftime(
            "%I:%M:%S", time.localtime()
        )
        with Redirect(self.pipe_object):
            print "\nRefreshing server cache...hold on!"
        for region, value in self.regions.iteritems():
            region_servers = value.servers.list()
            for region_servers in generator(region_servers, region):
                self.servers.append(region_servers)
        with Redirect(self.pipe_object):
            print "\nServer cache completed!"

    def server_list(self):
        if not self.servers:
            self.refresh_server_list()
        with Redirect(self.pipe_object):
            print_header("Cached Server List", "-")
            for srv in self.servers:
                print "\nName: %s" % srv.name
                print "Created: %s" % srv.created
                print "Progress: %s" % srv.progress
                print "Status: %s" % srv.status
                print "URL: %s" % srv.links[0]["href"]
                print "Networks: %s" % srv.networks
            print "\nLast Refresh time: %s" % self.servers_timestamp

    def refresh_image_list(self):
        if self.unshare_lists:
            self.images = []
        self.images_timestamp = time.strftime(
            "%I:%M:%S", time.localtime()
        )
        with Redirect(self.pipe_object):
            # print_header("Active Image List", "-")
            print "\nRefreshing image cache...hold on!"
        for region, value in self.regions.iteritems():
            region_images = value.images.list()
            for region_images in generator(region_images, region):
                self.images.append(region_images)
        with Redirect(self.pipe_object):
            print "\nImage cache completed!"

    def image_list(self):
        if not self.images:
            self.refresh_image_list()
        with Redirect(self.pipe_object):
            print_header("List Images", "-")
            for img in self.images:
                print (
                    str(self.images.index(img)+1) + ") "
                    + "Name: %s\n ID: %s Status: %s" %
                    (img.name, img.id, img.status)
                )
            print "\nLast Refresh time: %s" % self.images_timestamp
The error I get:
Refreshing server cache...hold on!
Traceback (most recent call last):
File "menu.py", line 162, in <module>
main()
File "menu.py", line 156, in main
menus[value](hash_table, accounts)
File "menu.py", line 104, in menu
choices[value]()
File "/home/work/modules/classes.py", line 87, in server_list
self.refresh_server_list()
File "/home/work/modules/classes.py", line 80, in refresh_server_list
self.servers.append(region_servers)
File "<string>", line 2, in append
File "/usr/lib64/python2.7/multiprocessing/managers.py", line 758, in _callmethod
conn.send((self._id, methodname, args, kwds))
cPickle.PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup __builtin__.instancemethod failed
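For context on that traceback: appending to a Manager().list() sends the value over a pipe to the manager process, so everything appended has to be picklable, and the pyrax server objects evidently carry bound methods that cPickle cannot handle. One possible workaround, sketched below under the assumption that only the fields printed by server_list() are needed later (the generator() wrapper is left out for brevity), is to append plain dictionaries of primitives instead of the SDK objects:

def refresh_server_list(self):
    if self.unshare_lists:
        self.servers = []
    self.servers_timestamp = time.strftime("%I:%M:%S", time.localtime())
    for region, value in self.regions.iteritems():
        for srv in value.servers.list():
            # plain dicts of strings pickle cleanly, unlike the SDK objects
            self.servers.append({
                "region": region,
                "name": srv.name,
                "created": srv.created,
                "progress": srv.progress,
                "status": srv.status,
                "url": srv.links[0]["href"],
                "networks": srv.networks,
            })

server_list() would then print the dictionary keys (srv["name"] and so on) instead of attributes; the same change would apply to refresh_image_list() with name, id and status.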
I need some help with a Python script. The script is an example script from the Pyinsane module for Python (Pyinsane: https://github.com/jflesch/pyinsane).
I want to write my own scanner script, but to do that I need to understand the example code or the output error.
Example script:
import sys

from PIL import Image

try:
    import src.abstract as pyinsane
except ImportError:
    import pyinsane.abstract as pyinsane


def set_scanner_opt(scanner, opt, value):
    print("Setting %s to %s" % (opt, str(value)))
    try:
        scanner.options[opt].value = value
    except (KeyError, pyinsane.SaneException) as exc:
        print("Failed to set %s to %s: %s" % (opt, str(value), str(exc)))


if __name__ == "__main__":
    steps = False

    args = sys.argv[1:]
    if len(args) <= 0 or args[0] == "-h" or args[0] == "--help":
        print("Syntax:")
        print("  %s [-s] <output file (JPG)>" % sys.argv[0])
        print("")
        print("Options:")
        print("  -s : Generate intermediate images (may generate a lot of"
              " images !)")
        sys.exit(1)
    for arg in args[:]:
        if arg == "-s":
            steps = True
            args.remove(arg)
    output_file = args[0]
    print("Output file: %s" % output_file)

    print("Looking for scanners ...")
    devices = pyinsane.get_devices()
    if (len(devices) <= 0):
        print("No scanner detected !")
        sys.exit(1)
    print("Devices detected:")
    print("- " + "\n- ".join([str(d) for d in devices]))
    print("")

    device = devices[0]
    print("Will use: %s" % str(device))
    print("")

    source = 'Auto'
    if (device.options['source'].constraint_type
            == pyinsane.SaneConstraintType.STRING_LIST):
        if 'Auto' in device.options['source'].constraint:
            source = 'Auto'
        elif 'FlatBed' in device.options['source'].constraint:
            source = 'FlatBed'
    else:
        print("Warning: Unknown constraint type on the source: %d"
              % device.options['source'].constraint_type)

    set_scanner_opt(device, 'resolution', 300)
    set_scanner_opt(device, 'source', source)
    set_scanner_opt(device, 'mode', 'Color')

    print("")
    print("Scanning ... ")
    scan_session = device.scan(multiple=False)

    if steps and scan_session.scan.expected_size[1] < 0:
        print("Warning: requested step by step scan images, but"
              " scanner didn't report the expected number of lines"
              " in the final image --> can't do")
        print("Step by step scan images won't be recorded")
        steps = False

    if steps:
        last_line = 0
        expected_size = scan_session.scan.expected_size
        img = Image.new("RGB", expected_size, "#ff00ff")
        sp = output_file.split(".")
        steps_filename = (".".join(sp[:-1]), sp[-1])

    try:
        PROGRESSION_INDICATOR = ['|', '/', '-', '\\']
        i = -1
        while True:
            i += 1
            i %= len(PROGRESSION_INDICATOR)
            sys.stdout.write("\b%s" % PROGRESSION_INDICATOR[i])
            sys.stdout.flush()

            scan_session.scan.read()

            if steps:
                next_line = scan_session.scan.available_lines[1]
                if (next_line > last_line):
                    subimg = scan_session.scan.get_image(last_line, next_line)
                    img.paste(subimg, (0, last_line))
                    img.save("%s-%05d.%s" % (steps_filename[0], last_line,
                                             steps_filename[1]), "JPEG")
                    last_line = next_line
    except EOFError:
        pass

    print("\b ")

    print("Writing output file ...")
    img = scan_session.images[0]
    img.save(output_file, "JPEG")

    print("Done")
Now the output:
sudo python /home/pi/Desktop/scantest.py -s 1
Output file: 1
Looking for scanners ...
Devices detected:
- Scanner 'genesys:libusb:001:006' (Canon, LiDE 110, flatbed scanner)
Will use: Scanner 'genesys:libusb:001:006' (Canon, LiDE 110, flatbed scanner)
Setting resolution to 300
Setting source to Auto
Failed to set source to Auto: <class 'pyinsane.rawapi.SaneStatus'> : Data is invalid (4)
Setting mode to Color
Scanning ...
|Traceback (most recent call last):
File "/home/pi/Desktop/scantest.py", line 107, in <module>
steps_filename[1]), "JPEG")
File "/usr/local/lib/python2.7/dist-packages/PIL/Image.py", line 1439, in save
save_handler(self, fp, filename)
File "/usr/local/lib/python2.7/dist-packages/PIL/PngImagePlugin.py", line 572, in _save
ImageFile._save(im, _idat(fp, chunk), [("zip", (0,0)+im.size, 0, rawmode)])
File "/usr/local/lib/python2.7/dist-packages/PIL/ImageFile.py", line 481, in _save
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
File "/usr/local/lib/python2.7/dist-packages/PIL/Image.py", line 399, in _getencoder
return apply(encoder, (mode,) + args + extra)
TypeError: an integer is required
Can anybody please explain the example code or help me with the output error?
Thanks a lot,
laurin
Is your PIL/Pillow library compiled with JPEG support?
If not, try replacing:
img.save("%s-%05d.%s" % (steps_filename[0], last_line,
steps_filename[1]), "JPEG")
by
img.save("%s-%05d.%s" % (steps_filename[0], last_line,
steps_filename[1]), "PNG")
And
img.save(output_file, "JPEG")
by
img.save(output_file, "PNG")
Adding and deleting values in a list are operations performed in different threads. I am using these global variables for the multithreading:
from threading import Thread
import time
a=[]
i = 0
j = 0
Function for thread 1:
def val_in():
    while 1:
        a.append(raw_input())
        print "%s value at %d: %d added" % (time.ctime(time.time()), i, int(a[i]))  # line 14
        i += 1
Function for thread 2:
def val_out():
    while 1:
        time.sleep(5)
        try:
            print "%s value at %d: %d deleted" % (time.ctime(time.time()), j, int(a.pop(j)))
            i -= 1
        except:
            print "no values lefts"
            time.sleep(2)
Defining and starting the threads:
t = Thread(target=val_in)
t1 = Thread(target=val_out)
t.start()
t1.start()
Now when input is given as 1, it should be added to the list a, but there is an error:
Error: Exception in thread Thread-1:
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "/Users/dhiraj.agarwal/Documents/workspace/try3/multithread.py", line 14, in val_in
UnboundLocalError: local variable 'i' referenced before assignment
You should tell Python that i is global:
def val_in():
    global i
    ...

def val_out():
    global i
    ...
This is an issue with variable scope. The functions that run in the threads assign to i, so Python treats it as a local variable inside them. In that case you have to declare the variable as global.
def val_in():
    global i  # add this line
    while 1:
        a.append(raw_input())
        print "%s value at %d: %d added" % (time.ctime(time.time()), i, int(a[i]))
        i += 1

def val_out():
    global i  # add this line
    while 1:
        time.sleep(5)
        try:
            print "%s value at %d: %d deleted" % (time.ctime(time.time()), j, int(a.pop(j)))
            i -= 1
        except:
            print "no values lefts"
            time.sleep(2)
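For reference, the rule behind the UnboundLocalError has nothing to do with threading: any function that assigns to a name treats that name as local unless it is declared global. A minimal illustration with a hypothetical counter variable:

counter = 0

def bump_wrong():
    # assignment makes 'counter' local, so reading it first raises
    # UnboundLocalError: local variable 'counter' referenced before assignment
    counter += 1

def bump_right():
    global counter  # rebind the module-level name instead
    counter += 1

bump_right()
print counter  # 1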
I want to keep some file content loaded in memory so that it can be retrieved instantly when queried.
In the Gearman worker, I am loading the file and putting the worker into listening mode. When I make a request using the Gearman client, the worker returns the loaded content only once; the next time, the client receives None.
Worker:
class GetLexiconFiles(object):

    def __init__(self):
        self.gm_worker = gearman.GearmanWorker(['localhost:4730'])
        self.loadFiles()
        self.gm_worker.register_task('load_db', self.task_listener_reverse)
        #self.loadFiles()
        #self.gm_worker.work()

    def task_listener_reverse(self, gearman_worker, gearman_job):
        k = float('inf')
        #print "Started loading file"
        self.input_text = self.input_text.split('\n')
        print "Loading completed"
        lexicon = defaultdict(list)
        for i, line in enumerate(self.input_text):
            #print "line is : ", line
            if i >= k: break
            #if i % 100000 == 0: print >>sys.stderr, i
            try:
                if line != '':
                    nl, dbs = line.split(' ', 1)
                    nl = int(nl)
                    dbs = self.str2obj(dbs)
                    lexicon[nl].append(dbs)
                else:
                    pass
            except:
                print >>sys.stderr, 'could not parse line %r' % line
                print traceback.format_exc()
                continue
        return json.dumps(lexicon)


if __name__ == '__main__':
    GetLexiconFiles().gm_worker.work()
Client:
def check_request_status(job_request):
    if job_request.complete:
        #data = json.loads(job_request.result)
        print "Job %s finished! Result: %s - %s" % (job_request.job.unique, job_request.state, job_request.result)
    elif job_request.timed_out:
        print "Job %s timed out!"
    elif job_request.state == JOB_UNKNOWN:
        print "Job %s connection failed!"

gm_client = gearman.GearmanClient(['localhost:4730'])

tasks = [{'task': 'load_lexicon', 'data': 'This is testing sentence'}, {'task': 'load_db', 'data': 'This is db testing'}]

submitted_requests = gm_client.submit_multiple_jobs(tasks, background=False, wait_until_complete=False)
completed_requests = gm_client.wait_until_jobs_completed(submitted_requests)

print completed_requests[1].result

for completed_job_request in completed_requests:
    check_request_status(completed_job_request)
self.input_text = self.input_text.split('\n')
With this line of code you are converting a string into a list of strings.
Since you save the result back into self.input_text, the next time the function gets called self.input_text will already be a list, and calling split on it raises an exception.
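One way around that, sketched here with the lexicon-building details elided, is to split into a local variable so self.input_text keeps the raw file content between jobs:

def task_listener_reverse(self, gearman_worker, gearman_job):
    k = float('inf')
    # split into a local name; self.input_text stays a string across calls
    lines = self.input_text.split('\n')
    lexicon = defaultdict(list)
    for i, line in enumerate(lines):
        if i >= k:
            break
        # ... same parsing as in the worker above ...
    return json.dumps(lexicon)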
I'm having an issue with a race condition in my script. The goal is to connect to Deluge and gather information using Twisted.
Here is the script:
#!/usr/bin/python

import json
import sys
import os.path
from datetime import datetime

from deluge.ui.client import client
from twisted.internet import reactor, task


class Deluge(object):

    def __init__(self, *args):
        for key, value in enumerate(args):
            self.key = value

    def getDownloadQueue(self):
        print "Started getDownloadQueue()"
        self.connect()
        print "Finished getDownloadQueue()"

    def connect(self):
        print "Started connect()"
        deluge = client.connect()
        #deluge.addCallback(self.onConnect,params).addErrback(self.onConnectFail).addBoth(self.disconnect)
        print "task.react()"
        test = task.react(self.onConnect, [])
        print "deluge.addCallback()"
        test.addCallback(deluge).addErrback(self.onConnectFail).addBoth(self.disconnect)
        #deluge.addErrback(self.onConnectFail)
        print "Finished connect()"

    def disconnect(self):
        client.disconnect()
        print "Finished disconnect()"

    def onConnect(self, reactor):
        print "Started onConnect()"

        def onGetTorrentStatus(torrentInfo):
            print "Started onGetTorrentStatus()"
            print torrentInfo["name"] + " " + torrentInfo["label"]
            if torrent["name"] == torrent_name:
                print "File '%s' already exists" % torrent["name"]
                print "Finished onGetTorrentStatus()"
                return

        def onGetSessionState(torrent_ids):
            print "Started onGetSessionState()"
            print torrent_ids
            print "Got all torrent ids"
            for id in torrent_ids:
                d = client.core.get_torrent_status(id, ["name","label"]).addCallback(onGetTorrentStatus)
                print defer.gatherResults([d, self.disconnect])
            print "Finished onGetSessionState()"

        client.core.get_session_state().addCallback(self.onGetSessionState)
        print "Finished onConnect()"

    def onConnectFail(self, result):
        print "Error: %s" % result


Deluge().getDownloadQueue()
Here is the error it outputs:
Traceback (most recent call last):
File "./delugeTest.py", line 64, in <module>
Deluge().getDownloadQueue()
File "./delugeTest.py", line 18, in getDownloadQueue
self.connect()
File "./delugeTest.py", line 28, in connect
test = task.react(self.onConnect, [])
File "/usr/local/lib/python2.7/dist-packages/twisted/internet/task.py", line 867, in react
finished = main(_reactor, *argv)
File "./delugeTest.py", line 58, in onConnect
client.core.get_session_state().addCallback(self.onGetSessionState)
File "/usr/lib/python2.7/dist-packages/deluge/ui/client.py", line 504, in __call__
return self.daemon.call(self.base, *args, **kwargs)
File "/usr/lib/python2.7/dist-packages/deluge/ui/client.py", line 308, in call
self.protocol.send_request(request)
AttributeError: 'NoneType' object has no attribute 'send_request'
This is in reference to a question I asked a few months ago: How to properly stop Twisted reactor when callback is finished
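Not a verified fix, but for comparison, the usual Twisted pattern is to chain everything off the Deferred returned by client.connect() and let the reactor run until the work is done, rather than calling task.react() from inside connect(). A sketch using only the calls that already appear in the script (client.connect, client.core.get_session_state, client.core.get_torrent_status, client.disconnect):

from deluge.ui.client import client
from twisted.internet import reactor, defer

def on_torrent_status(status):
    print status["name"], status["label"]

def on_session_state(torrent_ids):
    # fetch the status of every torrent, then disconnect and stop the reactor
    deferreds = [client.core.get_torrent_status(tid, ["name", "label"])
                     .addCallback(on_torrent_status)
                 for tid in torrent_ids]
    done = defer.gatherResults(deferreds)
    done.addCallback(lambda _: client.disconnect())
    done.addCallback(lambda _: reactor.stop())

def on_connect(result):
    return client.core.get_session_state().addCallback(on_session_state)

def on_connect_fail(result):
    print "Connection failed: %s" % result
    reactor.stop()

client.connect().addCallback(on_connect).addErrback(on_connect_fail)
reactor.run()

The AttributeError in the traceback ('NoneType' object has no attribute 'send_request') suggests client.core was used before the connection was actually established, which is why the ordering of the callbacks matters here.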
Any input on what is wrong with the line phCmd = "ph %s return all".split(' ') % (qgroup)? I am trying to interpolate qgroup into the %s placeholder.
from subprocess import Popen, PIPE, STDOUT

def main():
    qgroups = ['tech.sw.list','tech.sw.amss']
    for qgroup in qgroups:
        print qgroup
        phCmd = "ph %s return all".split(' ') % (qgroup)
        phPipe = Popen(phCmd, stdout=PIPE, stderr=PIPE)
        (output, error) = phPipe.communicate()
        print output
        if phPipe.returncode != 0:
            print output
            raise IOError, "phcommand %s failed" % (phCmd)
    return output
ERROR:
Traceback (most recent call last):
File "test.py", line 20, in <module>
main()
File "test.py", line 9, in main
phCmd = "ph %s return all".split(' ') % (qgroup)
if __name__ == '__main__':
    main()
The .split(' ') method call of a string returns a list. Try something like
phCmd = ("ph %s return all" % (qgroup)).split(' ')
instead.
"ph %s return all".split(' ') % (qgroup)
The split() call returns a list, and % is not defined between a list and a string. I'm not sure what you mean to do here, but it looks like you want:
("ph %s return all" % (qgroup)).split(' ')
When using "%" with strings, you have to place it right after the string. This line of code
phCmd = "ph %s return all".split(' ') % (qgroup)
is actually telling Python to take the list returned by "ph %s return all".split(' ') and run an operation similar to:
>>> 2 % 2
0
>>>
on it using (qgroup), which blows up.
To fix your problem, do this:
phCmd = ("ph %s return all" % qgroup).split(' ')