Dears,
I am new to Python and trying to get started with network programming: a simple program with two plain-text edit areas that starts two telnet sessions and shows the output of each session in its own area. However, I get the error below:
QObject::connect: Cannot queue arguments of type 'QTextCursor'
(Make sure 'QTextCursor' is registered using qRegisterMetaType().)
The code is as follows:
# imports assumed from context; the snippet as posted started at QApplication
# (PyQt5 is a guess; PyQt4/PySide would also fit app.exec_())
import sys
import _thread
import telnetlib

from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QPlainTextEdit

app = QApplication(sys.argv)

def to_str(bytes_or_str):
    if isinstance(bytes_or_str, bytes):
        value = bytes_or_str.decode()  # uses 'utf-8' by default
    else:
        value = bytes_or_str
    return value  # instance of str

def testTelnet_start():  # called when the button is pressed
    _thread.start_new_thread(testTelnet, ("192.168.3.247", 1))
    print("Test")
    _thread.start_new_thread(testTelnet, ("192.168.3.252", 2))

def testTelnet(ip, x):
    try:
        tel_conn = telnetlib.Telnet()
        tel_conn.open(host=ip)
        data = tel_conn.read_until(b"login:", 3)
        data = to_str(data)
        data = ip + "\n" + data
        print(data)
        if x == 1:
            plain_text_area_Upgrade1.appendPlainText(data)
        elif x == 2:
            plain_text_area_Upgrade2.appendPlainText(data)
    except Exception as e:
        print(e)
my_Test_Window = QWidget()
my_Test_Window.setWindowTitle("Telnet Window")
my_Test_Window.resize(490, 350)
my_Test_Window.setFixedSize(my_Test_Window.size())

push_test1 = QPushButton("Test#1", my_Test_Window)
push_test1.move(90, 280)

plain_text_area_Upgrade1 = QPlainTextEdit(my_Test_Window)
plain_text_area_Upgrade1.resize(160, 150)
plain_text_area_Upgrade1.updatesEnabled()
plain_text_area_Upgrade1.move(25, 20)
plain_text_area_Upgrade1.setReadOnly(True)
plain_text_area_Upgrade1.insertPlainText("Testing ...")
plain_text_area_Upgrade1.appendPlainText("")

plain_text_area_Upgrade2 = QPlainTextEdit(my_Test_Window)
plain_text_area_Upgrade2.resize(160, 150)
plain_text_area_Upgrade2.updatesEnabled()
plain_text_area_Upgrade2.move(250, 20)
plain_text_area_Upgrade2.setReadOnly(True)
plain_text_area_Upgrade2.insertPlainText("Testing ...")
plain_text_area_Upgrade2.appendPlainText("")

push_test1.clicked.connect(testTelnet_start)
my_Test_Window.show()
app.exec_()
Any idea why such simple multithreading code causes these errors?
Thanks.
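This error usually means a worker thread is touching GUI objects: appendPlainText() manipulates a QTextCursor internally, and Qt cannot marshal that call across threads. A minimal sketch of the usual fix, assuming PyQt5: have the worker emit a signal carrying the text, and let a slot running in the GUI thread do the appending.

from PyQt5.QtCore import QObject, pyqtSignal

class TelnetSignals(QObject):
    # Qt queues this signal across threads, so the connected
    # slot always runs in the GUI thread
    output = pyqtSignal(int, str)

signals = TelnetSignals()

def on_output(x, data):
    # safe: executes in the GUI thread
    if x == 1:
        plain_text_area_Upgrade1.appendPlainText(data)
    elif x == 2:
        plain_text_area_Upgrade2.appendPlainText(data)

signals.output.connect(on_output)

# inside testTelnet(), replace the appendPlainText() calls with:
# signals.output.emit(x, data)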
So I'm making an application using a GPS module, Python, and a MySQL database.
I have written some code to capture the data from the GPS and store it in the database. I'm using a library called "pynmea2" to parse some of the data (longitude and latitude). However, I need more data than that, so I have already tried a lot of different things, but my program keeps crashing. Could someone help me out with this?
Most of the time I get all the data from the serial connection, but I want to be able to strip data from it. An example of what I get:
[b'$GPGGA,093512.000,,,,,0,3,,,M,,M,,*47\r\n', b'$GPGLL,,,,,093512.000,V,N*76\r\n', b'$GPGSA,A,1,,,,,,,,,,,,,,,*1E\r\n', b'$GPGSV,3,1,11,15,72,214,,24,52,276,,13,48,141,,17,31,093,29*70\r\n', b'$GPGSV,3,2,11,18,28,292,,28,27,049,25,19,24,120,24,12,23,211,13*7E\r\n', b'$GPGSV,3
It's not that simple to extract data from it, but it works out just fine with the pynmea2 library (the only library I'm allowed to use).
For now I need the speed, the latitude and the longitude, but the speed is bothering me. It gives ValueError: could not convert string to float: "22*49\\r\\n'" a lot of times, because I don't have a proper way of finding the data and then parsing it.
Here is the code I'm currently using:
from model.GPSParser import GPSParser
from model.DB import DB

import serial
import time
import datetime
import pynmea2

#########################################
# This is the main code to set up the
# serial connection with the GPS module.
# It needs to be run either as root or
# as pi with full root rights.
#########################################
port = "/dev/ttyAMA0"
ser = serial.Serial(port, 9600, timeout=0)

#########################################
# These are all the global variables
# to be used. All defined and set to
# zero or their standard 'null' value.
#########################################
lat = 0.0
lon = 0.0
cur_speed = 0.0

while True:
    try:
        # Get the data from the serial monitor.
        data = str(ser.readlines()).lstrip("b'")[:-3]
        # print(data)

        #########################################
        # Find the speed, to check if we're
        # standing still or not. Save it in
        # cur_speed.
        #########################################
        if data.find('$GPVTG') != -1:
            cur_speed = data.split(",")[7]

        #########################################
        # Get the latitude and longitude.
        #########################################
        if data.find('$GPGGA') != -1:
            print(data)
            # Check whether the data strings are empty or not.
            if GPSParser.parseLatitude(data) != "" and GPSParser.parseLongitude(data) != "":
                lat = GPSParser.parseLatitude(data)
                lon = GPSParser.parseLongitude(data)
                # Debug printing
                # print("Latitude: " + GPSParser.parseLatitude(data))
                # print("Longitude: " + GPSParser.parseLongitude(data))
                # print("Speed: " + cur_speed)

        #########################################
        # Insert the coordinates into the database.
        # Be sure to check that we are really
        # driving, i.e. the speed is higher than
        # 5 km/h, before storing anything.
        #########################################
        if float(cur_speed) > 5.0:
            db = DB()
            db.insertCoordinates(lat, lon, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

        # Wait a bit to not overload the serial port.
        time.sleep(0.5)

    ############################################################
    # The error handling
    ############################################################
    except serial.serialutil.SerialException:
        ser.close()
        port = "/dev/ttyAMA0"
        ser = serial.Serial(port, 9600, timeout=0)
        continue
    except pynmea2.ParseError:
        # print("Error on parsing object, continuing...")
        continue
    except BlockingIOError:
        # print("Blocking I/O error, continuing...")
        continue
    except TypeError:
        # print("Type error, continuing...")
        continue
    except IndexError:
        # print("To catch an error...")
        continue
    except KeyboardInterrupt:
        print("\nProgram stopped.")
        exit()
The imports from model don't do much: DB is just the database connection, and GPSParser only uses pynmea2 to parse a string of data and return the result.
So what I want is something like this:
It gets all the data the module pushes once per second, then splits it into chunks starting at each $GP marker. From there I can look at the sentence type, for example VTG or GGA, and convert the right fields to extract the speed, latitude, longitude, and other data if needed. A sketch of that idea follows below.
Hope you guys can understand me and can help me out.
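A minimal sketch of that approach, assuming the module emits complete NMEA sentences once per second: read one sentence at a time with ser.readline() and hand each $GP line to pynmea2, rather than splitting a concatenated readlines() blob.

import serial
import pynmea2

ser = serial.Serial("/dev/ttyAMA0", 9600, timeout=1)

while True:
    # one NMEA sentence per readline() call
    line = ser.readline().decode("ascii", errors="replace").strip()
    if not line.startswith("$GP"):
        continue
    try:
        msg = pynmea2.parse(line)
    except pynmea2.ParseError:
        continue
    if msg.sentence_type == "VTG":
        # speed over ground in km/h; the field may be empty on a bad fix
        cur_speed = float(msg.spd_over_grnd_kmph or 0.0)
    elif msg.sentence_type == "GGA":
        lat, lon = msg.latitude, msg.longitude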
Not sure if that solves your problem, but pynmea2 has speed attributes, defined in talker.py.
import pynmea2

for i, line in enumerate(open('/tmp/nmea.txt').readlines()):
    # parsing via pynmea2
    msg = pynmea2.parse(line.strip())
    if msg.sentence_type == 'VTG':
        print('parsing line %s with pynmea:' % i, float(msg.spd_over_grnd_kmph))
    # parsing manually
    if line.startswith('$GPVTG'):
        cur_speed = line.split(",")[7]
        print('parsing line %s manually:' % i, float(cur_speed))
Returns:
parsing line 1 with pynmea: 91.626
parsing line 1 manually: 91.626
parsing line 10 with pynmea: 90.842
parsing line 10 manually: 90.842
parsing line 19 with pynmea: 89.676
parsing line 19 manually: 89.676
My code prints out what I need from the print statement in Events. But I have no idea how to return the data, because of the way the class is instantiated. Furthermore, the print statement only works if pythoncom.PumpWaitingMessages() is included, and that doesn't return the printed data, or anything else.
I'd like to be able to use what's printed as a return value that other functions can access.
(If worse comes to worst, I could capture stdout, but that's a last resort.)
Code:
# Standard Lib
import time
# Third Party
from win32com.client import DispatchWithEvents
import pythoncom
# Local Lib
import scan_var

class Events(object):
    def OnBarcodeEvent(self, eventType=pythoncom.Empty, pscanData=pythoncom.Empty):
        print pscanData
        return pscanData

zebra = DispatchWithEvents("CoreScanner.CoreScanner", Events)

# open api
open_status = zebra.Open(0, [1], 1)
print "Open status: {}".format(open_status)

# get scanners
get_scanners = zebra.GetScanners(0, [1])
print "get_scanners: {}".format(get_scanners)

# Register for events
register = zebra.ExecCommand(1001, scan_var.register_for_events)
print "register: {}".format(register)

# PEWPEWPEW (pull trigger)
fire_result = zebra.ExecCommand(2011, scan_var.pull_trigger)
print "PEWPEWPEW {}".format(fire_result)

time.sleep(5)

while True:
    time.sleep(1)
    pythoncom.PumpWaitingMessages()
Output:
Open status: 0
get_scanners: (1, (1,),504</VID> <PID>6400</PID> ...
register: (u'', 0)
PEWPEWPEW (u'', 0)
<?xml version="1.0" encoding="UTF-8"?>
<outArgs>
<scannerID>1</scannerID>
<arg-xml>
<scandata>
<modelnumber>new_hotness </modelnumber>
<serialnumber>1522501a0501156 </serialnumber>
<GUID>2A4BE99CFCEFD047837ADF0082aD51AD5</GUID>
<datatype>27</datatype>
<datalabel>0x39 0x32 0x304 ...</datalabel>
<rawdata>0x22 0x03 0x00 ... </rawdata>
</scandata>
</arg-xml>
</outArgs>
My solution is as follows. It may be ugly and wrong, but it got me what I need. If anyone has a better way, I'm happy to edit.
class Events(object):
    def get_barcode(self):
        return self.pscanData

    def OnBarcodeEvent(self, eventType=1, pscanData=pythoncom.Empty):
        self.pscanData = pscanData
        print self.pscanData

def save_serial_to_cache():
    zebra = DispatchWithEvents("CoreScanner.CoreScanner", Events)

    # open api
    open_status = zebra.Open(0, [1], 1)
    print "Open status: {}".format(open_status)

    # get scanners
    get_scanners = zebra.GetScanners(0, [1])
    print "get_scanners: {}".format(get_scanners)

    # Register for events
    register = zebra.ExecCommand(1001, scan_var.register_for_events)
    print "register: {}".format(register)

    # PEWPEWPEW (pull trigger)
    fire_result = zebra.ExecCommand(2011, scan_var.pull_trigger)
    print "PEWPEWPEW {}".format(fire_result)

    for counter in xrange(0, 5):
        time.sleep(1)
        pythoncom.PumpWaitingMessages()

    return zebra._obj_.get_barcode.__self__.pscanData
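For what it's worth, a possibly simpler way to read the value back (untested sketch): DispatchWithEvents returns an instance of a class derived from the supplied Events handler, so the attribute stored in OnBarcodeEvent should be readable directly off the dispatch object.

# sketch: assumes the dispatch object is itself the event-handler instance
barcode_xml = getattr(zebra, "pscanData", None)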
I have a CSV with keywords in one column and the number of impressions in a second column.
I'd like to provide the keywords in a URL (while looping) and have the Google Language API return what language each keyword is in.
I have it working manually. If I enter (with the correct API key):
http://ajax.googleapis.com/ajax/services/language/detect?v=1.0&key=myapikey&q=merde
I get:
{"responseData": {"language":"fr","isReliable":false,"confidence":6.213709E-4}, "responseDetails": null, "responseStatus": 200}
which is correct, 'merde' is French.
So far I have this code, but I keep getting "server unreachable" errors:
import time
import csv
from operator import itemgetter
import sys
import fileinput
import urllib2
import json

E_OPERATION_ERROR = 1
E_INVALID_PARAMS = 2

# not working
def parse_result(result):
    """Parse a JSONP result string and return a list of terms"""
    # Deserialize JSON to Python objects
    result_object = json.loads(result)
    # Get the rows in the table, then get the second column's value
    # for each row
    return row in result_object

# not working
def retrieve_terms(seedterm):
    print(seedterm)
    """Retrieves and parses data and returns a list of terms"""
    url_template = 'http://ajax.googleapis.com/ajax/services/language/detect?v=1.0&key=myapikey&q=%(seed)s'
    url = url_template % {"seed": seedterm}
    try:
        with urllib2.urlopen(url) as data:
            data = perform_request(seedterm)
            result = data.read()
    except:
        sys.stderr.write('%s\n' % 'Could not request data from server')
        exit(E_OPERATION_ERROR)
    #terms = parse_result(result)
    #print terms
    print result

def main(argv):
    filename = argv[1]
    csvfile = open(filename, 'r')
    csvreader = csv.DictReader(csvfile)
    rows = []
    for row in csvreader:
        rows.append(row)
    sortedrows = sorted(rows, key=itemgetter('impressions'), reverse=True)
    keys = sortedrows[0].keys()
    for item in sortedrows:
        retrieve_terms(item['keywords'])
    try:
        outputfile = open('Output_%s.csv' % (filename), 'w')
    except IOError:
        print("The file is active in another program - close it first!")
        sys.exit()
    dict_writer = csv.DictWriter(outputfile, keys, lineterminator='\n')
    dict_writer.writer.writerow(keys)
    dict_writer.writerows(sortedrows)
    outputfile.close()
    print("File is Done!! Check your folder")

if __name__ == '__main__':
    start_time = time.clock()
    main(sys.argv)
    print("\n")
    print time.clock() - start_time, "seconds for script time"
Any idea how to finish the code so that it will work? Thank you!
Try to add referrer, userip as described in the docs:
An area to pay special attention to relates to correctly identifying yourself in your requests. Applications MUST always include a valid and accurate http referer header in their requests. In addition, we ask, but do not require, that each request contains a valid API Key. By providing a key, your application provides us with a secondary identification mechanism that is useful should we need to contact you in order to correct any problems. Read more about the usefulness of having an API key.

Developers are also encouraged to make use of the userip parameter (see below) to supply the IP address of the end-user on whose behalf you are making the API request. Doing so will help distinguish this legitimate server-side traffic from traffic which doesn't come from an end-user.
Here's an example based on the answer to the question "access to google with python":
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import urllib, urllib2
from pprint import pprint

api_key, userip = None, None
query = {'q': 'матрёшка'}
referrer = "https://stackoverflow.com/q/4309599/4279"

if userip:
    query.update(userip=userip)
if api_key:
    query.update(key=api_key)

url = 'http://ajax.googleapis.com/ajax/services/language/detect?v=1.0&%s' % (
    urllib.urlencode(query))
request = urllib2.Request(url, headers=dict(Referer=referrer))
json_data = json.load(urllib2.urlopen(request))
pprint(json_data['responseData'])
Output
{u'confidence': 0.070496580000000003, u'isReliable': False, u'language': u'ru'}
Another issue might be that seedterm is not properly quoted:
if isinstance(seedterm, unicode):
    value = seedterm
else:  # bytes
    value = seedterm.decode(put_encoding_here)

url = 'http://...q=%s' % urllib.quote_plus(value.encode('utf-8'))
I'd like to create a hashlib instance, update() it, then persist its state in some way. Later, I'd like to recreate the object using this state data, and continue to update() it. Finally, I'd like to get the hexdigest() of the total cumulative run of data. State persistence has to survive across multiple runs.
Example:
import hashlib
m = hashlib.sha1()
m.update('one')
m.update('two')
# somehow, persist the state of m here
#later, possibly in another process
# recreate m from the persisted state
m.update('three')
m.update('four')
print m.hexdigest()
# at this point, m.hexdigest() should be equal to hashlib.sha1('onetwothreefour').hexdigest()
EDIT:
I did not find a good way to do this with Python in 2010 and ended up writing a small helper app in C to accomplish it. However, there are some great answers below that were not available or known to me at the time.
You can do it this way using ctypes; no helper app in C is needed:
rehash.py
#! /usr/bin/env python

''' A resumable implementation of SHA-256 using ctypes with the OpenSSL crypto library

    Written by PM 2Ring 2014.11.13
'''

import os  # needed for the os.name check when loading the library
from ctypes import *

SHA_LBLOCK = 16
SHA256_DIGEST_LENGTH = 32

class SHA256_CTX(Structure):
    _fields_ = [
        ("h", c_long * 8),
        ("Nl", c_long),
        ("Nh", c_long),
        ("data", c_long * SHA_LBLOCK),
        ("num", c_uint),
        ("md_len", c_uint)
    ]

HashBuffType = c_ubyte * SHA256_DIGEST_LENGTH

#crypto = cdll.LoadLibrary("libcrypto.so")
crypto = cdll.LoadLibrary("libeay32.dll" if os.name == "nt" else "libssl.so")

class sha256(object):
    digest_size = SHA256_DIGEST_LENGTH

    def __init__(self, datastr=None):
        self.ctx = SHA256_CTX()
        crypto.SHA256_Init(byref(self.ctx))
        if datastr:
            self.update(datastr)

    def update(self, datastr):
        crypto.SHA256_Update(byref(self.ctx), datastr, c_int(len(datastr)))

    #Clone the current context
    def _copy_ctx(self):
        ctx = SHA256_CTX()
        pointer(ctx)[0] = self.ctx
        return ctx

    def copy(self):
        other = sha256()
        other.ctx = self._copy_ctx()
        return other

    def digest(self):
        #Preserve context in case we get called before hashing is
        # really finished, since SHA256_Final() clears the SHA256_CTX
        ctx = self._copy_ctx()
        hashbuff = HashBuffType()
        crypto.SHA256_Final(hashbuff, byref(self.ctx))
        self.ctx = ctx
        return str(bytearray(hashbuff))

    def hexdigest(self):
        return self.digest().encode('hex')

#Tests
def main():
    import cPickle
    import hashlib

    data = ("Nobody expects ", "the spammish ", "imposition!")

    print "rehash\n"
    shaA = sha256(''.join(data))
    print shaA.hexdigest()
    print repr(shaA.digest())
    print "digest size =", shaA.digest_size
    print

    shaB = sha256()
    shaB.update(data[0])
    print shaB.hexdigest()

    #Test pickling
    sha_pickle = cPickle.dumps(shaB, -1)
    print "Pickle length:", len(sha_pickle)
    shaC = cPickle.loads(sha_pickle)
    shaC.update(data[1])
    print shaC.hexdigest()

    #Test copying. Note that copy can be pickled
    shaD = shaC.copy()
    shaC.update(data[2])
    print shaC.hexdigest()

    #Verify against hashlib.sha256()
    print "\nhashlib\n"
    shaD = hashlib.sha256(''.join(data))
    print shaD.hexdigest()
    print repr(shaD.digest())
    print "digest size =", shaD.digest_size
    print

    shaE = hashlib.sha256(data[0])
    print shaE.hexdigest()
    shaE.update(data[1])
    print shaE.hexdigest()

    #Test copying. Note that hashlib copy can NOT be pickled
    shaF = shaE.copy()
    shaF.update(data[2])
    print shaF.hexdigest()

if __name__ == '__main__':
    main()
resumable_SHA-256.py
#! /usr/bin/env python

''' Resumable SHA-256 hash for large files using the OpenSSL crypto library

    The hashing process may be interrupted by Control-C (SIGINT) or SIGTERM.
    When a signal is received, hashing continues until the end of the
    current chunk, then the current file position, total file size, and
    the sha object is saved to a file. The name of this file is formed by
    appending '.hash' to the name of the file being hashed.

    Just re-run the program to resume hashing. The '.hash' file will be deleted
    once hashing is completed.

    Written by PM 2Ring 2014.11.14
'''

import cPickle as pickle
import os
import signal
import sys

import rehash

quit = False
blocksize = 1 << 16  # 64kB
blocksperchunk = 1 << 8
chunksize = blocksize * blocksperchunk

def handler(signum, frame):
    global quit
    print "\nGot signal %d, cleaning up." % signum
    quit = True

def do_hash(fname, filesize):
    hashname = fname + '.hash'
    if os.path.exists(hashname):
        with open(hashname, 'rb') as f:
            pos, fsize, sha = pickle.load(f)
        if fsize != filesize:
            print "Error: file size of '%s' doesn't match size recorded in '%s'" % (fname, hashname)
            print "%d != %d. Aborting" % (fsize, filesize)
            exit(1)
    else:
        pos, fsize, sha = 0, filesize, rehash.sha256()

    finished = False
    with open(fname, 'rb') as f:
        f.seek(pos)
        while not (quit or finished):
            for _ in xrange(blocksperchunk):
                block = f.read(blocksize)
                if block == '':
                    finished = True
                    break
                sha.update(block)
            pos += chunksize
            sys.stderr.write(" %6.2f%% of %d\r" % (100.0 * pos / fsize, fsize))
            if finished or quit:
                break

    if quit:
        with open(hashname, 'wb') as f:
            pickle.dump((pos, fsize, sha), f, -1)
    elif os.path.exists(hashname):
        os.remove(hashname)

    return (not quit), pos, sha.hexdigest()

def main():
    if len(sys.argv) != 2:
        print "Resumable SHA-256 hash of a file."
        print "Usage:\npython %s filename\n" % sys.argv[0]
        exit(1)

    fname = sys.argv[1]
    filesize = os.path.getsize(fname)

    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    finished, pos, hexdigest = do_hash(fname, filesize)

    if finished:
        print "%s %s" % (hexdigest, fname)
    else:
        print "sha-256 hash of '%s' incomplete" % fname
        print "%s" % hexdigest
        print "%d / %d bytes processed." % (pos, filesize)

if __name__ == '__main__':
    main()
demo
import rehash
import pickle

sha = rehash.sha256("Hello ")
s = pickle.dumps(sha.ctx)

sha = rehash.sha256()
sha.ctx = pickle.loads(s)
sha.update("World")
print sha.hexdigest()
output
a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e
Note: I would like to thank PM2Ring for his wonderful code.
hashlib.sha1 is a wrapper around a C library, so you won't be able to pickle it.
It would need to implement the __getstate__ and __setstate__ methods for Python to access its internal state.
You could use a pure-Python implementation of SHA-1 if it is fast enough for your requirements.
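For illustration, a quick way to see that limitation (Python 3 syntax; the exact error message varies by version):

import hashlib
import pickle

m = hashlib.sha1()
m.update(b"one")
try:
    pickle.dumps(m)
except TypeError as e:
    # CPython refuses: the hash state lives in opaque C memory
    print("cannot pickle:", e)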
I was facing this problem too, and found no existing solution, so I ended up writing a library that does something very similar to what Devesh Saini described: https://github.com/kislyuk/rehash. Example:
import pickle, rehash
hasher = rehash.sha256(b"foo")
state = pickle.dumps(hasher)
hasher2 = pickle.loads(state)
hasher2.update(b"bar")
assert hasher2.hexdigest() == rehash.sha256(b"foobar").hexdigest()
Hash algorithm for dynamic growing/streaming data?
You can easily build a wrapper object around the hash object which transparently persists the data.
The obvious drawback is that it needs to retain the hashed data in full in order to restore the state, so depending on the data size you are dealing with, this may not suit your needs. But it should work fine up to some tens of MB.
Unfortunately, hashlib does not expose the hash algorithms as proper classes; rather, it gives factory functions that construct the hash objects. So we can't properly subclass those without loading reserved symbols, a situation I'd rather avoid. That only means you have to build your wrapper class from scratch, which is not much of an overhead in Python anyway.
Here is sample code that might fill your needs:
import hashlib
from cStringIO import StringIO

class PersistentSha1(object):
    def __init__(self, salt=""):
        self.__setstate__(salt)

    def update(self, data):
        self.__data.write(data)
        self.hash.update(data)

    def __getattr__(self, attr):
        return getattr(self.hash, attr)

    def __setstate__(self, salt=""):
        self.__data = StringIO()
        self.__data.write(salt)
        self.hash = hashlib.sha1(salt)

    def __getstate__(self):
        return self.data

    def _get_data(self):
        self.__data.seek(0)
        return self.__data.read()

    data = property(_get_data, __setstate__)
You can access the "data" member itself to get and set the state directly, or you can use Python's pickling functions:
>>> a = PersistentSha1()
>>> a
<__main__.PersistentSha1 object at 0xb7d10f0c>
>>> a.update("lixo")
>>> a.data
'lixo'
>>> a.hexdigest()
'6d6332a54574aeb35dcde5cf6a8774f938a65bec'
>>> import pickle
>>> b = pickle.dumps(a)
>>>
>>> c = pickle.loads(b)
>>> c.hexdigest()
'6d6332a54574aeb35dcde5cf6a8774f938a65bec'
>>> c.data
'lixo'