Store and compare last two lines using response.iter_lines() - python

I have a rate stream where I need to store and compare the last two lines. For instance, if the new price is higher than the previous one, queue an event. It's my understanding that iter_lines() only shows the last line. My question is: how could I store the last line, wait for a new line, compare the two, and then queue the event? I know this is simple, but I'm still having trouble. Thanks for your help!
Here is my UPDATED (3) stream:
def stream_to_queue(self):
    response = self.connect_to_stream()
    if response.status_code != 200:
        return
    oldLine = ''
    for line in response.iter_lines(1):
        if line < oldLine:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if msg.has_key("instrument") or msg.has_key("tick"):
                print msg["tick"]
                instrument = msg["tick"]["instrument"]
                time = msg["tick"]["time"]
                bid = msg["tick"]["bid"]
                ask = msg["tick"]["ask"]
                stop = msg["tick"]["ask"]
                tev = TickEvent(instrument, time, bid, ask)
                self.events_queue.put(tev)
        oldLine = line

The original function:
def stream_to_queue(self):
    response = self.connect_to_stream()
    if response.status_code != 200:
        return
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            if msg.has_key("instrument") or msg.has_key("tick"):
                print msg["tick"]
                instrument = msg["tick"]["instrument"]
                time = msg["tick"]["time"]
                bid = msg["tick"]["bid"]
                ask = msg["tick"]["ask"]
                stop = msg["tick"]["ask"]
The repaired function:
def stream_to_queue(self):
    response = self.connect_to_stream()
    if response.status_code != 200:
        return
    last_msg = None  # new line
    for line in response.iter_lines(1):
        if line:
            try:
                msg = json.loads(line)
                if last_msg is None:  # new line
                    last_msg = msg  # new line
            except Exception as e:
                print "Caught exception when converting message into json\n" + str(e)
                return
            # can now compare last msg with current msg
            if msg.has_key("instrument") or msg.has_key("tick"):
                print msg["tick"]
                instrument = msg["tick"]["instrument"]
                time = msg["tick"]["time"]
                bid = msg["tick"]["bid"]
                ask = msg["tick"]["ask"]
                stop = msg["tick"]["ask"]
            last_msg = msg  # new line (may want to indent 4 more spaces)
It may make sense to move the if last_msg is None check inside the if msg.has_key block if you want last_msg to always hold certain information.
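For the original goal (queue an event only when the new price is higher than the previous one), here is a rough sketch of what the comparison inside the loop could look like; it assumes the bid field is the price you want to compare, and it reuses TickEvent and self.events_queue from the question:
if msg.has_key("tick"):
    tick = msg["tick"]
    # compare against the previously stored message, if we have one
    if last_msg is not None and last_msg.has_key("tick"):
        if tick["bid"] > last_msg["tick"]["bid"]:  # price went up
            tev = TickEvent(tick["instrument"], tick["time"], tick["bid"], tick["ask"])
            self.events_queue.put(tev)
    last_msg = msg  # remember this tick for the next comparison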

Related

json converting to python 3

I was trying to use code written for Python 2.7 with Python 3.5, but I couldn't solve this error:
TypeError: the JSON object must be str, not 'bytes'
while has_next_page:
    after = '' if after is '' else "&after={}".format(after)
    base_url = base + node + parameters + after + since + until
    url = getFacebookPageFeedUrl(base_url)
    statuses = json.loads(request_until_succeed(url))
    reactions = getReactionsForStatuses(base_url)
    for status in statuses['data']:
        # Ensure it is a status with the expected metadata
        if 'reactions' in status:
            status_data = processFacebookPageFeedStatus(status)
            reactions_data = reactions[status_data[0]]
            # calculate thankful/pride through algebra
            num_special = status_data[6] - sum(reactions_data)
            w.writerow(status_data + reactions_data + (num_special,))
        num_processed += 1
        if num_processed % 100 == 0:
            print(("{} Statuses Processed: {}".format
                   (num_processed, datetime.datetime.now())))
    # if there is no next page, we're done.
    if 'paging' in statuses:
        after = statuses['paging']['cursors']['after']
    else:
        has_next_page = False
The problem is the line with json.loads. Does anyone have an idea how to solve it?
Thank you.
here is the request_until_succeed function:
def request_until_succeed(url):
    req = Request(url)
    success = False
    while success is False:
        try:
            response = urlopen(req)
            if response.getcode() == 200:
                success = True
        except Exception as e:
            print(e)
            time.sleep(5)
            print("Error for URL {}: {}".format(url, datetime.datetime.now()))
            print("Retrying.")
    return response.read()
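No answer is recorded here, but the error message points at a well-known Python 3 change: urlopen(...).read() returns bytes, while json.loads (before Python 3.6) only accepts str. A minimal sketch of the usual fix, decoding the response before parsing; the 'utf-8' encoding is an assumption about the API response:
# Sketch: decode the bytes from request_until_succeed before json.loads.
raw = request_until_succeed(url)            # bytes under Python 3
statuses = json.loads(raw.decode('utf-8'))  # decode to str first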

Error in Python Script?

I keep getting an error when I run this Python program. It says there is no such file or directory at '/path/to/times-testing.log'.
I don't quite understand why; can anyone help me fix this problem?
Thank you in advance!
Here's the code:
import urllib2
import json
import datetime
import time
import sys, os
import logging
from urllib2 import HTTPError
from ConfigParser import SafeConfigParser

# helper function to iterate through dates
def daterange( start_date, end_date ):
    if start_date <= end_date:
        for n in range( ( end_date - start_date ).days + 1 ):
            yield start_date + datetime.timedelta( n )
    else:
        for n in range( ( start_date - end_date ).days + 1 ):
            yield start_date - datetime.timedelta( n )

# helper function to get json into a form I can work with
def convert(input):
    if isinstance(input, dict):
        return {convert(key): convert(value) for key, value in input.iteritems()}
    elif isinstance(input, list):
        return [convert(element) for element in input]
    elif isinstance(input, unicode):
        return input.encode('utf-8')
    else:
        return input

# helpful function to figure out what to name individual JSON files
def getJsonFileName(date, page, json_file_path):
    json_file_name = ".".join([date,str(page),'json'])
    json_file_name = "".join([json_file_path,json_file_name])
    return json_file_name

# helpful function for processing keywords, mostly
def getMultiples(items, key):
    values_list = ""
    if len(items) > 0:
        num_keys = 0
        for item in items:
            if num_keys == 0:
                values_list = item[key]
            else:
                values_list = "; ".join([values_list,item[key]])
            num_keys += 1
    return values_list

# get the articles from the NYTimes Article API
def getArticles(date, query, api_key, json_file_path):
    # LOOP THROUGH THE 101 PAGES NYTIMES ALLOWS FOR THAT DATE
    for page in range(101):
        for n in range(5): # 5 tries
            try:
                request_string = "http://api.nytimes.com/svc/search/v2/articlesearch.json?begin_date=" + date + "&end_date=" + date + "&page=" + str(page) + "&api-key=" + api_key
                response = urllib2.urlopen(request_string)
                content = response.read()
                if content:
                    articles = convert(json.loads(content))
                    # if there are articles here
                    if len(articles["response"]["docs"]) >= 1:
                        json_file_name = getJsonFileName(date, page, json_file_path)
                        json_file = open(json_file_name, 'w')
                        json_file.write(content)
                        json_file.close()
                    # if no more articles, go to next date
                    else:
                        return
                time.sleep(3) # wait so we don't overwhelm the API
            except HTTPError as e:
                logging.error("HTTPError on page %s on %s (err no. %s: %s) Here's the URL of the call: %s", page, date, e.code, e.reason, request_string)
                if e.code == 403:
                    print "Script hit a snag and got an HTTPError 403. Check your log file for more info."
                    return
                if e.code == 429:
                    print "Waiting. You've probably reached an API limit."
                    time.sleep(30) # wait 30 seconds and try again
            except:
                logging.error("Error on %s page %s: %s", date, file_number, sys.exc_info()[0])
                continue

# parse the JSON files you stored into a tab-delimited file
def parseArticles(date, tsv_file_name, json_file_path):
    for file_number in range(101):
        # get the articles and put them into a dictionary
        try:
            file_name = getJsonFileName(date,file_number, json_file_path)
            if os.path.isfile(file_name):
                in_file = open(file_name, 'r')
                articles = convert(json.loads(in_file.read()))
                in_file.close()
            else:
                break
        except IOError as e:
            logging.error("IOError in %s page %s: %s %s", date, file_number, e.errno, e.strerror)
            continue
        # if there are articles in that document, parse them
        if len(articles["response"]["docs"]) >= 1:
            # open the tsv for appending
            try:
                out_file = open(tsv_file_name, 'ab')
            except IOError as e:
                logging.error("IOError: %s %s %s %s", date, file_number, e.errno, e.strerror)
                continue
            # loop through the articles putting what we need in a tsv
            try:
                for article in articles["response"]["docs"]:
                    # if (article["source"] == "The New York Times" and article["document_type"] == "article"):
                    keywords = ""
                    keywords = getMultiples(article["keywords"],"value")
                    # should probably pull these if/else checks into a module
                    variables = [
                        article["pub_date"],
                        keywords,
                        str(article["headline"]["main"]).decode("utf8").replace("\n","") if "main" in article["headline"].keys() else "",
                        str(article["source"]).decode("utf8") if "source" in article.keys() else "",
                        str(article["document_type"]).decode("utf8") if "document_type" in article.keys() else "",
                        article["web_url"] if "web_url" in article.keys() else "",
                        str(article["news_desk"]).decode("utf8") if "news_desk" in article.keys() else "",
                        str(article["section_name"]).decode("utf8") if "section_name" in article.keys() else "",
                        str(article["snippet"]).decode("utf8").replace("\n","") if "snippet" in article.keys() else "",
                        str(article["lead_paragraph"]).decode("utf8").replace("\n","") if "lead_paragraph" in article.keys() else "",
                    ]
                    line = "\t".join(variables)
                    out_file.write(line.encode("utf8")+"\n")
            except KeyError as e:
                logging.error("KeyError in %s page %s: %s %s", date, file_number, e.errno, e.strerror)
                continue
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                logging.error("Error on %s page %s: %s", date, file_number, sys.exc_info()[0])
                continue
            out_file.close()
        else:
            break

# Main function where stuff gets done
def main():
    config = SafeConfigParser()
    script_dir = os.path.dirname(__file__)
    config_file = os.path.join(script_dir, 'config/settings.cfg')
    config.read(config_file)
    json_file_path = config.get('files','json_folder')
    tsv_file_name = config.get('files','tsv_file')
    log_file = config.get('files','logfile')
    api_key = config.get('nytimes','api_key')
    start = datetime.date( year = int(config.get('nytimes','start_year')), month = int(config.get('nytimes','start_month')), day = int(config.get('nytimes','start_day')) )
    end = datetime.date( year = int(config.get('nytimes','end_year')), month = int(config.get('nytimes','end_month')), day = int(config.get('nytimes','end_day')) )
    query = config.get('nytimes','query')
    logging.basicConfig(filename=log_file, level=logging.INFO)
    logging.info("Getting started.")
    try:
        # LOOP THROUGH THE SPECIFIED DATES
        for date in daterange( start, end ):
            date = date.strftime("%Y%m%d")
            logging.info("Working on %s." % date)
            getArticles(date, query, api_key, json_file_path)
            parseArticles(date, tsv_file_name, json_file_path)
    except:
        logging.error("Unexpected error: %s", str(sys.exc_info()[0]))
    finally:
        logging.info("Finished.")

if __name__ == '__main__' :
    main()
It generates the following error when I run it:
Rakeshs-MacBook-Air:get-nytimes-articles-master niharika$ python getTimesArticles.py
Traceback (most recent call last):
File "getTimesArticles.py", line 180, in <module>
main()
File "getTimesArticles.py", line 164, in main
logging.basicConfig(filename=log_file, level=logging.INFO)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 1545, in basicConfig
hdlr = FileHandler(filename, mode)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 911, in __init__
StreamHandler.__init__(self, self._open())
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/logging/__init__.py", line 941, in _open
stream = open(self.baseFilename, self.mode)
IOError: [Errno 2] No such file or directory: '/path/to/times-testing.log'
Rakeshs-MacBook-Air:get-nytimes-articles-master niharika$
Your main() function:
def main():
    config = SafeConfigParser()
    script_dir = os.path.dirname(__file__)
    config_file = os.path.join(script_dir, 'config/settings.cfg')
    config.read(config_file)
    ...
    log_file = config.get('files','logfile')
    ...
    logging.basicConfig(filename=log_file, level=logging.INFO)
opens the config/settings.cfg file and fetches the name of the log file, which still appears to be the placeholder /path/to/times-testing.log. You either need to create that path (probably not the best idea) or edit the config so it points to a real, writable log file.
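For illustration only, a sketch of what the [files] section of config/settings.cfg might look like once the placeholders are replaced; the section and key names come from the script above, but the concrete paths are hypothetical examples:
[files]
; hypothetical paths -- point these at directories/files that exist on your machine
json_folder = /Users/yourname/nytimes/json/
tsv_file = /Users/yourname/nytimes/articles.tsv
logfile = /Users/yourname/nytimes/times-testing.log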

Why is the regex function always popping up an AttributeError?

I have been writing a function in Python to get the IP of a computer. The code is given below:
def getip(self):
    self.path = "/root"
    self.iplist = []
    os.chdir(self.path)
    os.system("ifconfig > ipfinder")
    try:
        file = open("ipfinder","r")
        self.pattern = '(\d{1,3}\.){3}\d{1,3}'
        while True:
            line = file.readline()
            try:
                ip = re.search(self.pattern, line).group()
                self.iplist.append(ip)
            except AttributeError:
                pass
        file.close()
    except EOFError:
        for ip in self.iplist:
            print ip
I know this is not a good way to get the IP of a machine. The problem is that the AttributeError pops up every single time. Why is it happening? Why can't a match be found?
I ran it on my local machine and found 4 things to be modified:
a) regex: \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}
b) strip any extra whitespace while reading: file.readline().strip()
c) if it reaches the end of the file, break out of the while loop:
if line == '':
    break
d) instead of re.search, use re.findall
The modified code that works on my system without an AttributeError is:
def getip(self):
    self.path = "/root"
    self.iplist = []
    os.chdir(self.path)
    os.system("ifconfig > ipfinder")
    try:
        file = open("ipfinder","r")
        self.pattern = '\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}'
        while True:
            line = file.readline().strip()
            if line == '':
                break
            try:
                ip = re.findall(self.pattern, line)
                self.iplist.append(ip)
            except AttributeError:
                pass
        file.close()
    except EOFError:
        for ip in self.iplist:
            print ip
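For what it's worth, the AttributeError itself comes from calling .group() on the None that re.search returns for lines containing no IP address (which is most lines of ifconfig output). A small sketch of guarding the match while keeping re.search, if you prefer that over re.findall:
m = re.search(self.pattern, line)
if m is not None:  # re.search returns None when there is no match on this line
    self.iplist.append(m.group())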

How to properly debug ThreadPool?

I'm trying to get some data from a web page. To speed up this process (they allow me to make 1000 requests per minute), I use ThreadPool.
Since there is a huge amount of data, the process is quite vulnerable to connection failures and the like, so I try to log everything I can in order to detect every mistake I made in the code.
The problem is that the program sometimes just stops without any exception (it acts as if it is running, but with no effect; I use PyCharm). I log caught exceptions everywhere I can, but I can't see any exception in any log.
I assume that if a timeout were reached, an exception would be raised and logged.
I've found out where the problem could be. Here is the code:
As a pool, I use: from multiprocessing.pool import ThreadPool as Pool
And lock: from threading import Lock
The download_category function is used in a loop.
def download_category(url):
    # some code
    #
    # ...
    try:  # matching try for the except below; its exact position is assumed
        log('Create pool...')
        _pool = Pool(_workers_number)
        with open('database/temp_produkty.txt') as f:
            log('Spracovavanie produktov... vytvaranie vlakien...')  # "Processing products... creating threads..." -- I see this in log
            for url_product in f:
                x = _pool.apply_async(process_product, args=(url_product.strip('\n'), url))
        _pool.close()
        _pool.join()
        log('Presuvanie produktov z temp export do export.csv...')  # "Moving products from temp export to export.csv..." -- I can't see this in log
        temp_export_to_export_csv()
        set_spracovanie_kategorie(url)
    except Exception as e:
        logging.exception('Got exception on download_one_category: {}'.format(url))
And process_product function:
def process_product(url, cat):
    try:
        data = get_product_data(url)
    except:
        log('{}: {} exception while getting product data... #')  # I don't see this in log
        return
    try:
        print_to_temp_export(data, cat)  # I don't see this in log
    except:
        log('{}: {} exception while printing to csv... #')  # I don't see this in log
        raise
LOG function:
def log(text):
    now = datetime.now().strftime('%d.%m.%Y %H:%M:%S')
    _lock.acquire()
    mLib.printToFile('logging/log.log', '{} -> {}'.format(now, text))
    _lock.release()
I use the logging module too. In that log I can see that a request was sent, probably 8 times (the number of workers), but no answer was ever received.
EDIT1:
def get_product_data(url):
    data = defaultdict(lambda: '-')
    root = load_root(url)
    try:
        nazov = root.xpath('//h1[@itemprop="name"]/text()')[0]
    except:
        nazov = root.xpath('//h1/text()')[0]
    under_block = root.xpath('//h2[@id="lowest-cost"]')
    if len(under_block) < 1:
        under_block = root.xpath('//h2[contains(text(),"Naj")]')
    if len(under_block) < 1:
        return False
    data['nazov'] = nazov
    data['url'] = url
    blocks = under_block[0].xpath('./following-sibling::div[@class="shp"]/div[contains(@class,"shp")]')
    i = 0
    for block in blocks:
        i += 1
        data['dat{}_men'.format(i)] = block.xpath('.//a[@class="link"]/text()')[0]
    del root
    return data
LOAD ROOT:
class RedirectException(Exception):
    pass

def load_url(url):
    r = requests.get(url, allow_redirects=False)
    if r.status_code == 301:
        raise RedirectException
    if r.status_code == 404:
        if '-q-' in url:
            url = url.replace('-q-','-')
            mLib.printToFileWOEncoding('logging/neexistujuce.txt','Skusanie {} kategorie...'.format(url))
            return load_url(url)  # THIS IS NOT LOOPING
        else:
            mLib.printToFileWOEncoding('logging/neexistujuce.txt','{}'.format(url))
    html = r.text
    return html

def load_root(url):
    try:
        html = load_url(url)
    except Exception as e:
        logging.exception('load_root_exception')
        raise
    return etree.fromstring(html, etree.HTMLParser())
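No answer is recorded for this question, but one thing worth checking: requests.get() never times out unless you pass a timeout, so a worker whose connection silently hangs will block forever without raising anything, which fits the "runs but does nothing" symptom. Below is a sketch (not the author's code; fetch_with_timeout and the 30-second value are hypothetical examples) of passing an explicit timeout so a stuck request surfaces as an exception the existing logging can catch:
import requests

def fetch_with_timeout(url):
    # Same call as in load_url, but with an explicit timeout so a hung
    # connection raises requests.exceptions.Timeout instead of blocking
    # a worker thread forever.
    return requests.get(url, allow_redirects=False, timeout=30)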

Python "while true" loop does NOT end (using Python Tweepy)

The following code seems to be mostly "working", meaning that it scrapes all the tweets from the Twitter API for a specified day. However, the while True loop never seems to break, and I don't see the expected "Finished!!!" string even though the csv file is complete.
import tweepy
import time
import csv

ckey = "xxx"
csecret = "xxx"
atoken = "xxx-xxx"
asecret = "xxx"

OAUTH_KEYS = {'consumer_key':ckey, 'consumer_secret':csecret,
              'access_token_key':atoken, 'access_token_secret':asecret}
auth = tweepy.OAuthHandler(OAUTH_KEYS['consumer_key'], OAUTH_KEYS['consumer_secret'])
api = tweepy.API(auth)

startSince = '2014-10-03'
endUntil = '2014-10-04'
suffix = '_03OCT2014.csv'
searchTerms = 'xyz'

tweets = tweepy.Cursor(api.search, q=searchTerms,
                       since=startSince, until=endUntil).items()

while True:
    try:
        for tweet in tweets:
            placeHolder = []
            placeHolder.append(tweet.author.name.encode('utf8'))
            placeHolder.append(tweet.author.screen_name.encode('utf8'))
            placeHolder.append(tweet.created_at)
            prefix = 'TweetData_xyz'
            wholeFileName = prefix + suffix
            with open(wholeFileName, "ab") as f:
                writeFile = csv.writer(f)
                writeFile.writerow(placeHolder)
    except tweepy.TweepError:
        time.sleep(60*15)
        continue
    except IOError:
        time.sleep(60*5)
        continue
    except StopIteration:
        break

print "Finished!!!"
StopIteration never reaches your except handler: the for statement consumes it when tweepy.Cursor().items() is exhausted, so it does not propagate any further.
Just break out of the loop when the for loop ends:
while True:
    try:
        for tweet in tweets:
            # do stuff
        # completed iterating successfully
        break
and remove the except StopIteration: handler altogether.
Your code has no exit condition. It seems you don't want to exit the loop when an error is thrown, so I assume that when you reach the end of the while body you would like to exit, yes?
def process_tweet(tweet):
    placeHolder = []
    placeHolder.append(tweet.author.name.encode('utf8'))
    placeHolder.append(tweet.author.screen_name.encode('utf8'))
    placeHolder.append(tweet.created_at)
    prefix = 'TweetData_xyz'
    wholeFileName = prefix + suffix
    with open(wholeFileName, "ab") as f:
        writeFile = csv.writer(f)
        writeFile.writerow(placeHolder)

while True:
    try:
        for tweet in tweets:
            process_tweet(tweet)
        break
    except tweepy.TweepError:
        time.sleep(60*15)
        continue
    except IOError:
        time.sleep(60*5)
        continue
    except StopIteration:
        break

print "Finished!!!"
