I have been searching for a solution. I am trying to get weather information for a location ID in my database: the command is meant to take location_id from the Location model and store the retrieved information in WeatherLog, another model in my database. The code and the errors are below:
# -*- coding: UTF-8 -*-
import urllib
from xml.dom import minidom
from xml.dom.minidom import parse
from argparse import ArgumentParser
from pprint import pprint
from datetime import datetime
from django.db import models
from django.core.management.base import NoArgsCommand
Location = models.get_model("weatherapp", "Location")
WeatherLog = models.get_model("weatherapp", "WeatherLog")
SILENT, NORMAL, VERBOSE = 0, 1, 2
WEATHER_URL = 'http://weather.yahooapis.com/forecastrss?p=%s&u=c'
METRIC_PARAMETER = ''
WEATHER_NS = "http://xml.weather.yahoo.com/ns/rss/1.0"
def weather_for_location(location_id, options):
    # taken from http://developer.yahoo.com/python/python-xml.html
    # and modified a little
    url = WEATHER_URL % location_id
    try:
        dom = minidom.parse(urllib.urlopen(url))
    except Exception:
        return None
    # Get the units of the current feed.
    yunits = dom.getElementsByTagNameNS(WEATHER_NS, 'units')[0]
    # Get the location of the specified location code.
    ylocation = dom.getElementsByTagNameNS(WEATHER_NS, 'location')[0]
    # Get the current conditions.
    ycondition = dom.getElementsByTagNameNS(WEATHER_NS, 'condition')[0]
    forecasts = []
    for node in enumerate(dom.getElementsByTagNameNS(WEATHER_NS, 'forecast')):
        forecasts.append({
            'date': node.getAttribute('date'),
            'low': node.getAttribute('low'),
            'high': node.getAttribute('high'),
            'condition': node.getAttribute('text')
        })
    return {
        'current_condition': ycondition.getAttribute('text'),
        'current_temp': ycondition.getAttribute('temp'),
        'current_humidity': yatmosphere.getAttribute('humidity'),
        'current_visibility': yatmosphere.getAttribute('visibility'),
        'current_wind_speed': ywind.getAttribute('speed'),
        'forecasts': forecasts,
        'title': dom.getElementsByTagName('title')[0].firstChild.data,
        'guid': dom.getElementsByTagName('guid')[0].firstChild.data,
    }
class Command(NoArgsCommand):
    help = "Aggregates data from weather feed"

    def handle_noargs(self, **options):
        verbosity = int(options.get('verbosity', NORMAL))
        created_count = 0
        for l in Location.objects.all():
            weather = weather_for_location(l.location_id, options)
            if verbosity > NORMAL:
                pprint(weather)
            timestamp_parts = map(int, weather['guid'].split("_")[1:-1])
            timestamp = datetime(*timestamp_parts)
            log, created = WeatherLog.objects.get_or_create(
                location=l,
                timestamp=timestamp,
                defaults={
                    'temperature': weather['current_temp'],
                    'humidity': weather['current_humidity'],
                    'wind_speed': weather['current_wind_speed'],
                    'visibility': weather['current_visibility'],
                }
            )
            if created:
                created_count += 1
        if verbosity > NORMAL:
            print "New weather logs: %d" % created_count
error:
File "/home/temi/rapidsmstuts/myapp/weatherapp/management/commands/check_weather.py", line 74, in handle_noargs
    timestamp_parts = map(int, weather['guid'].split("_")[1:-1])
TypeError: 'NoneType' object has no attribute '__getitem__'

File "/home/temi/rapidsmstuts/myapp/weatherapp/management/commands/check_weather.py", line 47, in weather_for_location
    'date': node.getAttribute('date'),
AttributeError: 'tuple' object has no attribute 'getAttribute'

File "/home/temi/rapidsmstuts/myapp/weatherapp/management/commands/check_weather.py", line 35, in weather_for_location
    yunits = dom.getElementsByTagNameNS(WEATHER_NS, 'units')[0]
IndexError: list index out of range
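For reference, a hedged sketch of the spots the three tracebacks point at. The yatmosphere and ywind lookups are assumptions based on the names used in the return dictionary and the yweather:atmosphere / yweather:wind elements of the Yahoo feed; they are not part of the original code.

# Hedged sketch of the failing spots only, not the full command.

# AttributeError: enumerate() yields (index, node) tuples, so iterate over
# the nodes directly instead:
for node in dom.getElementsByTagNameNS(WEATHER_NS, 'forecast'):
    forecasts.append({
        'date': node.getAttribute('date'),
        'low': node.getAttribute('low'),
        'high': node.getAttribute('high'),
        'condition': node.getAttribute('text'),
    })

# The return dict reads yatmosphere and ywind, which are never looked up;
# these lookups are an assumption based on the Yahoo feed element names:
yatmosphere = dom.getElementsByTagNameNS(WEATHER_NS, 'atmosphere')[0]
ywind = dom.getElementsByTagNameNS(WEATHER_NS, 'wind')[0]

# IndexError: the feed can come back without any yweather elements
# (for example, for a bad location id), so guard before indexing:
units_nodes = dom.getElementsByTagNameNS(WEATHER_NS, 'units')
if not units_nodes:
    return None

# TypeError in handle_noargs: weather_for_location() returns None on failure,
# so skip those locations before touching weather['guid']:
if weather is None:
    continue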
Related
Hi, I have two functions that use different kinds of datetime imports. I know where the problem is, but I do not know how to solve it.
my code:
from datetime import datetime
import datetime
def upload_video(title, description, tags, upload_year, uplaod_month, upload_day):
    upload_date_time = datetime.datetime(upload_year, uplaod_month, upload_day, 8, 00, 0).isoformat() + '.000Z'
    print(f"this is a upload time {upload_date_time}")
    request_body = {
        'snippet': {
            'categoryI': 19,
            'title': title,
            'description': description,
            'tags': tags
        },
        'status': {
            'privacyStatus': 'private',
            'publishAt': upload_date_time,
            'selfDeclaredMadeForKids': False,
        },
        'notifySubscribers': False
    }
    mediaFile = MediaFileUpload('output.MP4')
    response_upload = service.videos().insert(
        part='snippet,status',
        body=request_body,
        media_body=mediaFile
    ).execute()
def date_calculator():
    days_in_months = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    year = datetime.now().year
    month = datetime.now().month
    # Read the last used date from the text file
    with open("last_used_date.txt", "r") as f:
        last_used_date = f.read().strip()
    # If the file is empty or the date is invalid, set the last used date to the current date
    if not last_used_date or not all(c.isdigit() for c in last_used_date.split(".")):
        last_used_day = datetime.now().day
        last_used_month = month
    else:
        last_used_day, last_used_month = map(int, last_used_date.split(".")[:2])
    # Generate new dates until the next one is greater than the current date
    number = 0
    number_test = 1
    while True:
        date = "{}.{}.{}".format(last_used_day, last_used_month, year)
        number += 1
        if last_used_day == days_in_months[month]:
            last_used_month += 1
            last_used_day = 1
        else:
            last_used_day += 1
        if number == 2:
            last_used_day += 1
            number = 0
            number_test += 1
        if (last_used_month > month or
                (last_used_month == month and last_used_day > datetime.now().day)):
            with open("last_used_date.txt", "w") as f:
                f.write("{}.{}.{}".format(last_used_day, last_used_month, year))
            break
    return last_used_day, last_used_month, year
error:
Traceback (most recent call last):
  File "c:\Users\Lukas\Dokumenty\python_scripts\Billionare livestyle\main.py", line 233, in <module>
    day,month,year = date_calculator()
  File "c:\Users\Lukas\Dokumenty\python_scripts\Billionare livestyle\main.py", line 162, in date_calculator
    year = datetime.now().year
AttributeError: module 'datetime' has no attribute 'now'
If I change the imports like this:
import datetime
from datetime import datetime
the error looks like this:
Traceback (most recent call last):
  File "c:\Users\Lukas\Dokumenty\python_scripts\Billionare livestyle\main.py", line 235, in <module>
    upload_video(title, "#Shorts", ["motivation", "business", "luxury", "entrepreneurship", "success", "lifestyle", "inspiration", "wealth", "financial freedom", "investing", "mindset", "personal development", "self-improvement", "goals", "hustle", "ambition", "rich life", "luxury lifestyle", "luxury brand", "luxury travel", "luxury cars"], year, month, day)
  File "c:\Users\Lukas\Dokumenty\python_scripts\Billionare livestyle\main.py", line 74, in upload_video
    upload_date_time = datetime.datetime(upload_year,uplaod_month,upload_day, 8, 00, 0).isoformat() + '.000Z'
AttributeError: type object 'datetime.datetime' has no attribute 'datetime'
Unfortunately, the datetime module and the datetime class inside it are spelled exactly the same way. You need to pick one of those things to import and then use it consistently. i.e. either:
import datetime # I want the whole datetime module
datetime.datetime.now() # Must use the fully qualified class name
datetime.date.today() # But I can also use other classes from the module
Or:
from datetime import datetime # I only want the datetime class
datetime.now() # Use the class directly
The name datetime can only mean one thing at a time, so if you do both imports, all that's happening is that the second meaning overwrites the first one.
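Applied to the code above, a minimal sketch keeping only the from datetime import datetime form; the helper names below are hypothetical, not from the original script:

from datetime import datetime  # import the class once; drop the bare "import datetime"

def build_upload_timestamp(upload_year, upload_month, upload_day):
    # Use the class directly: datetime(...), not datetime.datetime(...)
    return datetime(upload_year, upload_month, upload_day, 8, 0, 0).isoformat() + '.000Z'

def current_day_month_year():
    # datetime.now() works because datetime is the class here, not the module
    now = datetime.now()
    return now.day, now.month, now.year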
I'm trying to import data using an API from IG. It seems to fetch historical prices with 'num_points' fine but when I try to use a date range, it returns an error.
session = requests_cache.CachedSession(cache_name='cache', backend='sqlite', expire_after=timedelta(hours=1))

class config(object):
    username = "xxx"
    password = "xxx"
    api_key = "xxx"
    acc_type = "Demo-cfd"
    acc_number = "xxx"

ig_service = IGService(config.username, config.password, config.api_key)
ig_service.create_session()

epic = 'CS.D.EURUSD.MINI.IP'
resolution = 'D'
num_points = 10
response = ig_service.fetch_historical_prices_by_epic_and_num_points(epic, resolution, num_points)
# works fine
df_ask = response['prices']['ask']
print("ask prices:\n%s" % df_ask)
But if I replace the num_points parameter with a date range and change the function, I'm getting an error:
epic = 'CS.D.EURUSD.MINI.IP'
resolution = 'D'
(start_date, end_date) = ('2015-09-15', '2015-09-28')
response = ig_service.fetch_historical_prices_by_epic_and_date_range(epic, resolution, start_date, end_date)
df_ask = response['prices']['ask']
print("ask prices:\n%s" % df_ask)
Error:
File "/Users/xxx/untitled5.py", line 53, in <module>
response = ig_service.fetch_historical_prices_by_epic_and_date_range(epic, resolution, start_date, end_date)
File "/opt/anaconda3/lib/python3.9/site-packages/trading_ig/rest.py", line 1618, in fetch_historical_prices_by_epic_and_date_range
data = self.parse_response(response.text)
File "/opt/anaconda3/lib/python3.9/site-packages/trading_ig/rest.py", line 342, in parse_response
raise (Exception(response["errorCode"]))
Exception: error.malformed.date
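One thing worth checking, as an assumption rather than a confirmed fix: the IG endpoint behind fetch_historical_prices_by_epic_and_date_range is strict about the date format it receives, and bare YYYY-MM-DD strings may not match what your installed version of trading_ig forwards. A sketch with full timestamps (adjust the format to whatever the rest.py you have installed expects):

epic = 'CS.D.EURUSD.MINI.IP'
resolution = 'D'

# Assumption: pass full date-time strings; check trading_ig/rest.py for the
# exact format your version forwards to the IG /prices endpoint.
start_date = '2015-09-15 00:00:00'
end_date = '2015-09-28 00:00:00'

response = ig_service.fetch_historical_prices_by_epic_and_date_range(
    epic, resolution, start_date, end_date)
df_ask = response['prices']['ask']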
I'm very new to Python. My requirement is that I have a ClearQuest (CQ) web page and need to update the status of a bug ID based on particular fields.
Here is the sample code I'm trying:
import httplib2
import json
import getpass
import urllib
from string import Template
from xml.dom.minidom import parseString

class Credentials():
    def assign_user(self):
        self._user = 'user'
    def assign_passwd(self):
        self._passwd = 'pawrd'

user_cred = Credentials()

class RestLink:
    def __init__(self, link, baseline_cr='ENGR00xxxx'):
        self._link = Template(link)
        self.cr = baseline_cr
    def get_link(self):
        return self._link.safe_substitute(recordId=self.cr,
                                          loginid=user_cred.get_user(),
                                          password=user_cred.get_passwd())

class CQBase:
    SERVER = 'cq.am.domain.net'
    RESPONSE_OK = 'OK'
    def __init__(self, logger):
        self._logger = logger
    def send_request(self):
        data = ''
        try:
            conn = httplib2.HTTPConnectionWithTimeout(self.SERVER)
            conn.request("GET", link)
            res = conn.getresponse()
            data = res.read()
            if res.reason != self.RESPONSE_OK:
                raise ParseException('Cannot execute request!')
            conn.close()
        except:
            conn.close()
            raise
        return data

class CQIssueReader(CQBase):
    VIEW_CR_LINK = '/cqweb/restapi/TSR/ENGR/RECORD/${recordId}?format=JSON&recordType=CR&loginId=${loginid}&password=${password}&noframes=true'
    def __init__(self, cr, logger):
        CQBase.__init__(self, logger)
        self._cr = cr
        self._headline = ''
        self._subtype = ''
        self._branch = ''
        self._is_resolved = 0
        self._is_integrated = 0
        self.parse_cr()
    def parse_cr(self):
        self._is_resolved = False
        self._is_integrated = False
        data = self.send_request(RestLink(self.VIEW_CR_LINK, self._cr).get_link())
        parsedData = json.loads(data)
        for field in parsedData['fields']:
            if field['FieldName'] == 'Headline':
                self._headline = field['CurrentValue']
            if field['FieldName'] == 'Integrated':
                self._logger.log_details('\tIntegrated = ' + field['CurrentValue'])
                if field['CurrentValue'] == 'Y':
                    self._is_integrated = True
            if field['FieldName'] == 'State':
                self._logger.log_details('\tState = ' + field['CurrentValue'])
                if (field['CurrentValue'] == 'Resolved') or (field['CurrentValue'] == 'Closed') \
                        or (field['CurrentValue'] == 'Verified'):
                    self._is_resolved = True
            if field['FieldName'] == 'Subtype':
                self._subtype = field['CurrentValue']
            if field['FieldName'] == 'BranchName':
                self._branch = field['CurrentValue']
                self._logger.log_details('\tBranchName = ' + self._branch)
    def get_headline(self):
        return self._headline
    def get_subtype(self):
        return self._subtype
    def get_branch_name(self):
        return self._branch

test = CQIssueReader(CQBase)
test_data = CQIssueReader.parse_cr()
print(test_data)
I get the following error with the above code:
Traceback (most recent call last):
File "test.py", line 97, in <module>
test = CQIssueReader(CQBase)
TypeError: __init__() missing 1 required positional argument: 'logger'
Kindly guide me on where I'm going wrong.
According to def __init__(self, cr, logger):, your class takes two positional arguments, cr and logger. In test = CQIssueReader(CQBase) you are passing CQBase as cr and not passing a logger at all, which is why the missing-argument error is raised.
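A minimal sketch of a working instantiation, assuming a trivial logger is acceptable; SimpleLogger and the record ID 'ENGR001234' are hypothetical stand-ins for whatever your project actually uses:

class SimpleLogger:
    # parse_cr() only calls log_details(), so that is all this stub provides
    def log_details(self, message):
        print(message)

logger = SimpleLogger()
test = CQIssueReader('ENGR001234', logger)  # cr first, then logger

# parse_cr() already runs inside __init__, so read the results via the getters:
print(test.get_headline())
print(test.get_subtype())
print(test.get_branch_name())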
I have the following script for scraping a webform:
#!/usr/bin/env python
"""
Python script for searching firms on http://www.adviserinfo.sec.gov/IAPD/Content/Search/iapd_Search.aspx
Assumes that the input Excel file is in the same directory as this script.
"""
import os
import re
import sys
import django
import mechanize
from bs4 import BeautifulSoup
from xlrd import open_workbook

XLSX_FILE = 'Legal Names.xlsx'
IAPD_URL = 'http://www.adviserinfo.sec.gov/IAPD/Content/Search/iapd_Search.aspx'

#------------------------------------------------------------------------------------------
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), 'scraper/')))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), 'scraper/scraper/')))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
django.setup()

from django.core.exceptions import ObjectDoesNotExist
from custom_scraper.models import *
#------------------------------------------------------------------------------------------

def legal_names(file=XLSX_FILE):
    '''
    Read in legal names from the excel spreadsheet with the assumption
    that names to be searched for are in column 1. Skip row 1 header
    '''
    wb = open_workbook(file)
    s = wb.sheets()[0]
    col = 1
    for row in range(s.nrows)[1:]:
        name = s.cell(row, col).value
        if type(name) == int:
            continue
        yield name

class IapdScraper(object):
    def __init__(self):
        self.br = mechanize.Browser()
        self.br.addheaders = [('User-agent',
                               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.63 Safari/535.7')]

    def scrape(self, q):
        '''
        Search for a Firm in the IAPD database by name
        '''
        def select_form(form):
            return form.attrs.get('id', None) == 'aspnetForm'

        try:
            firm = IapdFirm.objects.get(query_name=q)
        except ObjectDoesNotExist:
            firm = IapdFirm(query_name=q)
            firm.save()
        else:
            if firm.checked:
                return

        self.br.open(IAPD_URL)
        self.br.select_form(predicate=select_form)
        self.br.form['ctl00$cphMainContent$ucUnifiedSearch$rdoSearchBy'] = ['rdoOrg']
        self.br.submit()

        self.br.select_form(predicate=select_form)
        self.br.form['ctl00$cphMainContent$ucUnifiedSearch$txtFirm'] = q.encode('utf8')
        self.br.submit()

        s = BeautifulSoup(self.br.response().read())
        r = re.compile(r'^ctl\d+_cphMainContent_grOrgResults$')
        t = s.find('table', id=r)
        if not t:  # Not found
            print 'Not Found'
            firm.checked = True
            firm.save()
            return

        tr = t.findAll('tr', recursive=False)
        tr = tr[2:-1]  # Skip records-per-page header/footer and title header
        for row in tr:
            td = row.findAll('td', recursive=False)
            firm.legal_name = td[0].b.text.strip()
            firm.other_name = td[1].text.strip()
            firm.sec_number = td[2].text.strip()
            firm.address = td[3].text.strip()
            firm.checked = True
            firm.save()

if __name__ == '__main__':
    scraper = IapdScraper()
    for name in legal_names():
        print '\nq=%s' % name
        scraper.scrape(q=name)
When I run the script in my venv, however, I get the following error:
c:\Users\bal2155\venv\Scripts>scraper.py
Traceback (most recent call last):
  File "C:\Users\bal2155\venv\Scripts\scraper.py", line 26, in <module>
    django.setup()
  File "C:\Python27\lib\site-packages\django\__init__.py", line 20, in setup
    configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
  File "C:\Python27\lib\site-packages\django\conf\__init__.py", line 46, in __getattr__
    self._setup(name)
  File "C:\Python27\lib\site-packages\django\conf\__init__.py", line 42, in _setup
    self._wrapped = Settings(settings_module)
  File "C:\Python27\lib\site-packages\django\conf\__init__.py", line 98, in __init__
    % (self.SETTINGS_MODULE, e)
ImportError: Could not import settings 'settings' (Is it on sys.path? Is there an import error in the settings file?): No module named settings
I have an inkling that this has to do either with how I've set up the venv or with how I installed Django. I've checked the documentation for the latter, and I'm not sure what the myproject.settings notation means. Any help here would be greatly appreciated.
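For what it's worth, myproject.settings is just the dotted import path to the project's settings.py, resolved against the directories on sys.path. A sketch of the setup block, assuming the settings file lives at scraper/scraper/settings.py next to this script (adjust the package name to your actual layout):

import os
import sys
import django

# Assumption: the Django project package sits in ./scraper and contains
# scraper/settings.py; put its parent directory on sys.path.
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), 'scraper')))

# The value is the dotted module path Python should import, i.e. <package>.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'scraper.settings'
django.setup()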
I am trying to get the profiles feed from my Google Apps domain using the gdata library supplied by Google for Python. This is my code:
import atom
import gdata.auth
import gdata.contacts
import gdata.contacts.service

gd_client = gdata.contacts.service.ContactsService()
gd_client.email = 'name@domain.com'
gd_client.password = 'password'
gd_client.source = 'madeupgibberish'
gd_client.account_type = 'HOSTED'
gd_client.contact_list = 'domain.com'
gd_client.ProgrammaticLogin()

def PrintFeed(feed):
    for i, entry in enumerate(feed.entry):
        print '\n%s %s' % (i+1, entry.title.text)

max_results = raw_input('Enter max return: ')
feed_uri = gd_client.GetProfilesFeed()
query = gdata.contacts.service.ContactsQuery(feed_uri)
print(feed_uri)
query.max_results = max_results
#query.orderby='title'
feed = gd_client.GetContactsFeed(query.ToUri())
# Use the print feed method defined above.
PrintFeed(feed)
print(feed_uri)
#print feed

f = open('c:\\python27\\junk.xml', 'w')
f.write(str(feed))
f.close()
When I run this it returns:
C:\Python27\Lib\gdata-2.0.16>python contactAPI.py
Enter max return: 300
Traceback (most recent call last):
File "contactAPI.py", line 27, in <module>
feed_uri = gd_client.GetProfilesFeed()
File "build\bdist.win-amd64\egg\gdata\contacts\service.py", line 294, in GetProfilesFeed
File "build\bdist.win-amd64\egg\gdata\service.py", line 1108, in Get
gdata.service.RequestError: {'status': 403, 'body': 'Version 1.0 is not supported.', 'reason': 'Forbidden'}
I am able to use GetContactsFeed and other feeds, but I cannot get profiles. Any idea what's happening here or what I need to fix? Thank you in advance for your help.
The gdata.contacts.service module uses the deprecated version of the API. You should use gdata.contacts.{client, data} instead.
Here is a sample that gets user profiles:
import atom
import gdata.auth
import gdata.contacts
import gdata.contacts.client

email = 'admin@domain.com'
password = 'password'
domain = 'domain.com'

gd_client = gdata.contacts.client.ContactsClient(domain=domain)
gd_client.ClientLogin(email, password, 'madeupgibberish')

def PrintFeed(feed):
    for i, entry in enumerate(feed.entry):
        print '\n%s %s' % (i+1, entry.title.text)

feed_link = atom.data.Link(gd_client.GetFeedUri(kind='profiles'))
while feed_link:
    profiles_feed = gd_client.GetProfilesFeed(uri=feed_link.href)
    PrintFeed(profiles_feed)
    feed_link = profiles_feed.GetNextLink()
The library's contact_sample.py and unshare_profiles.py examples work with the client and data modules.