I'm trying to achieve something with a function I have.
As you can see here, this is an SQL table with data. When I upload 2 docs, they land in doc0 and doc1 and all the other doc columns are null.
What I want is: if I upload only 2 docs, the rest should be removed from the SQL table completely instead of being left as null.
This is my code:
def submit_quality_dept_application(request, application_id):
    n = int(request.data['length'])
    application = Application.objects.get(id=application_id)
    application_state = application.application_state

    teaching_feedback = request.FILES['teaching-feedback']
    application_state['teaching_feedback'] = teaching_feedback.name

    now = datetime.now()
    dt_string = now.strftime("%Y-%m-%d %H:%M:%S")
    application_state['edited_time'] = dt_string

    for i in range(5):
        application_state[f'doc{i}'] = None

    for i in range(n):
        doc = request.FILES[f'doc{i}']
        application_state[f'doc{i}'] = doc.name
        copy_to_application_directory(doc, application.id)

    copy_to_application_directory(teaching_feedback, application.id)

    ApplicationStep.objects.update_or_create(
        application=application, step_name=Step.STEP_7
    )

    Application.objects.filter(id=application_id).update(application_state=application_state)
    return Response(n, status=status.HTTP_200_OK)
What should I do to achieve this?
Thank you so much for your help!
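In case it helps, one way to get that behaviour, assuming application_state behaves like a plain dict (for example a JSONField), is to pop the unused doc keys instead of assigning None. A rough sketch replacing the two loops above:

for i in range(5):
    if i < n:
        doc = request.FILES[f'doc{i}']
        application_state[f'doc{i}'] = doc.name
        copy_to_application_directory(doc, application.id)
    else:
        # drop the key entirely so it is not stored as null
        application_state.pop(f'doc{i}', None)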
Related
I am struggling to understand how date queries work in Django. I am storing train times in a database and want to get the times that are greater than the current time.
The query looks like this, but returns zero results:
latestdepartures = LatestDepartures.objects.filter(station=startstation,earliest__gte=timezone.now().astimezone(pytz.utc))
My database has the entry below for example.
When I run the query, I get the results below (the first line is print(timezone.now().astimezone(pytz.utc))):
2020-08-01 15:49:06.610055+00:00
<QuerySet []>
The code which adds the data to the database looks like:
def convert_date_time(o):
    if isinstance(o, datetime):
        return o.__str__()

def updateservices(stationname, destination):
    now = datetime.now()
    # dd/mm/YY H:M:S
    datenow = now.strftime("%d/%m/%Y")
    board = DARWIN_SESH.get_station_board(stationname)
    stationdict = dict()
    stationdict['from'] = stationname
    stationdict['name'] = board.location_name
    stationdict['servicelist'] = []
    services = board.train_services

    for s in services:
        traindict = dict()
        service_details = DARWIN_SESH.get_service_details(s.service_id)
        traindict['departuretime'] = datetime.strptime(datenow + " " + service_details.std, '%m/%d/%Y %H:%M').astimezone(pytz.utc)
        traindict['callingpoints'] = []
        callingpoints = service_details.subsequent_calling_points
        for c in callingpoints:
            if c.crs == destination:
                callingpointdict = dict()
                callingpointdict['code'] = c.crs
                callingpointdict['name'] = c.location_name
                callingpointdict['arrivaltime'] = datetime.strptime(datenow + " " + c.st, '%m/%d/%Y %H:%M').astimezone(pytz.utc)
                traindict['callingpoints'].append(callingpointdict)
        if len(traindict['callingpoints']) > 0:
            stationdict['servicelist'].append(traindict)

    # For getting the minimum departure
    departures = [s['departuretime'] for s in stationdict['servicelist']]

    # Store the train departure object in the database
    stationdata = json.dumps(stationdict, default=convert_date_time)
    LatestDepartures.objects.create(
        station=stationname,
        earliest=min(departures),
        services=stationdata
    )
    return stationdata
service_details.std will be a time in 24-hour string format, for example "17:00".
Can anyone help? I am not sure whether I am meant to change the date format somewhere, or whether it is to do with the way the datetime object is created by concatenating the date and the time.
UPDATE:
Now storing the date in a different format, '%d/%m/%Y %H:%M':
Now I get dates that are greater than the current time, but once the current time has passed the earliest time in the database, the query still returns results. Example output is:
2020-08-01 17:31:21.909052+00:00 - print(timezone.now().astimezone(pytz.utc))
2020-08-01 18:03:00+00:00 - time in the database
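For what it's worth, one thing to check is whether the stored departure times are genuinely timezone-aware UTC values: calling astimezone() on a naive strptime result assumes the machine's local time zone (and raises on older Python versions). A minimal sketch, assuming the Darwin times are UK local times (the LONDON name is my own):

import pytz
from datetime import datetime

LONDON = pytz.timezone('Europe/London')

naive = datetime.strptime(datenow + " " + service_details.std, '%d/%m/%Y %H:%M')
# localize() attaches the zone to the naive value; astimezone() then converts it to UTC
traindict['departuretime'] = LONDON.localize(naive).astimezone(pytz.utc)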
I am attempting to query all rows for a column called show_id. I would then like to compare each potential item to be added to the DB against those results. The simplest way I can think of doing that is to check whether each show is already in the results and, if so, skip it. However, the results from the snippet below are returned as objects, so this check fails.
Is there a better way to build the query to achieve this?
shows_inDB = Show.query.filter(Show.show_id).all()
print(shows_inDB)
Results:
<app.models.user.Show object at 0x10c2c5fd0>,
<app.models.user.Show object at 0x10c2da080>,
<app.models.user.Show object at 0x10c2da0f0>
Code for the entire function:
def save_changes_show(show_details):
    """
    Save the changes to the database
    """
    try:
        shows_inDB = Show.query.filter(Show.show_id).all()
        print(shows_inDB)
        for show in show_details:
            #Check the show isnt already in the DB
            if show['id'] in shows_inDB:
                print(str(show['id']) + ' Already Present')
            else:
                #Add show to DB
                tv_show = Show(
                    show_id=show['id'],
                    seriesName=str(show['seriesName']).encode(),
                    aliases=str(show['aliases']).encode(),
                    banner=str(show['banner']).encode(),
                    seriesId=str(show['seriesId']).encode(),
                    status=str(show['status']).encode(),
                    firstAired=str(show['firstAired']).encode(),
                    network=str(show['network']).encode(),
                    networkId=str(show['networkId']).encode(),
                    runtime=str(show['runtime']).encode(),
                    genre=str(show['genre']).encode(),
                    overview=str(show['overview']).encode(),
                    lastUpdated=str(show['lastUpdated']).encode(),
                    airsDayOfWeek=str(show['airsDayOfWeek']).encode(),
                    airsTime=str(show['airsTime']).encode(),
                    rating=str(show['rating']).encode(),
                    imdbId=str(show['imdbId']).encode(),
                    zap2itId=str(show['zap2itId']).encode(),
                    added=str(show['added']).encode(),
                    addedBy=str(show['addedBy']).encode(),
                    siteRating=str(show['siteRating']).encode(),
                    siteRatingCount=str(show['siteRatingCount']).encode(),
                    slug=str(show['slug']).encode()
                )
                db.session.add(tv_show)
                db.session.commit()
    except Exception:
        print(traceback.print_exc())
I have decided to use the method above and extract the data I wanted into a list, comparing each show to the list.
show_compare = []
shows_inDB = Show.query.filter().all()
for item in shows_inDB:
    show_compare.append(item.show_id)

for show in show_details:
    #Check the show isnt already in the DB
    if show['id'] in show_compare:
        print(str(show['id']) + ' Already Present')
    else:
        #Add show to DB
For querying a specific column value, have a look at this question: Flask SQLAlchemy query, specify column names. This is the example code given in the top answer there:
result = SomeModel.query.with_entities(SomeModel.col1, SomeModel.col2)
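With with_entities the rows come back as plain tuples rather than model instances, so for your case the existing ids could be collected into a set directly, for example (a small sketch against your Show model):

existing_ids = {row.show_id for row in Show.query.with_entities(Show.show_id).all()}

for show in show_details:
    if show['id'] in existing_ids:
        print(str(show['id']) + ' Already Present')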
The crux of your problem is that you want to create a new Show instance if that show doesn't already exist in the database.
Querying the database for all shows and looping through the result for each potential new show might become very inefficient if you end up with a lot of shows in the database, and finding an object by identity is what an RDBMS does best!
This function will check to see if an object exists, and create it if not. Inspired by this answer:
def add_if_not_exists(model, **kwargs):
    if not model.query.filter_by(**kwargs).first():
        instance = model(**kwargs)
        db.session.add(instance)
So your example would look like:
def add_if_not_exists(model, **kwargs):
    if not model.query.filter_by(**kwargs).first():
        instance = model(**kwargs)
        db.session.add(instance)

for show in show_details:
    add_if_not_exists(Show, id=show['id'])
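One detail worth noting: add_if_not_exists only adds the instance to the session, so depending on how the session is managed you may still want a single commit once the loop has finished, e.g.:

db.session.commit()  # persist all newly added shows in one go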
If you really want to query all the shows upfront, you could put the ids into a set instead of a list, which will speed up the inclusion test.
E.g.:
show_compare = {item.show_id for item in Show.query.all()}

for show in show_details:
    # ... same as your code
I am having issues with my API request to Flickr below. My function takes as input a list of 10 photo ids. However, when I print the data from my function I am only getting information for 1 photo ID. Looking at the function below, any ideas on what may be causing the contents of only 1 photo ID to print? Any help would be great.
for item in get_flickr_data(word)["photos"]["photo"]:
    photo_ids = item["id"].encode('utf-8')
    lst_photo_ids.append(photo_ids)

print lst_photo_ids
lst_photo_ids = ['34117701526', '33347528313', '34158745075', '33315997274', '33315996984', '34028007021', '33315995844', '33347512113', '33315784134', '34024299271']
def get_photo_data(lst_photo_ids):
    baseurl = "https://api.flickr.com/services/rest/"
    params_d = {}
    params_d["method"] = "flickr.photos.getInfo"
    params_d["format"] = "json"
    params_d["photo_id"] = photo_ids
    params_d["api_key"] = FLICKR_KEY
    unique_identifier = params_unique_combination(baseurl, params_d)
    if unique_identifier in CACHE_DICTION:
        flickr_data_diction = CACHE_DICTION[unique_identifier]
    else:
        resp = requests.get(baseurl, params_d)
        json_result_text = resp.text[14:-1]
        flickr_data_diction = json.loads(json_result_text)
        CACHE_DICTION[unique_identifier] = flickr_data_diction
        fileref = open(CACHE_FNAME, "w")
        fileref.write(json.dumps(CACHE_DICTION))
        fileref.close()
    return flickr_data_diction
print get_photo_data(photo_ids)
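It looks like the function never loops over lst_photo_ids: params_d["photo_id"] = photo_ids picks up the leftover module-level photo_ids (the last id appended in the earlier loop), so only one photo is ever requested. A rough sketch of making one request per id, with the caching left out for brevity (FLICKR_KEY, requests and the [14:-1] wrapper-stripping are taken from your code):

def get_photo_data(lst_photo_ids):
    baseurl = "https://api.flickr.com/services/rest/"
    results = []
    for photo_id in lst_photo_ids:
        params_d = {
            "method": "flickr.photos.getInfo",
            "format": "json",
            "photo_id": photo_id,   # one id per request
            "api_key": FLICKR_KEY,
        }
        resp = requests.get(baseurl, params_d)
        # strip the jsonFlickrApi(...) wrapper, as in the original code
        results.append(json.loads(resp.text[14:-1]))
    return results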
Trying to write data to my local datastore like this:
drivingJson = json.loads(drivingdata)
for data in drivingJson:
    keys = getKey()
    index = 1
    dataList = list()
    for nodeData in data:
        self.response.write(keys)
        self.response.write("<br>")
        lat = nodeData['lat']
        lng = nodeData['long']
        color = nodeData['color']
        timestamp = datetime.datetime.strptime(nodeData['timestamp'], "%Y-%m-%d %H:%M:%S")
        saveDrivingData = DrivingObject(
            index=index,
            lat=float(lat),
            lng=float(lng),
            timestamp=timestamp,
            sessionKey=str(keys),
            color=int(color)
        )
        dataList.append(saveDrivingData)
        index += 1
    ndb.put_multi_async(dataList)
This doesn't populate the datastore with any data. But when I use
ndb.put_multi(dataList)
the datastore populates fine. How do I handle the asynchronous call? Thanks.
put_multi_async returns a list of Future objects.
You need to wait on those futures (wait_all, or get_result on each) to make sure the puts complete before you return from the request.
Have a read about async; all work has to complete before you return:
https://cloud.google.com/appengine/docs/python/ndb/async#using
All through that document it talks about waiting.
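As a minimal sketch of what that means for the code in the question (reusing the same dataList):

futures = ndb.put_multi_async(dataList)

# block until every put has finished before the handler returns
ndb.Future.wait_all(futures)

# or, to surface any exception raised by an individual put:
# for future in futures:
#     future.get_result()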
Working on a script to collect users' browser history with timestamps (educational setting).
Firefox 3 history is kept in a sqlite file, and the stamps are in UNIX epoch time; getting them and converting them to a readable format via a SQL command in Python is pretty straightforward:
sql_select = """ SELECT datetime(moz_historyvisits.visit_date/1000000,'unixepoch','localtime'),
moz_places.url
FROM moz_places, moz_historyvisits
WHERE moz_places.id = moz_historyvisits.place_id
"""
get_hist = list(cursor.execute (sql_select))
Chrome also stores history in a sqlite file, but its history timestamps are apparently formatted as the number of microseconds since midnight UTC on 1 January 1601.
How can this timestamp be converted to a readable format as in the Firefox example (like 2010-01-23 11:22:09)? I am writing the script with Python 2.5.x (the version on OS X 10.5) and importing the sqlite3 module.
Try this:
sql_select = """ SELECT datetime(last_visit_time/1000000-11644473600,'unixepoch','localtime'),
url
FROM urls
ORDER BY last_visit_time DESC
"""
get_hist = list(cursor.execute (sql_select))
Or something along those lines; it seems to be working for me.
This is a more pythonic and memory-friendly way to do what you described (by the way, thanks for the initial code!):
#!/usr/bin/env python

import os
import datetime
import sqlite3

import opster
from itertools import izip

SQL_TIME = 'SELECT time FROM info'
SQL_URL = 'SELECT c0url FROM pages_content'

def date_from_webkit(webkit_timestamp):
    epoch_start = datetime.datetime(1601, 1, 1)
    delta = datetime.timedelta(microseconds=int(webkit_timestamp))
    return epoch_start + delta

@opster.command()
def import_history(*paths):
    for path in paths:
        assert os.path.exists(path)
        c = sqlite3.connect(path)
        times = (row[0] for row in c.execute(SQL_TIME))
        urls = (row[0] for row in c.execute(SQL_URL))
        for timestamp, url in izip(times, urls):
            date_time = date_from_webkit(timestamp)
            print date_time, url
        c.close()

if __name__ == '__main__':
    opster.dispatch()
The script can be used this way:
$ ./chrome-tools.py import-history ~/.config/chromium/Default/History* > history.txt
Of course Opster can be thrown out but seems handy to me :-)
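If you do throw Opster out, roughly the same loop can be driven by plain sys.argv; a sketch under the same assumptions, reusing date_from_webkit but reading the urls table of the main History file as in the answer above:

import sys

if __name__ == '__main__':
    for path in sys.argv[1:]:
        c = sqlite3.connect(path)
        for visit_time, url in c.execute('SELECT last_visit_time, url FROM urls'):
            print date_from_webkit(visit_time), url
        c.close()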
The sqlite module returns datetime objects for datetime fields, and these have a strftime method for formatting them as readable strings.
You can do something like this once you have the recordset:
for record in get_hist:
    date_string = record[0].strftime("%Y-%m-%d %H:%M:%S")
    url = record[1]
This may not be the most Pythonic code in the world, but here's a solution. I cheated by adjusting for the time zone (EST here) by doing this:
utctime = datetime.datetime(1601,1,1) + datetime.timedelta(microseconds = ms, hours =-5)
Here's the function. It assumes that the Chrome history file has been copied from another account into /Users/someuser/Documents/tmp/Chrome/History:
def getcr():
    connection = sqlite3.connect('/Users/someuser/Documents/tmp/Chrome/History')
    cursor = connection.cursor()
    get_time = list(cursor.execute("""SELECT last_visit_time FROM urls"""))
    get_url = list(cursor.execute("""SELECT url from urls"""))
    stripped_time = []
    crf = open('/Users/someuser/Documents/tmp/cr/cr_hist.txt', 'w')
    itr = iter(get_time)
    itr2 = iter(get_url)
    while True:
        try:
            newdate = str(itr.next())
            stripped1 = newdate.strip(' (),L')
            ms = int(stripped1)
            utctime = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=ms, hours=-5)
            stripped_time.append(str(utctime))
            newurl = str(itr2.next())
            stripped_url = newurl.strip(' ()')
            stripped_time.append(str(stripped_url))
            crf.write('\n')
            crf.write(str(utctime))
            crf.write('\n')
            crf.write(str(newurl))
            crf.write('\n')
            crf.write('\n')
            crf.write('********* Next Entry *********')
            crf.write('\n')
        except StopIteration:
            break
    crf.close()
    shutil.copy('/Users/someuser/Documents/tmp/cr/cr_hist.txt', '/Users/parent/Documents/Chrome_History_Logs')
    os.rename('/Users/someuser/Documents/Chrome_History_Logs/cr_hist.txt', '/Users/someuser/Documents/Chrome_History_Logs/%s.txt' % formatdate)