Read pyodbc query results into variables - python

Context: I'm trying to query data from SQL Server via pyodbc and use looping logic to read the results into the variables in the code block below.
Question(s): Can someone please help me modify the code block below so that it properly populates the variables via the looping logic? I suspect that because I'm using the fetchall() method on the query cursor, each result row comes back as a tuple within a list, which seems to make the looping logic below it useless. Can someone please suggest an alternative solution?
from constantcontact import ConstantContact
from constantcontact import Contact
import requests
import json
import pyodbc
username = 'REDACTED'
password = 'REDACTED'
sample_contact_connection = pyodbc.connect("Driver={ODBC Driver 13 for SQL Server};""Server=PC;""Database=leadgen_sandbox;""Username="+username+";""Password="+password+";""Trusted_Connection=yes;")
sample_contact_cursor = sample_contact_connection.cursor()
sample_contact_query = "SELECT first_name,last_name,title,company_name,email_address FROM leadgen_sandbox.dbo.sample_contacts"
sample_contact_connection.autocommit = True
sample_contact_cursor.execute(sample_contact_query)
print(sample_contact_cursor.fetchall())
constantcontact = ConstantContact('REDACTED','REDACTED')
list_id = '1816761971'
for [first_name, last_name, title, company_name, email_address] in sample_contact_cursor.fetchall():
    new_contact = Contact()
    new_contact.set_first_name(''+first_name+'')
    new_contact.set_last_name(''+last_name+'')
    new_contact.set_job_title(''+title+'')
    new_contact.set_company_name(''+company_name+'')
    new_contact.add_list_id(''+list_id+'')
    new_contact.set_email_address(''+email_address+'')
    response = constantcontact.post_contacts(new_contact)
    response_text = json.dumps(response, indent = 4, sort_keys = True)
    print(response_text)
sample_contact_connection.close()

When you called sample_contact_cursor.fetchall(), you already exhausted the contents of the cursor, so its rows are no longer available for the loop. Removing the print before the loop would fix this. Also, the .fetchall() in the loop is redundant, since the for loop reads each row from the cursor one by one anyway. You could write it this way as well:
for [first_name, last_name, title, company_name, email_address] in sample_contact_cursor:
    new_contact = Contact()
    new_contact.set_first_name(''+first_name+'')
    new_contact.set_last_name(''+last_name+'')
    # Write your remaining code
If you do need to first print the cursor and then run the loop, you have to execute the cursor once again before the loop, like this:
sample_contact_cursor.execute(sample_contact_query)
print(sample_contact_cursor.fetchall())
constantcontact = ConstantContact('REDACTED','REDACTED')
list_id = '1816761971'
sample_contact_cursor.execute(sample_contact_query)
for [first_name, last_name, title, company_name, email_address] in sample_contact_cursor:
    new_contact = Contact()
    new_contact.set_first_name(''+first_name+'')
    new_contact.set_last_name(''+last_name+'')
    # Write your remaining code
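Alternatively, if you would rather not run the query twice, a minimal sketch (reusing the cursor and the Contact setters from the question) is to store the fetchall() result in a variable once and loop over that list; a list, unlike the cursor, can be printed and iterated as many times as you like:
rows = sample_contact_cursor.fetchall()  # list of pyodbc Row objects
print(rows)                              # printing the list does not consume it
for first_name, last_name, title, company_name, email_address in rows:
    new_contact = Contact()
    new_contact.set_first_name(first_name)
    new_contact.set_last_name(last_name)
    # set the remaining fields and post the contact as before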


1064 SQL You have an error in your SQL syntax; Python List to String Convert?

I'm still new to coding and SQL, and I am working on a library management database system as a project.
I keep getting a 1064 error when trying to store my information in my database.
I'm pretty sure it's telling me that the authors are lists and need to be a string, but I'm not sure how to do that, or whether I can do it without changing my query entries!
Below is the def in question:
def googleAPI(self):
    lineTitle = str(titleInfo.text())
    # create getting started variables
    api = "https://www.googleapis.com/books/v1/volumes?q=isbn:"
    isbn = lineTitle.strip()  # input("Enter 10 digit ISBN: ").strip()
    # send a request and get a JSON response
    resp = urlopen(api + isbn)
    # parse JSON into Python as a dictionary
    book_data = json.load(resp)
    # create additional variables for easy querying
    volume_info = book_data["items"][0]["volumeInfo"]
    author = volume_info["authors"]
    # practice with conditional expressions!
    prettify_author = author if len(author) > 1 else author[0]
    # display title, author, page count, publication date
    # fstrings require Python 3.6 or higher
    # \n adds a new line for easier reading
    gTitle = str(volume_info['title'])
    pCount = str(volume_info['pageCount'])
    pubDate = str(volume_info['publishedDate'])
    author = str(volume_info["authors"])
    prettify_author = author if len(author) > 1 else author[0]
    stringAuthor = str(prettify_author)
    insertBooksF = "insert into "+bookTable+" values('"+isbn+"','"+gTitle+"','"+stringAuthor+"','"+pubDate+"','"+pCount+"')"
    try:
        cur.execute(insertBooksF)
        con.commit()
        print("You failed at failing")
    except:
        print("You actually failed")
    print(f"\nTitle: {volume_info['title']}")
    print(f"Author: {prettify_author}")
    print(f"Page Count: {volume_info['pageCount']}")
    print(f"Publication Date: {volume_info['publishedDate']}")
    print("\n***\n")
I believe this line is the one that needs adjusting:
insertBooksF = "insert into "+bookTable+" values('"+isbn+"','"+gTitle+"','"+stringAuthor+"','"+pubDate+"','"+pCount+"')"
You should ALWAYS let the database connector substitute your field values. Otherwise, you open yourself up to SQL injection, where a malicious user provides a field value like "; delete * from table;
stringAuthor = author if isinstance(author,str) else author[0]
insertBooksF = "insert into "+bookTable+" values(?,?,?,?,?)"
cur.execute(insertBooksF, (isbn, gTitle, stringAuthor, pubDate, pCount))
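If you want to keep every author rather than only the first one, a minimal sketch (assuming author is still the list read from volume_info["authors"], and keeping the ? placeholder style from the answer above; note that some MySQL drivers expect %s placeholders instead) is to join the list into one string before binding it:
# join all authors into one comma-separated string; keep a plain string as-is
stringAuthor = ", ".join(author) if isinstance(author, list) else str(author)
insertBooksF = "insert into " + bookTable + " values(?,?,?,?,?)"
cur.execute(insertBooksF, (isbn, gTitle, stringAuthor, pubDate, pCount))
con.commit()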

Why does my Python dictionary only keep the last value appended?

I have a MongoDB database and I retrieve some information from it. I try to append it to a Python dictionary using a for loop, but it only keeps the last row.
Here's my code:
import pymongo
import datetime

# @app.route("/bemployees", methods=["POST", "GET"])
def retrieve_all_documents():
    client = pymongo.MongoClient(
        "<url-removed>"
    )
    # database
    db = client.cdap
    # collection (table)
    collection = db.predicted_values
    cursor = collection.find({})
    documents_ = {}
    for document in cursor:
        documents_.update(document)
        print(document)  # prints all the records
    print(documents_)  # only prints the last record
    return documents_
Can someone please help me?
I assume that each document is a dict with the same keys. I think you need a list to store them:
documents_ = []
for i in cursor:
    dic = {}
    dic.update(i)
    documents_.append(dic)
print(documents_)
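If you do need a dictionary rather than a list, one option is to key the outer dict by each document's id so the records no longer overwrite each other. A minimal sketch, assuming each MongoDB document carries the usual _id field:
documents_ = {}
for document in cursor:
    # use the unique _id as the key so every record is preserved
    documents_[str(document["_id"])] = document
print(documents_)  # now prints all the records, keyed by id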
Please share your response data in the question; that will be helpful for answering it. Thank you.

Return multiple queries from BigQuery into html tables using one Flask route

I am learning Google App Engine and created a simple app that queries a table on BigQuery and returns an HTML table.
Relevant pieces:
#app.route("/")
def main():
query_job = bigquery_client.query(
"""
SELECT DISTINCT *
FROM github_project.labeled_data_dev
WHERE cluster = 1
ORDER BY commits DESC
LIMIT 10
"""
)
return flask.redirect(
flask.url_for(
"results",
project_id=query_job.project,
job_id=query_job.job_id,
location=query_job.location,
)
)
#app.route("/results")
def results():
project_id = flask.request.args.get("project_id")
job_id = flask.request.args.get("job_id")
location = flask.request.args.get("location")
query_job = bigquery_client.get_job(
job_id,
project=project_id,
location=location,
)
try:
# Set a timeout because queries could take longer than one minute.
results = query_job.result(timeout=30)
except concurrent.futures.TimeoutError:
return flask.render_template("timeout.html", job_id=query_job.job_id)
return flask.render_template("query_result.html", results=results)
I have a few variations on the above query that I would like to also return (changing the WHERE, etc).
Editing in some more information, per request.
The above script runs the query and returns a simple table. What I am after is a modification that will return multiple tables.
I have tried to edit the main script, as follows:
#app.route("/")
def main():
query_job_1 = bigquery_client.query(
"""
SELECT DISTINCT *
FROM github_project.labeled_data_dev
WHERE cluster = 1
ORDER BY commits DESC
LIMIT 10
"""
)
query1 = flask.redirect(
flask.url_for(
"results",
project_id=query_job.project,
job_id=query_job.job_id,
location=query_job.location,
)
)
query_job_2 = bigquery_client.query(
"""
SELECT DISTINCT *
FROM github_project.labeled_data_dev
WHERE cluster = 2
ORDER BY commits DESC
LIMIT 10
"""
)
query2 = flask.redirect(
flask.url_for(
"results",
project_id=query_job.project,
job_id=query_job.job_id,
location=query_job.location,
)
)
return query1, query2
The idea was to then be able to call both query1 and query2 in the @app.route("/results") section and return results1, results2, which could then be used in my HTML template, but this doesn't work.
I believe I could potentially use a class, like the one created in the second answer here using sqlite3, but I am unsure how to rewrite it with my BigQuery connections in mind.
Any help would be greatly appreciated.
Objects returned by a function cannot be "called" per se. You can call the function itself, and doing so returns the objects defined in its return line. In your specific case, calling the "main" function returns a tuple with 2 elements, (query1, query2). If you need to access each element for use in your HTML template afterward, you will want to unpack that tuple.
Take a look at this question [1] and its answers to understand the concept.
[1] How can I return two values from a function in Python?
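To make the idea concrete, here is a minimal sketch of a single route that runs both query variations itself, waits for both jobs, and hands two result sets to one template. It reuses bigquery_client and the timeout handling from the question; the /multi_results route name and the query_results.html template are hypothetical placeholders:
@app.route("/multi_results")
def multi_results():
    # run both variations of the query; only the WHERE clause differs
    jobs = [
        bigquery_client.query(
            f"""
            SELECT DISTINCT *
            FROM github_project.labeled_data_dev
            WHERE cluster = {cluster}
            ORDER BY commits DESC
            LIMIT 10
            """
        )
        for cluster in (1, 2)
    ]
    try:
        # result() blocks until each query finishes or the timeout is hit
        results_1, results_2 = (job.result(timeout=30) for job in jobs)
    except concurrent.futures.TimeoutError:
        return flask.render_template("timeout.html", job_id=jobs[0].job_id)
    # the template can loop over results_1 and results_2 independently
    return flask.render_template(
        "query_results.html", results_1=results_1, results_2=results_2
    )
If the jobs must be started in one request and rendered in another, as in the original redirect pattern, you could instead pass both job ids through url_for and call bigquery_client.get_job() twice in the results view.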

Problem with continuation token when querying from Cosmos DB

I'm facing a problem with continuation when querying items from CosmosDB.
I've already tried the following solution but with no success. I'm only able to query the first 10 results of a page even though I get a token that is not NULL.
The token has a size of 10733 bytes and looks like this.
{"token":"+RID:gtQwAJ9KbavOAAAAAAAAAA==#RT:1#TRC:10#FPP:AggAAAAAAAAAAGoAAAAAKAAAAAAAAAAAAADCBc6AEoAGgAqADoASgAaACoAOgBKABoAKgA6AE4AHgAuAD4ASgAeACoAPgBOAB4ALgA+AE4AHgAqAD4ASgAeAC4APgBOAB4ALgA+AE4AIgA2AEYAFgAmADYARgAaACYAPgBKABYAKgA6AE4AHgAuAD4ATgAeAC4APgBOAB4ALgA+AE4AIgAuAD4ATgAeAC4APgBOACIAMgA+AFIAIgAyAD4AUgAmADIAQgAWACIALgBCABIAIgAyAEIAEgAiADIAQgAOACYANgBKAB4AJgA6AEYAGgAqADoATgAeAC4APgB....etc...etc","range":{"min":"","max":"05C1BF3FB3CFC0"}}
My code looks like this. The function QueryDocuments did not work; instead I had to use QueryItems.
options = {}
options['enableCrossPartitionQuery'] = True
options['maxItemCount'] = 10
q = client.QueryItems(collection_link, query, options)
results_1 = q._fetch_function(options)
#this is a string representing a JSON object
token = results_1[1]['x-ms-continuation']
data = list(q._fetch_function({'maxItemCount':10,'enableCrossPartitionQuery':True, 'continuation':token}))
Is there a solution to this? Thanks for your help.
Please use the pydocumentdb package and refer to the sample code below.
from pydocumentdb import document_client

endpoint = "https://***.documents.azure.com:443/"
primaryKey = "***"
client = document_client.DocumentClient(endpoint, {'masterKey': primaryKey})
collection_link = "dbs/db/colls/coll"
query = "select c.id from c"
query_with_optional_parameters = []
q = client.QueryDocuments(collection_link, query, {'maxItemCount': 2})
results_1 = q._fetch_function({'maxItemCount': 2})
print(results_1)
token = results_1[1]['x-ms-continuation']
results_2 = q._fetch_function({'maxItemCount': 2, 'continuation': token})
print(results_2)
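To read the whole cross-partition result set rather than just one extra page, you can keep feeding the continuation token back in until the service stops returning one. A minimal sketch, reusing client, collection_link, and query from above and the same private _fetch_function pattern (which, as the indexing in the snippets above suggests, returns the page of items together with the response headers):
options = {'maxItemCount': 10, 'enableCrossPartitionQuery': True}
q = client.QueryItems(collection_link, query, options)

all_items = []
continuation = None
while True:
    page_options = dict(options)
    if continuation:
        page_options['continuation'] = continuation
    items, headers = q._fetch_function(page_options)  # one page plus headers
    all_items.extend(items)
    continuation = headers.get('x-ms-continuation')
    if not continuation:
        break  # no token means the last page has been read
print(len(all_items))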

pass in table name to sqlalchemy query through url in flask

In short, how do I do the following:
var="TableName"
models.var.query.all()
Explanation:
My goal is to allow the user to change the order of a list of items.
I set up an AJAX call that sends an array of ids to the API below.
It works if I hard-code the query and make an API view per table.
My problem is that I want "table" to fill in this line:
models.table.query.filter_by(id=item).first()
to complete the query.
Here is the view API, which gives me the error "no attribute 'table'":
@app.route('/order/<table>')
def order(table):
    # import pdb;pdb.set_trace()
    sortedItems = request.args.listvalues()[0]
    o = 1
    import pdb; pdb.set_trace()
    for item in sortedItems:
        grab = models.table.query.filter_by(id=item).first()
        grab.order = o
        o = o + 1
    db.session.commit()
    return jsonify(result=sortedItems)
You can use getattr():
>>> var = 'table_name'
>>> table = getattr(models, var)
>>> table.query.filter_by(id=item).first()
getattr() will raise an AttributeError if the attribute you're trying to get does not exist.
Example for your order() function:
@app.route('/order/<table>')
def order(table):
    sortedItems = request.args.listvalues()[0]
    o = 1
    table = getattr(models, table)
    for item in sortedItems:
        grab = table.query.filter_by(id=item).first()
        grab.order = o
        o = o + 1
    db.session.commit()
    return jsonify(result=sortedItems)
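One caveat worth adding to this approach: passing the URL segment straight into getattr(models, ...) lets a caller reach any attribute of the models module. A minimal sketch of a safer variant is to whitelist the tables you want to expose (the table names and model classes below are hypothetical placeholders):
# map URL table names to the model classes you actually want to expose
ALLOWED_TABLES = {
    'items': models.Item,        # hypothetical model
    'sections': models.Section,  # hypothetical model
}

@app.route('/order/<table>')
def order(table):
    model = ALLOWED_TABLES.get(table)
    if model is None:
        return jsonify(error='unknown table'), 404
    sortedItems = request.args.listvalues()[0]
    for o, item in enumerate(sortedItems, start=1):
        grab = model.query.filter_by(id=item).first()
        grab.order = o
    db.session.commit()
    return jsonify(result=sortedItems)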
