Python fernet: token must be in bytes - python

i would like to add values to the database in sqlite3, and they need to be encrypted. And when i want to retrieve values they need to be decrypted. Now I'm getting this error TypeError: token must be bytes. Here is the full list of errors: Traceback (most recent call last): File "C:\Users\d\OneDrive\Bureaublad\Assignment8\CDMS.py", line 75, in <module> get_clients() File "C:\Users\d\OneDrive\Bureaublad\Assignment8\CDMS.py", line 68, in get_clients new += fernet.decrypt(i).decode() + " " File "C:\Users\d\AppData\Local\Programs\Python\Python38\lib\site-packages\cryptography\fernet.py", line 75, in decrypt timestamp, data = Fernet._get_unverified_token_data(token) File "C:\Users\d\AppData\Local\Programs\Python\Python38\lib\site-packages\cryptography\fernet.py", line 100, in _get_unverified_token_data utils._check_bytes("token", token) File "C:\Users\d\AppData\Local\Programs\Python\Python38\lib\site-packages\cryptography\utils.py", line 29, in _check_bytes raise TypeError("{} must be bytes".format(name)) TypeError: token must be bytes
Here is my code:
from Classes import Client
from cryptography.fernet import Fernet
import sqlite3
key = Fernet.generate_key()
fernet = Fernet(key)
conn = sqlite3.connect('database.db')
c = conn.cursor()
def insert_client(client):
fullname = fernet.encrypt(client.fullname.encode())
fullname1 = fullname.decode()
adress = fernet.encrypt(client.adress.encode())
adress1 = adress.decode()
zipcode = fernet.encrypt(client.zipcode.encode())
zipcode1 = zipcode.decode()
city = fernet.encrypt(client.city.encode())
city1 = city.decode()
email = fernet.encrypt(client.email.encode())
email1 = email.decode()
mphone = fernet.encrypt(client.mphone.encode())
mphone1 = mphone.decode()
with conn:
c.execute("INSERT INTO client VALUES (:fullname, :adress, :zipcode, :city, :email, :mphone)",
{'fullname': fullname1, 'adress': adress1, 'zipcode': zipcode1,
'city': city1, 'email': email1, 'mphone': mphone1})
client1 = Client('Name1', 'Street1', 'Zipcode', 'NewYork', '123#gmail.com', '12345678')
def get_clients():
arr = []
new = ""
c.execute("SELECT * FROM client")
arr = c.fetchall()
for i in arr:
new += fernet.decrypt(i).decode() + " "
return new
insert_client(client1)
get_clients()
conn.close()
The first function is for adding clients, by a class object named Client. And the second function is for retrieving all values decrypted.

fetchall returns an iterable of rows (most commonly tuples). If you want to only process one record, you should instead use fetchone:
...
arr = c.fetchone()
for i in arr:
new += fernet.decrypt(i).decode() + " "
...
Alternatively you could iterate the cursor and return a list:
def get_clients():
arr = []
c.execute("SELECT * FROM client")
for row in c:
new = ""
for i in row:
new += fernet.decrypt(i).decode() + " "
arr.append(new)
return arr
But the Pythonic way would be to use comprehensions:
def get_clients():
c.execute("SELECT * FROM client")
return [' '.join(fernet.decrypt(i).decode() for i in row)
for row in c]
Slightly shorter, isn't it?

Related

Python Pandas Passing a variable to merge

I don't know what I'm doing wrong with the leftJoin() function.
I have a connection to a DB2 database and an Oracle database.
Queries return me a result, both in DB2 and Oracle.
I continue to get the primary key value and try to pass it as a variable to the leftJoin() function, but it doesn't work here.
The key consists of two fields. If I manually put the value of 'ID', 'VER' into on in df1 in merge it works.
import ibm_db
import ibm_db as db
import ibm_db_dbi
import pandas as pd
import cx_Oracle
import re
def connectDB2():
arg1 = "DATABASE=...;HOSTNAME=...;PORT=...;UID=...;PWD=...;"
conn = db.connect(arg1, "", "")
if conn:
print ('connection success')
# Run SQL
sql_generic = "SELECT distinct 'select ' || LISTAGG(COLNAME,', ') || ' from ' || trim(TABSCHEMA) || '.' || tabname || ' FETCH FIRST 2 ROWS ONLY' FROM SYSCAT.columns WHERE TABSCHEMA = '...' AND TABNAME = '...' AND COLNAME NOT IN ('CDC_STATUS','CDC_ODS_UPD') GROUP BY TABSCHEMA, TABNAME"
stmt = ibm_db.exec_immediate(conn, sql_generic)
result = ibm_db.fetch_both(stmt)
conn1 = ibm_db_dbi.Connection(conn)
connectDB2.df = pd.read_sql(result['1'], conn1)
print('df', connectDB2.df)
sql_PK = "SELECT COLNAMES FROM syscat.INDEXES WHERE TABSCHEMA='...' AND TABNAME = '...' AND UNIQUERULE='P'"
conn2 = ibm_db_dbi.Connection(conn)
connectDB2.df1 = pd.read_sql(sql_PK, conn2)
print('pk', connectDB2.df1)
d = connectDB2.df1.loc[:, "COLNAMES"]
print('d', d)
print('d0', d[0])
content_new1 = re.sub('$|^', '\'', d[0], flags=re.M)
content_new2 = re.sub('\'\+', '\'', content_new1, flags=re.M)
connectDB2.content_new3 = re.sub('\+', '\',\'', content_new2, flags=re.M)
print('c3', connectDB2.content_new3) --> format: 'ID','VER'
else:
print ('connection failed')
def connectOracle():
con = cx_Oracle.connect('...')
orders_sql = """select ... from ... FETCH FIRST 2 ROWS ONLY""";
connectOracle.df_orders = pd.read_sql(orders_sql, con)
print(connectOracle.df_orders)
def leftJoin():
df1 = pd.merge(connectOracle.df_orders, connectDB2.df, on=connectDB2.content_new3, how='left')
connectDB2()
connectOracle()
leftJoin()
I am adding below what the logs return.
Traceback (most recent call last):
File "C:\Users\PycharmProjects\pythonProject1\testConnection.py", line 68, in <module>
leftJoin()
File "C:\Users\PycharmProjects\pythonProject1\testConnection.py", line 57, in leftJoin
df1 = pd.merge(connectOracle.df_orders, connectDB2.df, on=connectDB2.content_new3, how='left')
File "C:\Users\PycharmProjects\pythonProject1\venv\lib\site-packages\pandas\core\reshape\merge.py", line 106, in merge
op = _MergeOperation(
File "C:\Users\PycharmProjects\pythonProject1\venv\lib\site-packages\pandas\core\reshape\merge.py", line 699, in __init__
) = self._get_merge_keys()
File "C:\Users\PycharmProjects\pythonProject1\venv\lib\site-packages\pandas\core\reshape\merge.py", line 1096, in _get_merge_keys
right_keys.append(right._get_label_or_level_values(rk))
File "C:\Users\PycharmProjects\pythonProject1\venv\lib\site-packages\pandas\core\generic.py", line 1779, in _get_label_or_level_values
raise KeyError(key)
KeyError: "'ID','VER'"
You are using the merge command wrongly,
I dont know what is actually inside your given dfs
connectOracle.df_orders
connectDB2.df
But i know for sure, you are doing a left join. And you are passing a key found on your second df or "right" df per say.
pd.merge(connectOracle.df_orders, connectDB2.df, on = 'guy with the same index found in both your dfs', how='left')
If you dont have that guy, well them you should define your left key or your right that want to 'join', on the parameters

Python script makes AssertionError when trying to MariaDB

I have this piece of code that collects data from a HAT connected to a Raspberry.
When run it gives this error:
[51.57, 22.30, 1002.01]
Traceback (most recent call last):
File "dbWriter.py", line 45, in <module>
write2DB(record)
File "dbWriter.py", line 26, in write2DB
assert len(values) == 3
AssertionError
I am by no means a programmer, i just fiddle around. It is meant to save 'record' to a database, which is then read and updated in realtime on an apache2 server. All help appreciated.
import mysql.connector
from itertools import repeat
import sys
import bme680
import time
try:
sensor = bme680.BME680(bme680.I2C_ADDR_PRIMARY)
except IOError:
sensor = bme680.BME680(bme680.I2C_ADDR_SECONDARY)
sensor.set_humidity_oversample(bme680.OS_2X)
sensor.set_pressure_oversample(bme680.OS_4X)
sensor.set_temperature_oversample(bme680.OS_8X)
sensor.set_filter(bme680.FILTER_SIZE_3)
mydb = mysql.connector.connect(
host='localhost',
user='pi',
passwd='pass',
database='weatherDB'
)
mycursor = mydb.cursor()
def write2DB(values):
assert len(values) == 3
sqlText = '''INSERT INTO data(humidity, temperature, pressure) VALUES({},{}, })'''.format(values[0], values[1], values[2])
mycursor.execute(sqlText)
mydb.commit()
for _ in repeat(None):
sensor.get_sensor_data()
output_humi = '{0:.2f}'.format(
sensor.data.humidity)
output_temp = '{0:.2f}'.format(
sensor.data.temperature)
output_pres = '{0:.2f}'.format(
sensor.data.pressure)
record = []
record = ('[' + (output_humi) + ', ' + (output_temp) + ', ' + (output_pres) + ']')
print(record)
write2DB(record)
time.sleep(10)
pass
You have:
record = ('[' + (output_humi) + ', ' + (output_temp) + ', ' + (output_pres) + ']')
record evaluates to a single string, not a list of 3 elements and hence your exception.
Change the above to:
record = [output_humi, output_temp, output_pres]
You are also missing a { in your format specification. It should be:
sqlText = '''INSERT INTO data(humidity, temperature, pressure) VALUES({},{}, {})'''.format(values[0], values[1], values[2])
An alternative would be to use a prepared statement:
sqlText = 'INSERT INTO data(humidity, temperature, pressure) VALUES(%s, %s, %s)'
mycursor.execute(sqlText, values)
In the above case you will be passing actual strings as the values. I don't know how the columns are defined, but no matter. If they are defined as floating point or decimal values, the strings will be converted to the correct type.

Elasticsearch query is not returning the correct response

I wrote a code for elastic search where I'm giving the movie_name as search_term but when it got the match according to the jaro winkler condition i.e
for i in es_data:
if (i['_source']['entity_type'] == 'movie_entity'):
dist = distance.get_jaro_distance(search_term, i['_source']['entity_name'], winkler=True, scaling=0.1)
if dist > 0.80:
This code is returning the correct output, but when there is no match, I get an error. I tried putting else statement but the error is still happening.
Can anyone help me with this issue?
from..items import DeccanchronicleItem
import mysql.connector
from mysql.connector import Error
from mysql.connector import errorcode
from elasticsearch import Elasticsearch, RequestsHttpConnection
from requests_aws4auth import AWS4Auth
import boto3
import spacy
import fuzzy
from pyjarowinkler import distance
import json
import scrapy
import re
class DeccanchronicleSpider(scrapy.Spider):
name = 'a_review'
page_number = 2
start_urls = ['https://www.deccanchronicle.com/entertainment/movie-review?pg=1'
]
def parse(self, response):
items = {}
i = 1
movie_title = response.xpath('//*[#id="fullBody"]/div[4]/div[3]/div[1]/div[*]/div[2]/a/h3/text()').getall()
movie_text = response.xpath('//*[#id="fullBody"]/div[4]/div[3]/div[1]/div[*]/div[2]/a/div[1]/text()').getall()
movie_id = response.xpath('//*[#id="fullBody"]/div[4]/div[3]/div[1]/div[*]/div[2]/a/#href').getall()
items['movie_title'] = movie_title
items['movie_text'] = movie_text
items['movie_id'] = movie_id
li = items['movie_title']
for i in range(len(li)):
li_split = li[i].split(" ")
#print(movietitle)
if 'Review:' in li_split or 'review:' in li_split:
outputs = DeccanchronicleItem()
outputs['page_title'] = li[i]
outputs['review_content'] = items['movie_text'][i]
outputs['review_link'] = 'https://www.deccanchronicle.com' + str(items['movie_id'][i])
nlp = spacy.load('/Users/divyanshu/review_bot/review_bot/NER_model')
def actor_mid_ner(sentence):
doc = nlp(sentence)
detected_hash = {}
# detected_hash = { ent.label_ : ([ent.text] if ent.label_ is None else ) for ent in doc.ents}
for ent in doc.ents:
label = ent.label_
detected = detected_hash.keys()
omit = ['Unwanted']
if label not in omit:
if label not in detected:
detected_hash[label] = [ent.text]
else:
detected_hash[label].append(ent.text)
else:
detected_hash[label] = [ent.text]
return detected_hash, detected
sentence = outputs['page_title']
ner_hash, ner_keys = actor_mid_ner(sentence)
movie_name = " ".join(str(x) for x in ner_hash['MOVIE'] )
print('-----------------------------------')
print(movie_name)
print('-----------------------------------')
def elasticsearch(movie_name):
search_term = movie_name
host = 'xxxxxxxxxxxxxxx' # For example, my-test-domain.us-east-1.es.amazonaws.com
region = 'ap-southeast-1' # e.g. us-west-1
service = 'es'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
es = Elasticsearch(
hosts = [{'host': host, 'port': 443}],
http_auth = awsauth,
use_ssl = True,
verify_certs = True,
connection_class = RequestsHttpConnection
)
body = {
"query": {
"multi_match" : {
"query": search_term,
"fields": ["entity_name", "aka"],
"fuzziness": "AUTO"
}
}
}
res = es.search(index="production-widget_id_search", body=body)
es_data = res['hits']['hits']
# print(es_data)
for i in es_data:
if (i['_source']['entity_type'] == 'movie_entity'):
dist = distance.get_jaro_distance(search_term, i['_source']['entity_name'], winkler=True, scaling=0.1)
if dist > 0.80:
return (i['_source']['entity_id'], i['_source']['entity_name'])
movie_id , movie_name_es = elasticsearch(movie_name)
review_url = outputs['review_link']
print('-----------------------------------')
print(movie_id)
print('-----------------------------------')
print(movie_name)
print('-----------------------------------')
print(movie_name_es)
print('-----------------------------------')
print(review_url)
print('***********************************')
try:
connection = mysql.connector.connect(host='localhost',
database='review_url',
user='root',
password='admin')
mySql_insert_query = """INSERT INTO k_master_movie_reviews (id, title, title_es, url)
VALUES(%s,%s,%s,%s)""",(movie_id, movie_name, movie_name_es, review_url )
cursor = connection.cursor()
cursor.execute(mySql_insert_query)
connection.commit()
print(cursor.rowcount, "Record inserted successfully into table")
cursor.close()
except mysql.connector.Error as error:
print("Failed to insert record into table {}".format(error))
finally:
if (connection.is_connected()):
connection.close()
print("MySQL connection is closed")
outputs['id'] = movie_id
outputs['title'] = movie_name
outputs['title_es'] = movie_name_es
outputs['url'] = review_url
yield outputs
pass
next_page = 'https://www.deccanchronicle.com/entertainment/movie-review?pg=' + str(DeccanchronicleSpider.page_number)
if DeccanchronicleSpider.page_number <= 5:
DeccanchronicleSpider.page_number += 1
yield response.follow(next_page, callback = self.parse)
This the error I'm getting
Traceback (most recent call last):
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/utils/defer.py", line 117, in iter_errback
yield next(it)
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/utils/python.py", line 345, in __next__
return next(self.data)
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/utils/python.py", line 345, in __next__
return next(self.data)
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
for r in iterable:
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/spidermiddlewares/offsite.py", line 29, in process_spider_output
for x in result:
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
for r in iterable:
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/spidermiddlewares/referer.py", line 338, in <genexpr>
return (_set_referer(r) for r in result or ())
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
for r in iterable:
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/spidermiddlewares/urllength.py", line 37, in <genexpr>
return (r for r in result or () if _filter(r))
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
for r in iterable:
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/spidermiddlewares/depth.py", line 58, in <genexpr>
return (r for r in result or () if _filter(r))
File "/Users/divyanshu/env/lib/python3.7/site-packages/scrapy/core/spidermw.py", line 64, in _evaluate_iterable
for r in iterable:
File "/Users/divyanshu/review_bot/review_bot/spiders/a.py", line 515, in parse
movie_id , movie_name_es = elasticsearch(movie_name)
TypeError: cannot unpack non-iterable NoneType object
That's because your elasticsearch() function when there is no match will return None which you then immediately unpack into movie_id and movie_name_es. I suggest adding return (None, None) to the end of the elasticsearch() function.
Let's have a loom at the last parts of your elasticsearch() function:
res = es.search(index="production-widget_id_search", body=body)
es_data = res['hits']['hits']
# print(es_data)
for i in es_data:
if (i['_source']['entity_type'] == 'movie_entity'):
dist = distance.get_jaro_distance(search_term, i['_source']['entity_name'], winkler=True, scaling=0.1)
if dist > 0.80:
return (i['_source']['entity_id'], i['_source']['entity_name'])
You have a for loop, and in each iteration, there are two if conditions. If there isn't a single iteration in which both conditions are met, you function will never reach the return statement, thus, it will return None.
To fix the unpacking, you can add a another return statement into your function that will be initialized if the for loop doesn't return anything::
res = es.search(index="production-widget_id_search", body=body)
es_data = res['hits']['hits']
# print(es_data)
for i in es_data:
if (i['_source']['entity_type'] == 'movie_entity'):
dist = distance.get_jaro_distance(search_term, i['_source']['entity_name'], winkler=True, scaling=0.1)
if dist > 0.80:
return (i['_source']['entity_id'], i['_source']['entity_name'])
return (None, None)
You can try removing this line :
movie_id , movie_name_es = elasticsearch(movie_name)
And instead do this :
es_results = elasticsearch(movie_name)
movie_id = es_results[0]
movie_name_es = es_results[1]
I am giving this solution because your error is pointing to this line -
File "/Users/divyanshu/review_bot/review_bot/spiders/a.py", line 515, in parse
movie_id , movie_name_es = elasticsearch(movie_name)
TypeError: cannot unpack non-iterable NoneType object

Peewee raw SELECT sql with multiple programmatic filter parameters

I am facing a problem returning the users from this get_users() function. Here is the code:
I am using Peewee, Pymysql and MySQL
def get_users(self, filter_columns=None, parameters=[], operator=None, ):
#Define the operator to be used in the WHERE statement
if operator:
operator = operator
else:
operator = '&'
#Contruct the sql_where statement
where = ''
if filter_columns:
for field in filter_columns:
where = where + field + '=%s, '
if len(where)>2:
where = where[:-2]
#Build Parameter list
param_list = ''
for param in parameters:
param_list = param_list + param + ", "
if len(param_list)>2:
param_list = param_list[:-2]
#Select the users and return.
sql = "SELECT * FROM user WHERE " + where
user = U.raw(sql, param_list)
return user
When i call the function like this:
users = user.get_users(filter_columns=['first_name', 'status'], parameters=['awa', 'active'], operator='|')
print(users)
for u in users:
print(u.first_name, u.last_name)
This is what i get as result:
Traceback (most recent call last):
File "D:/projects/micro bank/tests/smanager/randomtest.py", line 10, in <module>
for u in users:
File "C:\Python34\lib\site-packages\peewee.py", line 2963, in __iter__
return iter(self.execute())
File "C:\Python34\lib\site-packages\peewee.py", line 2959, in execute
self._qr = QRW(self.model_class, self._execute(), None)
File "C:\Python34\lib\site-packages\peewee.py", line 2902, in _execute
return self.database.execute_sql(sql, params, self.require_commit)
File "C:\Python34\lib\site-packages\peewee.py", line 3758, in execute_sql
cursor.execute(sql, params or ())
File "C:\Python34\lib\site-packages\pymysql\cursors.py", line 164, in execute
query = self.mogrify(query, args)
File "C:\Python34\lib\site-packages\pymysql\cursors.py", line 143, in mogrify
query = query % self._escape_args(args, conn)
TypeError: not enough arguments for format string
When i print out the user returned, i get this:
<class 'common.models.User'> SELECT * FROM user WHERE first_name=%s, status=%s ['awa, active']
From observation, the problem comes from this last area ['awa, active'] which is supposed to be ['awa', 'active']
The problem now is establishing a parameter_list that when i use it, it should print out like this ['awa', 'active']
Thanks for assistance.
I assume you need param_list to be ['awa', 'active']
Try this:
param_list = []
for param in parameters:
param_list.append(param)
# Since `param_list` in needed in the format `'"awa", "active"'`
param_list = ', '.join('"{0}"'.format(w) for w in param_list)
instead of
param_list = ''
for param in parameters:
param_list = param_list + param + ", "

TypeError: 'str' object is not callable - When filling database

I have to make a script what automatic does a BLAST for me and than fill the database automatic.
Everything goes wel until the script is trying to fill the database. Then I get the following error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/rubenoldenkamp/anaconda/lib/python3.4/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 580, in runfile
execfile(filename, namespace)
File "/Users/rubenoldenkamp/anaconda/lib/python3.4/site-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 48, in execfile
exec(compile(open(filename, 'rb').read(), filename, 'exec'), namespace)
File "/Users/rubenoldenkamp/Documents/Python/BLASTscript.py", line 74, in <module>
ReadFile()
File "/Users/rubenoldenkamp/Documents/Python/BLASTscript.py", line 44, in ReadFile
BLASTfor(SeqFor)
File "/Users/rubenoldenkamp/Documents/Python/BLASTscript.py", line 72, in BLASTfor
fillblast(titel_lijst, score_lijst, e_lijst, iden_lijst, pos_lijst, gaps_lijst)
File "/Users/rubenoldenkamp/Documents/Python/BLASTscript.py", line 25, in fillblast
cursor.execute("INSERT INTO `pg2`.`blast` (`blast_sequentie_id`,`blast_titel`, `blast_score`, `blast_evalue`, `blast_gaps`, `blast_positives`, `blast_identity`) VALUES (%s, %s, %s,%s, %s,%s, %s);"(i,titel_lijst[i], score_lijst[i], e_lijst[i], iden_lijst[i], pos_lijst[i], gaps_lijst[i]))
TypeError: 'str' object is not callable
I know something with filling the database is going wrong, but I don't know what and how I can solve it. Can you please help me? This is my code:
import mysql.connector
import xlrd
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
#Deze lijsten vullen met de sequentie header en sequentie uit het xlsx bestand!
def fillseq(SeqFor2, SeqFor3):
titels_lijst = SeqFor2
sequenties_lijst = SeqFor3
conn = mysql.connector.connect(host = "ithurtswhenip.nl", user = "pg2", password = "pg2", database= "pg2", port= "3307")
cursor = conn.cursor()
for i in range(0,len(titels_lijst)):
cursor.execute("INSERT INTO `pg2`.`sequenties` (`sequenties_id`,`sequenties_titel`, `sequenties_seq`) VALUES (%s,%s, %s);"(i,titels_lijst[i], sequenties_lijst[i]))
print("1 record toegevoegd")
cursor.commit()
cursor.close()
conn.close()
def fillblast(titel_lijst, score_lijst, e_lijst, iden_lijst, pos_lijst, gaps_lijst):
conn = mysql.connector.connect(host = "ithurtswhenip.nl", user = "pg2", password = "pg2", database= "pg2", port= "3307")
cursor = conn.cursor()
for i in range(0,len(titel_lijst)):
cursor.execute("INSERT INTO `pg2`.`blast` (`blast_sequentie_id`,`blast_titel`, `blast_score`, `blast_evalue`, `blast_gaps`, `blast_positives`, `blast_identity`) VALUES (%s, %s, %s,%s, %s,%s, %s);"(i,titel_lijst[i], score_lijst[i], e_lijst[i], iden_lijst[i], pos_lijst[i], gaps_lijst[i]))
print("1 record toegevoegd")
cursor.commit()
cursor.close()
conn.close()
def ReadFile():
sh = xlrd.open_workbook('TestBLAST.xlsx').sheet_by_index(0)
SeqFor = list()
SeqFor2 = list()
SeqFor3 = list()
for rownum in range(sh.nrows):
SeqFor.append(sh.cell(rownum, 1).value)
SeqFor2.append(sh.cell(rownum, 0).value)
SeqFor3.append(sh.cell(rownum, 1).value)
BLASTfor(SeqFor)
fillseq(SeqFor2, SeqFor3)
def BLASTfor(SeqFor):
sequence = SeqFor
for ForwardSeq in sequence:
results_handle = NCBIWWW.qblast("blastx","nr", ForwardSeq, hitlist_size = 1)
bestand= open ("blast_report.xml", "w")
bestand.writelines (results_handle.readlines())
bestand.close()
result = open("blast_report.xml", "r")
blast_records = NCBIXML.parse(result)
blast_record = next(blast_records)
titel_lijst, score_lijst, e_lijst, iden_lijst, pos_lijst, gaps_lijst = [], [], [], [], [] , []
E_VALUE_THRESH = 1
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
if hsp.expect < E_VALUE_THRESH:
titel_lijst.append(alignment.title)
score_lijst.append(hsp.score)
e_lijst.append(hsp.expect)
iden_lijst.append(hsp.identities)
pos_lijst.append(hsp.positives)
gaps_lijst.append(hsp.gaps)
fillblast(titel_lijst, score_lijst, e_lijst, iden_lijst, pos_lijst, gaps_lijst)
ReadFile()
#BLASTfor()
#fillseq()
#z,x,c,v,b,n = fillblast()
In your cursor.execute lines you need a comma between the query string and the arguments list. As it stands you have something like string(args) which looks like a function, thus you get the error that you can't call a string.
cursor.execute("INSERT INTO `pg2`.`sequenties` (`sequenties_id`,`sequenties_titel`, `sequenties_seq`) VALUES (%s,%s, %s);", (i,titels_lijst[i], sequenties_lijst[i]))
^ added comma right here
Just do that for all your cursor.execute statements

Categories

Resources