AttributeError: 'Engine' object has no attribute 'conn' - python

So I am trying to build an auto-update to SQL from another Excel file, keyed on a unique value, so I know which new data to add to the database.
The column names differ between the database and the Excel file: in the database the names have no spaces.
I tried to do it with pandas and it gave me the same error.
So here's my simple code, tried with xlrd:
import xlrd
from sqlalchemy import create_engine

def insert():
    book = xlrd.open_workbook(r"MNM_Rotterdam_5_Daily_Details-20191216081027 - Copy (2).xlsx")
    sheet = book.sheet_by_name("GSM Details")
    database = create_engine(
        'mssql+pyodbc://WWX542337CDCD\SMARTRNO_EXPRESS/myDB?driver=SQL+Server+Native+Client+11.0')  # name of database
    cnxn = database.raw_connection
    cursor = cnxn.cursor()
    query = """Insert INTO [myDB].[dbo].[mnm_rotterdam_5_daily_details-20191216081027] (Date, SiteName, CellCI, CellLAC, CellName, CellIndex) values (?,?,?,?,?,?)"""

    for r in range(1, sheet.nrows):
        date = sheet.cell(r,0).value
        site_name = sheet.cell(r,3).value
        cell_ci = sheet.cell(r,4).value
        cell_lac = sheet.cell(r,5).value
        cell_name = sheet.cell(r,6).value
        cell_index = sheet.cell(r,7).value

        values = (date, site_name, cell_ci, cell_lac, cell_name, cell_index)
        cursor.execute(query, values)
    cnxn.commit()

    # Close the cursor
    cursor.close()

    # Commit the transaction
    database.commit()

    # Close the database connection
    database.close()

    # Print results
    print ("")
    print ("")
    columns = str(sheet.ncols)
    rows = str(sheet.nrows)
    print ("Imported", columns, "columns and", rows, "rows. All Done!")

insert()
And this is the error I get. When I tried to change the range, I got another error:
Traceback (most recent call last):
  File "D:/Tooling/20200207/uniquebcon.py", line 48, in <module>
    insert()
  File "D:/Tooling/20200207/uniquebcon.py", line 37, in insert
    database.commit()
AttributeError: 'Engine' object has no attribute 'commit'
I think this is related to the SQLAlchemy connection.

Instead of creating the cursor directly with
cursor = database.raw_connection().cursor()
you can create a connection object, then create the cursor from that, and then call .commit() on the connection:
cnxn = database.raw_connection()
crsr = cnxn.cursor()
# do stuff with crsr ...
cnxn.commit()
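For reference, here is a minimal sketch of how the whole insert() could look with that change applied (same workbook, table and column layout as in the question; the commit/close calls on the Engine are dropped, since committing and closing belong to the raw connection, not the Engine):
import xlrd
from sqlalchemy import create_engine

def insert():
    book = xlrd.open_workbook(r"MNM_Rotterdam_5_Daily_Details-20191216081027 - Copy (2).xlsx")
    sheet = book.sheet_by_name("GSM Details")
    database = create_engine(
        'mssql+pyodbc://WWX542337CDCD\SMARTRNO_EXPRESS/myDB?driver=SQL+Server+Native+Client+11.0')

    cnxn = database.raw_connection()   # note the parentheses: this returns a DBAPI connection
    crsr = cnxn.cursor()
    query = """Insert INTO [myDB].[dbo].[mnm_rotterdam_5_daily_details-20191216081027]
               (Date, SiteName, CellCI, CellLAC, CellName, CellIndex) values (?,?,?,?,?,?)"""

    for r in range(1, sheet.nrows):
        values = (sheet.cell(r, 0).value, sheet.cell(r, 3).value, sheet.cell(r, 4).value,
                  sheet.cell(r, 5).value, sheet.cell(r, 6).value, sheet.cell(r, 7).value)
        crsr.execute(query, values)

    cnxn.commit()   # commit on the connection, not on the Engine
    crsr.close()
    cnxn.close()    # close the connection; the Engine has no commit()/close() of its own
    print("Imported", sheet.ncols, "columns and", sheet.nrows, "rows. All Done!")

insert()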

Related

How to insert a dataframe into SQL Workbench on localhost

# Create the connection object
mydb = mysql.connector.connect(
    host = "localhost",
    user = "root",
    password = "umar1234",
    database = "date_sheet"
)
# print(mydb)

# # To Create Database
mycursor = mydb.cursor()

# creating column list for insertion  #BSIT(M)-VII
print(data.columns.tolist())
cols = "`,`".join([str(i) for i in data])

for i, row in data.iterrows():
    sql = "INSERT INTO `room` (`" + cols + "`) VALUES (" + "%s,"*(len(row)-1) + "%s)"
    # cursor.execute(sql, tuple(row))
    cursor.execute()
    connection.commit()
I want to insert a dataframe into SQL Workbench. The database name, username and password are given above, but I got this error:
Traceback (most recent call last):
  File "C:\Users\UMAR\PycharmProjects\FYP\scend.py", line 36, in <module>
    cursor.execute()
AttributeError: module 'mysql.connector.cursor' has no attribute 'execute'
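No answer is quoted here, but the traceback itself points at the problem: the loop calls cursor.execute() with no arguments on the mysql.connector.cursor module instead of the mycursor object created from mydb, and commits on an undefined connection. A minimal sketch of the corrected loop, assuming data is the pandas DataFrame being inserted and the room table matches its columns, could look like this:
# Sketch only: assumes `data` is the pandas DataFrame and the `room` table columns match it.
cols = "`,`".join([str(c) for c in data.columns])
placeholders = ",".join(["%s"] * len(data.columns))
sql = "INSERT INTO `room` (`" + cols + "`) VALUES (" + placeholders + ")"

for _, row in data.iterrows():
    mycursor.execute(sql, tuple(row))   # use the cursor object and pass the row values

mydb.commit()   # commit on the connection object that was actually created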

How to pass a variable as a column name with pyodbc?

I have a list that has two phone numbers and I'd like to put each phone number into its own column in an Access database. The column names are Phone_Number1 and Phone_Number2. How do I pass that to the INSERT statement?
phone_numbers = ['###.218.####', '###.746.####']

driver = '{Microsoft Access Driver (*.mdb, *.accdb)}'
filepath = 'C:/Users/Notebook/Documents/master.accdb'
myDataSources = pyodbc.dataSources()
access_driver = myDataSources['MS Access Database']

conn = pyodbc.connect(driver=driver, dbq=filepath)
cursor = conn.cursor()

phone_number_count = 1
for phone_number in phone_numbers:
    column_name = "Phone_Number" + str(phone_number_count)
    cursor.execute("INSERT INTO Business_Cards (column_name) VALUES (?)", (phone_number))
    conn.commit()

print("Your database has been updated.")
This is what I have so far.
Traceback (most recent call last):
  File "C:/Users/Notebook/PycharmProjects/Jarvis/BusinessCard.py", line 55, in <module>
    database_entry(phone_numbers, emails, name, title)
  File "C:/Users/Notebook/PycharmProjects/Jarvis/BusinessCard.py", line 47, in database_entry
    cursor.execute("INSERT INTO Business_Cards (column_name) VALUES (?)", (phone_number))
pyodbc.Error: ('HYS22', "[HYS22] [Microsoft][ODBC Microsoft Access Driver] The INSERT INTO statement contains the following unknown field name: 'column_name'. Make sure you have typed the name correctly, and try the operation again. (-1507) (SQLExecDirectW)")
If you want to insert both numbers in the same row, remove the for loop and adjust the INSERT to cover both columns:
phone_numbers = ['###.218.####', '###.746.####']
# ...
column_names = [f"Phone_Number{i}" for i in range(1, len(phone_numbers) + 1)]
placeholders = ['?'] * len(phone_numbers)
cursor.execute(f"INSERT INTO Business_Cards ({', '.join(column_names)}) VALUES ({', '.join(placeholders)})", tuple(phone_numbers))
conn.commit()
# ...
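The underlying issue is that pyodbc parameters (?) can only supply values, never identifiers, so a column name has to be built into the SQL text itself. If you did want to keep the original one-INSERT-per-number loop, a sketch (only safe because the names come from your own fixed counter, not user input) would be:
# Sketch: one INSERT per column name, with the name interpolated into the SQL text
phone_number_count = 1
for phone_number in phone_numbers:
    column_name = "Phone_Number" + str(phone_number_count)
    cursor.execute(f"INSERT INTO Business_Cards ({column_name}) VALUES (?)", (phone_number,))
    phone_number_count += 1
conn.commit()
Note that this creates one row per phone number, each with only one column filled, so the single-row INSERT above is usually the better choice.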

Getting keyerror while fetching JSON data from API using python

I am getting a KeyError while printing one of the JSON fields fetched from an API using Python. Except for nagios_service, I am able to print the other fields.
Error:
Traceback (most recent call last):
  File "<ipython-input-55-3a1eadbbe594>", line 1, in <module>
    runfile('Y:/_Temp/MEIPE/python/20190104_Script_Jason_APIv3.py', wdir='Y:/_Temp/MEIPE/python')
  File "C:\Users\MEIPE\AppData\Local\Continuum\anaconda2\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 704, in runfile
    execfile(filename, namespace)
  File "C:\Users\MEIPE\AppData\Local\Continuum\anaconda2\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 93, in execfile
    exec(compile(scripttext, filename, 'exec'), glob, loc)
  File "Y:/_Temp/MEIPE/python/20190104_Script_Jason_APIv3.py", line 68, in <module>
    print data[i]["_source"]["nagios_service"]
KeyError: 'nagios_service'
My code:
url1 = "http://nagiosdatagateway.vestas.net/esq/ITE1452552/logstash-2018.12.16/2/desc"
response = urllib.urlopen(url1)
data = json.loads(response.read())

#define db connection
cnxn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
                      "Server=DKCDCVDCP42\DPA;"
                      "Database=VPDC;"
                      "Trusted_Connection=yes;")
cursor = cnxn.cursor()

sql = "SELECT count(*) as count_of_rows FROM [VPDC].[pa].[ROC_Nagios_Reporting_RawData]"
cursor.execute(sql)
for row in cursor.fetchall():
    k = row.count_of_rows

i = 0
j = len(data)  #find length of data set
#print j

for i in range(0,j):  #loop to insert date into SQL Server
    print data[i]["_source"]["nagios_service"]
    print data[i]["_source"]["nagios_host"]
    print data[i]["_source"]["nagios_author"]
    print data[i]["_source"]["nagios_severity_label"]
    print data[i]["_source"]["nagios_external_command"]
    print data[i]["_source"]["#timestamp"]

cnxn.commit()  #commit transaction
cursor.close()
cnxn.close()
I need help fixing this KeyError on nagios_service so the script prints all the data.
We might be able to provide a better answer if you showed us the data or explained what the purpose of this is, but for now, if you want to run this code without getting exceptions, you need to allow for the possibility that not all items contain this key. One way is to use get() calls instead of square-bracket (__getitem__) access: dict.get(key, default) returns default if key is not in the dict, or None if you don't provide a default. So a basic solution would be:
for i in range(0,j):  #loop to insert date into SQL Server
    source_data = data[i]["_source"]
    print source_data.get("nagios_service")
    print source_data.get("nagios_host")
    print source_data.get("nagios_author")
    print source_data.get("nagios_severity_label")
    print source_data.get("nagios_external_command")
    print source_data.get("#timestamp")
A slightly better version that will tell you which key is missing:
for i in range(0,j):  #loop to insert date into SQL Server
    source_data = data[i]["_source"]
    keys = ['nagios_service', 'nagios_host', 'nagios_author',
            'nagios_severity_label', 'nagios_external_command', '#timestamp']
    for key in keys:
        print source_data.get(key, "Missing key: '%s'" % key)
I tried using try: and except KeyError: in my code after searching SO a little more, and was able to insert the JSON data into the SQL table without any errors.
url1 = "http://nagiosdatagateway.vestas.net/esq/ITE1452552/logstash-" + ysday1
#print url1  #test
#url = "http://nagiosdatagateway.vestas.net/esq/ITE1452552/logstash-2018.12.16/2/desc"
response = urllib.urlopen(url1)
data = json.loads(response.read())

#define db connection
cnxn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
                      "Server=DKCDCVDCP42\DPA;"
                      "Database=VPDC;"
                      "Trusted_Connection=yes;")
cursor = cnxn.cursor()

sql = "SELECT count(*) as count_of_rows FROM [VPDC].[pa].[ROC_Nagios_Reporting_RawData]"
cursor.execute(sql)
for row in cursor.fetchall():
    k = row.count_of_rows

i = 0
j = len(data)  #find length of data set
#print j

#for each in data:
for i in range(0,j):  #loop to insert date into SQL Server
    try:
        print data[i]["_source"]["nagios_author"]
        print data[i]["_source"]["nagios_service"]
        cursor.execute("insert into [VPDC].[pa].[ROC_Nagios_Reporting_RawData] "
                       "(Nagios_Author,Nagios_service,Nagios_host,Nagios_comment) values (?,?,?,?)",
                       (data[i]["_source"]["nagios_author"], data[i]["_source"]["nagios_service"],
                        data[i]["_source"]["nagios_host"], data[i]["_source"]["nagios_comment"]))
    except KeyError:
        pass

cnxn.commit()  #commit transaction
cursor.close()
cnxn.close()  #close connection
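One possible refinement, sketched here rather than part of the original answer, is to combine the two approaches: use .get() with a default instead of except KeyError: pass, so rows with a missing field still get inserted with a placeholder rather than being silently skipped.
# Sketch: insert a placeholder when a field is missing instead of skipping the row entirely
for item in data:
    source = item["_source"]
    cursor.execute("insert into [VPDC].[pa].[ROC_Nagios_Reporting_RawData] "
                   "(Nagios_Author,Nagios_service,Nagios_host,Nagios_comment) values (?,?,?,?)",
                   (source.get("nagios_author", "N/A"),
                    source.get("nagios_service", "N/A"),
                    source.get("nagios_host", "N/A"),
                    source.get("nagios_comment", "N/A")))
cnxn.commit()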

Writing column names to file

I have an SQLite DB file and I am parsing the data from each column in a table of the DB to a .txt file. At the moment it writes the column contents to the file, but it won't pull the column names and write those. I have tried to follow this guide, Is there a way to get a list of column names in sqlite?, but I cannot seem to get it to work. Here is my code, with an attempt at pulling the column names from the table.
import sqlite3
from sqlite3 import Error

# create a database connection to the SQLite database specified by the db_file
def create_connection(db_file, detect_types=sqlite3.PARSE_DECLTYPES):
    try:
        conn = sqlite3.connect(db_file)
        return conn
    except Error as e:
        print(e)
    return None

# Query specific rows in the sms table
def select_data(conn):
    cur = conn.cursor()
    cur.execute("SELECT _id, address, strftime('%d-%m-%Y', date / 1000, 'unixepoch'), read, type, body, seen FROM sms")
    print("Writing the contents of the sms table to an evidence file")
    print("\t")

    # Trying to pull out column names from db table
    def get_col_names():
        conn = sqlite3.connect("mmssms.db")
        c = conn.cursor()
        c.execute("SELECT _id, address, strftime('%d-%m-%Y', date / 1000, 'unixepoch'), read, type, body, seen FROM sms")
        return [member[0] for member in c.description]

    # Write the data to a smsEvidence.txt file
    with open('EvidenceExtractionFiles/smsInfo.txt', 'a+') as f:
        rows = cur.fetchall()
        for row in rows:
            #print(row)
            f.write("%s\n" % str(row))
    print("SMS Data is written to the evidence File")

# path to where the db files are stored
def main():
    database = "H:\College Fourth Year\Development Project\Final Year Project 2018\mmssms.db"

    # create a database connection
    conn = create_connection(database)
    with conn:
        # print("Query specific columns")
        select_data(conn)

    # close db connection
    if (conn):
        conn.close()
        print("Database closed")

if __name__ == '__main__':
    main()
You may use cursor.description which holds info about the column names:
[ ... ]
cur = cursor.execute('SELECT * FROM test_table LIMIT 100')
col_names = [ name[0] for name in cur.description ]
print (col_names)
[ ... ]
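Applied to the question's select_data, a minimal sketch (assuming the same query and output file as above) could write a header line taken from cur.description before the rows:
def select_data(conn):
    cur = conn.cursor()
    cur.execute("SELECT _id, address, strftime('%d-%m-%Y', date / 1000, 'unixepoch'), read, type, body, seen FROM sms")

    # column names for this exact query, taken from the cursor itself
    col_names = [d[0] for d in cur.description]

    with open('EvidenceExtractionFiles/smsInfo.txt', 'a+') as f:
        f.write("%s\n" % str(col_names))   # header line first
        for row in cur.fetchall():
            f.write("%s\n" % str(row))
    print("SMS Data is written to the evidence File")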

MySQLdb fetchall - AttributeError: 'int' object has no attribute 'fetchall'

I want to check whether I have managed to import my csv file into my MySQL db properly. My code:
import MySQLdb
mydb = MySQLdb.connect(host = 'localhost',user = 'milenko',passwd = 'nuklear',db = 'mm')
cur = mydb.cursor()
command = cur.execute('SELECT * FROM jul')
results = command.fetchall()
print (results)
But I got this:
File "b12.py", line 6, in <module>
    results = command.fetchall()
AttributeError: 'int' object has no attribute 'fetchall'
I have seen previous SO posts where people claim that number objects do not have a fetchall attribute. I copied this code from Python for MySQL by Albert Lukaszewski.
How do I pull down the db contents in one go?
You can't call fetchall() on the result of cursor.execute(); in fact, according to the MySQLdb documentation, cursor.execute() returns the number of rows affected by the executed query.
To retrieve the data you have to access the cursor's results directly:
cur = mydb.cursor()
cur.execute('SELECT * FROM jul')
results = cur.fetchall()
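For completeness, a small sketch of what using the result might look like (assuming the jul table exists in the mm database as in the question):
import MySQLdb

mydb = MySQLdb.connect(host='localhost', user='milenko', passwd='nuklear', db='mm')
cur = mydb.cursor()
cur.execute('SELECT * FROM jul')   # returns the affected/selected row count, not the rows
results = cur.fetchall()           # the rows come from the cursor itself
for row in results:
    print(row)                     # each row is a tuple of column values
mydb.close()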
