python Database Connection in function - python

I try to create a function for database connection in python. But this is now working.
Here is my code for the definition.
def connect():
dsn = cx_Oracle.makedsn(host='MYHOST', sid='DEVPRON', port=1521)
conn = cx_Oracle.connect(user='root', password='***', dsn=dsn)
cur = conn.cursor()
return [cur,conn]
I return conn and cur every time i call the connect function.
so here is my code when iam calling the function
connect()[0].execute("insert into tbluser (fullname,nickname) values ('" + fname + "', '" + nname + "') ")
connect()[1].commit()
when i run this no error occur, but when i check the database, there is no inserted row. please help. Thanks

Each time you call your connect function you are creating a new connection to the database server. So, your first call executes a query. The second call gives you a new connection. You're committing with this new connection, but there have been no changes. Try this instead:
def connect():
dsn = cx_Oracle.makedsn(host='MYHOST', sid='DEVPRON', port=1521)
conn = cx_Oracle.connect(user='root', password='***', dsn=dsn)
cur = conn.cursor()
return cur, conn
cur, conn = connect()
cur.execute("insert into tbluser (fullname,nickname) values ('" + fname + "', '" + nname + "') ")
conn.commit()

using sqlalchemy also you can connect database from python. Here is the code
from sqlalchemy import create_engine
engine = create_engine('oracle://host:port/database', echo=True)
conn = engine.connect()
result = conn.execute(query)

Related

MySQL to OPC UA Server

I'm trying to read the data from MySql database to OPC UA server. I tested it with the following code and sample database it is working. However, I'm not sure if it runs in a real time environment as the database has 40+ tables and 30+ columns in each table recording 1 minute data. Can someone please suggest the optimal way to do this.
from opcua import ua, uamethod, Server
from time import sleep
import logging
import mysql.connector
mydb = mysql.connector.connect(
host="127.0.0.1",
port=3306,
user="root",
password="root",
database="classicmodels")
mycursor = mydb.cursor(buffered=True , dictionary=True)
sql = "SELECT * FROM classicmodels.customers"
mycursor.execute(sql)
myresult = mycursor.fetchone()
sql1 = "SELECT * FROM classicmodels.employees"
mycursor.execute(sql1)
myresult1 = mycursor.fetchone()
if __name__ == "__main__":
"""
OPC-UA-Server Setup
"""
server = Server()
endpoint = "opc.tcp://127.0.0.1:4848"
server.set_endpoint(endpoint)
servername = "Python-OPC-UA-Server"
server.set_server_name(servername)
"""
OPC-UA-Modeling
"""
root_node = server.get_root_node()
object_node = server.get_objects_node()
idx = server.register_namespace("OPCUA_SERVER")
myobj = object_node.add_object(idx, "DA_UA")
myobj1 = object_node.add_object(idx, "D_U")
"""
OPC-UA-Server Add Variable
"""
for key, value in myresult.items():
myobj.add_variable(idx, key, str(value))
for key, value in myresult1.items():
myobj1.add_variable(idx, key, str(value))
"""
OPC-UA-Server Start
"""
server.start()
'''

Error while executing Python script on AWS Lambda function

I am attempting to use the following Lambda Function on AWS to automatically import objects from an S3 bucket into an RDS instance as they are uploaded into the bucket.
import boto3
import pymysql
s3_cient = boto3.client('s3')
# Read CSV file content from S3 bucket
def read_data_from_s3(event):
bucket_name = event["Records"][0]["s3"]["bucket"]["name"]
s3_file_name = event["Records"][0]["s3"]["object"]["key"]
resp = s3_cient.get_object(Bucket="bucket_name", Key=s3_file_name)
data = resp['Body'].read().decode('utf-8')
data = data.split("\n")
return data
def lambda_handler(event, context):
rds_endpoint = "rds-endpoint"
username = "name"
password = "pass" # RDS Mysql password
db_name = "db-name" # RDS MySQL DB name
conn = None
try:
conn = pymysql.connect(rds_endpoint, user=username, passwd=password, db=db_name, connect_timeout=5)
except pymysql.MySQLError as e:
print("ERROR: Unexpected error: Could not connect to MySQL instance.")
try:
cur = conn.cursor()
cur.execute("INSERT INTO `db`.`table`(`sensor`,`sIP`,`dIP`,`sPort`) VALUES(<{sensor: }>,<{sIP: }>,<{dIP: }>,<{sPort: }>);")
conn.commit()
except:
pass
data = read_data_from_s3(event)
with conn.cursor() as cur:
for ent in data: # Iterate over S3 csv file content and insert into MySQL database
try:
ent = ent.replace("\n","").split(",")
print (">>>>>>>"+str(ent))
cur.execute('insert into db`.`table (sensor,Sip,Dip,sPort) values("'+str(ent[1])+'")')
conn.commit()
except:
continue
cur.execute("select * from table")
# Display table records
for row in cur:
print (row)
if conn:
conn.commit()
Whenever I run this, I get the following error and I do not understand what I am doing wrong? Would someone be able to clarify my errors?
[ERROR] TypeError: __init__() takes 1 positional argument but 2 positional arguments (and 4 keyword-only arguments) were given
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 23, in lambda_handler
conn = pymysql.connect(rds_endpoint, user=username, passwd=password, db=db_name, connect_timeout=5)
It seems that pymysql.connect only takes in keyword arguments. You are passing rds_endpoint in as a positional argument. The rds_endpoint argument should probably passed in as host=rds_endpoint.

Assign/Substitute a list of values in a SQL statement for PostgreSQL using Python

I have a huge table with lot of partition tables underneath, but i only want to query/update few(18) of the partitions. So i have to prepare and run 18 python scripts (because i have to create and close the connection after inserting into every partition table as per our DBA instructions, because each period consists of millions of records) to run my Insert statements.
So i tried two ways to solve this:
One is to run a for loop to substitute the list of periods in my SQL statement which involves writing two python scripts:
import os
import logging
#import psycopg2
import socket
logging.basicConfig(level=logging.INFO)
cpc_name = socket.gethostname()
list_of_periods = [
{'period': '1940'},
{'period': '1941_1945'},
{'period': '1946_1950'},
{'period': '1951_1955'},
{'period': '1956_1960'},
{'period': '1961_1965'},
{'period': '1966_1970'},
{'period': '1971_1975'},
{'period': '1976_1980'},
{'period': '1981_1985'},
{'period': '1986_1990'},
{'period': '1991_1995'},
{'period': '1996_2000'},
{'period': '2001_2005'},
{'period': '2006_2010'},
{'period': '2011_2015'},
{'period': '2016_2020'},
{'period': '2021_2025'}
]
if __name__ == "__main__":
logging.info("Starting test process")
logging.info(" cpc = {}".format(cpc_name) + '\n')
for period in list_of_periods:
os.system('python sample_segment.py')
########
sample_segment.py
import os
import logging
import psycopg2
import socket
logging.basicConfig(level=logging.INFO)
cpc_name = socket.gethostname()
if __name__ == "__main__":
logging.info("Starting test process")
logging.info(" cpc = {}".format(cpc_name) + '\n')
connection = psycopg2.connect(user = os.environ.get("DATABASE_USER", "SVTDATAVANT"),
password = os.environ.get("DATABASE_PASS", "pass"),
host = os.environ.get("DATABASE_HOST", "psql.silver.com"),
port = 5432,
dbname = os.environ.get("DATABASE_NAME", "psql_db"),
options = "-c search_path=DATAVANT_O")
with connection.cursor() as cursor:
logging.info(str(connection.get_dsn_parameters()) + '\n')
cursor.execute("SELECT version();")
connection.commit()
conn = cursor.fetchone()
logging.info("You are connected to - " + str(conn))
tempb = '''
INSERT INTO DATAVANT_STG_O.mortality_index_{period}
SELECT * FROM DATAVANT_STG_O.tmp_mortality_{period}; '''
logging.info("Performing Insert Operation")
cursor.execute(tempb)
connection.commit()
count = cursor.rowcount
logging.info(str(count) + " - count for the period: {period}")
logging.info(" - count for the period: {period}")
connection.close()
print("PostgreSQL connection is closed")
Obviously it did not work and i suspect my python programming skills of-course or maybe i am looking for something like os.sytem.sql_with_parameters()
But anyway my intention is to turn the below statement
INSERT INTO DATAVANT_STG_O.mortality_index_{period}
SELECT * FROM DATAVANT_STG_O.tmp_mortality_{period};
INTO
INSERT INTO DATAVANT_STG_O.mortality_index_1940
SELECT * FROM DATAVANT_STG_O.tmp_mortality_1940;
INSERT INTO DATAVANT_STG_O.mortality_index_1941_1945
SELECT * FROM DATAVANT_STG_O.tmp_mortality_1941_1945;
etc...for 18 periods
And the second method i tried is by reading YAML file in my python script.
import os
import logging
import psycopg2
import socket
import yaml
logging.basicConfig(level=logging.INFO)
cpc_name = socket.gethostname()
with open(r'/home/SILVER/user/test/periods.yaml') as file:
YEARS = yaml.load(file, Loader=yaml.FullLoader)
print("load yaml file" + '\n')
print(YEARS)
if __name__ == "__main__":
logging.info("Starting test process")
logging.info(" cpc = {}".format(cpc_name) + '\n')
connection = psycopg2.connect(user = os.environ.get("DATABASE_USER", "SVTDATAVANT"),
password = os.environ.get("DATABASE_PASS", "pass"),
host = os.environ.get("DATABASE_HOST", "psql.host.com"),
port = 5432,
dbname = os.environ.get("DATABASE_NAME", "psql_db"),
options = "-c search_path=DATAVANT_O")
with connection.cursor() as cursor:
logging.info(str(connection.get_dsn_parameters()) + '\n')
cursor.execute("SELECT version();")
connection.commit()
conn = cursor.fetchone()
logging.info("You are connected to - " + str(conn))
tempb = '''
INSERT INTO DATAVANT_STG_O.mortality_index_YEARS[periods]
SELECT * FROM DATAVANT_STG_O.tmp_mortality_YEARS[periods]; '''
cursor.execute(tempb)
logging.info("Performing Insert Operation")
connection.commit()
count = cursor.rowcount
logging.info(str(count) + " - count for the period: YEARS[periods]")
logging.info(" - count for the period: YEARS[periods]")
connection.close()
print("PostgreSQL connection is closed")
##YAML file
---
periods:
- 1940
- 1941_1945
- 1946_1950
- 1951_1955
- 1956_1960
- 1961_1965
- 1966_1970
- 1971_1975
- 1976_1980
- 1981_1985
- 1986_1990
- 1991_1995
- 1996_2000
- 2001_2005
- 2006_2010
- 2011_2015
- 2016_2020
- 2021_2025
This is the part where i learnt that values from YAML file cannot be assigned to a SQL statement in a python file.
My apologies to the Large question but i just want to be clear and provide as much information as i could.
I would be very grateful for any suggestions to help me solve this scenario using either of the methods mentioned above or feel free to recommend me a new one, Thank you!
##Below is output, when i tried Mike's approach:
import os
import logging
import psycopg2
import socket
logging.basicConfig(level=logging.INFO)
cpc_name = socket.gethostname()
periods = ['1940']
periods.extend(['{}_{}'.format(i, i + 4) for i in range(1941, 2026, 5)])
for period in periods:
# Do your psycopg2 connection here and get your cursor
connection = psycopg2.connect(user = os.environ.get("DATABASE_USER", "SVTDATAVANT"),
password = os.environ.get("DATABASE_PASS", "pass"),
host = os.environ.get("DATABASE_HOST", "psql.silver.com"),
port = 5432,
dbname = os.environ.get("DATABASE_NAME", "psql_db"),
options = "-c search_path=DATAVANT_O")
with connection.cursor() as cursor:
logging.info(str(connection.get_dsn_parameters()) + '\n')
cursor.execute("SELECT version();")
connection.commit()
conn = cursor.fetchone()
logging.info("You are connected to - " + str(conn))
cursor.execute("""
SELECT COUNT(*) FROM datavant_stg_o.mortality_index_{};""". format(period, period)
)
# Commit and close your connection here
connection.commit()
count = cursor.rowcount
logging.info("Count for the period {} is: " . format(period, period) + str(count) + '\n')
connection.close()
print("PostgreSQL connection is closed" + "\n")
(Currently i am only trying to execute the count(*)s to check the functionality)
So i am expecting the counts for the partitions as:
SELECT COUNT(*) FROM datavant_stg_o.mortality_index_1940 - 1001066
SELECT COUNT(*) FROM datavant_stg_o.mortality_index_1941_1945 - 1713850
etc which are the original counts when i query from pgAdmin
but instead i am getting the output as
Count for the period 1940 is: 1,
Count for the period 1941_1945 is: 1 etc,
Does it have anything to do with the quotes?
If you are on python 3.6 or later:
periods = ['1940']
periods.extend([f'{i}_{i + 4}' for i in range(1941, 2026, 5)])
for period in periods:
# Do your psycopg2 connection here and get your cursor
cursor.execute(f"""
INSERT INTO DATAVANT_STG_O.mortality_index_{period}
SELECT * FROM DATAVANT_STG_O.tmp_mortality_{period};""")
# Commit and close your connection here
For an earlier python:
periods = ['1940']
periods.extend(['{}_{}'.format(i, i + 4) for i in range(1941, 2026, 5)])
for period in periods:
# Do your psycopg2 connection here and get your cursor
cursor.execute("""
INSERT INTO DATAVANT_STG_O.mortality_index_{}
SELECT * FROM DATAVANT_STG_O.tmp_mortality_{};""". format(period, period)
)
# Commit and close your connection here

MySQL server has gone away issue

I am newbie in python, so, it looks like my first project on that lang.
Everytime when I'm trying to run my script - I get different answers from mysql server.
The most frequent answer is OperationalError: (2006, 'MySQL server has gone away')
Sometimes I get output Thread: 11 commited (see code below).
And sometimes emergency stop (traslated, I have russian output in console).
Whatever if output full of commited - records in table still the same.
import MySQLdb
import pyping
import socket, struct
from threading import Thread
def ip2int(addr):
"""Convert ip to integer"""
return struct.unpack("!I", socket.inet_aton(addr))[0]
def int2ip(addr):
"""Convert integer to ip"""
return socket.inet_ntoa(struct.pack("!I", addr))
def ping(ip):
"""Pinging client"""
request = pyping.ping(ip, timeout=100, count=1)
return int(request.max_rtt)
class UpdateThread(Thread):
def __init__(self, records, name):
Thread.__init__(self)
self.database = MySQLdb.connect(host="***", port=3306, user="root", passwd="***", db="dns")
self.cursor = database.cursor()
self.name = name
self.records = records
def run(self):
print(self.name)
for r in self.records:
#latency = ping(int2ip(r[1])) what the hell :x
#ip = str(int2ip(r[1]))
id = str(r[0])
self.cursor.execute("""update clients set has_subn=%s where id=%s""" % (id, id))
self.database.commit()
print(self.name + " commited")
#start
database = MySQLdb.connect(host="***", port=3306, user="root", passwd="***", db="dns")
cursor = database.cursor()
cursor.execute("""select * from clients""")
data = cursor.fetchall() #All records from DataBase
count = len(data)
threads_counter = 10 #We are creating 10 threads for all records
th_count = count / threads_counter #Count of records for each thread
last_thread = count % threads_counter #Last records
threads = []
i = 0
while i < (count - last_thread):
temp_list = data[i:(i+th_count)]
#print(temp_list)
threads.append(UpdateThread(records = temp_list, name = "Thread: " + str((i/3) + 1)).start())
i += th_count
threads.append(UpdateThread(records = data[i: count], name = "Thread: 11").start())
P.S.
Another answers I found here is not helping me.
UPD:
I found that some(everytime another) thread print
OperationalError: (2013, 'Lost connection to MySQL server during query') and all next threads print OperationalError: (2013, 'Lost connection to MySQL server during query')
You need to close your DB connections when you're done with them or else the DB server will become overwhelmed and make your connections expire. For your program, I would change your code so that you have only one DB connection. You can pass a reference to it to your UpdateThread instances and close it when you're done.
database.close()

pyodbc connect to database twice and failed

I'm trying use Python and pyodbc to access SQL server 2008. The first connection works. Then, after the program finishes its job, it closes the connection. When the program tries to access the database and connect to it again, it fails in the statement:
self.conn = pyodbc.connect(DRIVER=self.DRIVER, SERVER=self.SERVER, DATABASE=self.DATABASE, UID=self.UID, PWD=self.PWD, charset="UTF-8")
but the first time is OK. So does anyone know why? Below is the Python code:
class ODBC_MS:
def __init__(self, DRIVER,SERVER, DATABASE, UID, PWD):
''' initialization '''
self.DRIVER = DRIVER
self.SERVER = SERVER
self.DATABASE = DATABASE
self.UID = UID
self.PWD = PWD
def _GetConnect(self):
''' Connect to the DB '''
if not self.DATABASE:
raise(NameError,"no getting db name")
try:
self.conn = pyodbc.connect(DRIVER=self.DRIVER, SERVER=self.SERVER,
DATABASE=self.DATABASE, UID=self.UID,
PWD=self.PWD, charset="UTF-8")
except Exception,e:
print e.message
else:
self.cur = self.conn.cursor()
if not self.cur:
raise(NameError,"connected failed!")
else:
return self.cur, self.conn
def ExecNoQuery(self,conn, cursor, sql):
cursor.execute(sql)
ret = conn.commit()
return ret
def _UnConnect(self,conn, cursor):
conn.close()
if __name__ == '__main__':
ms = ODBC_MS('{SQL SERVER}', r'<server>', '<db>', '<user>', '<password>')
cursor, conn = ms._GetConnect() #connection
sql = "create table XX for example"
ret = ms.ExecNoQuery(conn, cursor,sql) #sql operation
ms._UnConnect(conn, cursor) #close db
#access the database the second time.
ms = ODBC_MS('{SQL SERVER}', r'<server>', '<db>', '<user>', '<password>')
cursor, conn = ms._GetConnect() # not success, I don't know why
sql = "create table XX for example"
ret = ms.ExecNoQuery(conn, cursor,sql) #sql operation
ms._UnConnect(conn, cursor) #close db
The second time when the program calls ms.GetConnect(), the statement self.conn = pyodbc.connect(DRIVER=self.DRIVER, SERVER=self.SERVER, DATABASE=self.DATABASE, UID=self.UID, PWD=self.PWD, charset="UTF-8") fails.

Categories

Resources