When I run my Python code that reads configuration from a table in a PostgreSQL database, I use sqlalchemy's engine.connect() to establish the connection and conn.close() to close it. But my database still shows active connections.
def get_postgres_data_df(self, table_name):
    global read_conn
    result_df = pd.DataFrame()
    if self.debug:
        print('Inside PostgreSQLOperations Class get_postgres_data_df method')
    connection_string = 'postgresql://' + \
                        self.user + ':' + \
                        self.password + '@' + \
                        self.host + ':' + \
                        self.port + '/' + \
                        self.database
    try:
        if self.debug:
            print('Trying to read from table {0}'.format(table_name))
        engine = sqlalchemy.create_engine(connection_string)
        read_conn = engine.connect()
        print('Connected to database # ' + self.database)
        if self.debug:
            print(engine.table_names())
    except Exception as e:
        print('Failed to establish the connection', e)
        read_conn.close()
    query = ('SELECT * FROM ' + table_name)
    try:
        result_df = pd.read_sql(sql=query, con=read_conn)
        if self.debug:
            print('Read success for table # ', table_name)
            print(result_df)
        read_conn.close()
    except Exception as e:
        print('Read failed # ', e)
        read_conn.close()
    return result_df
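For comparison, a minimal sketch (not the original class) of where the lingering connection can come from: SQLAlchemy engines pool connections by default, so conn.close() normally returns the connection to the pool rather than closing it, which may be why the server still shows it as active. Disposing the engine, or creating it with NullPool, releases the real database connection. The read_table function and its arguments here are hypothetical.

import pandas as pd
import sqlalchemy
from sqlalchemy.pool import NullPool

def read_table(connection_string, table_name):
    # NullPool opens a fresh DB connection per checkout and really closes it on conn.close()
    engine = sqlalchemy.create_engine(connection_string, poolclass=NullPool)
    try:
        with engine.connect() as conn:
            # the with-block calls conn.close() on exit
            return pd.read_sql(sql='SELECT * FROM ' + table_name, con=conn)
    finally:
        # with the default pool, engine.dispose() is what actually closes pooled connections
        engine.dispose()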
I have two Python functions in a Lambda handler that do essentially the same thing. When checking the logs in AWS I get the following error:
{
    "errorMessage": "local variable 'post_connection' referenced before assignment",
    "errorType": "UnboundLocalError",
    "stackTrace": [
        "  File \"/var/task/etl_python/handler.py\", line 11, in handle\n    EtlCall.run_bc(event)\n",
        "  File \"/var/task/etl_python/elt_call.py\", line 153, in run_bc\n    if post_connection:\n"
    ]
}
My code looks like this:
def run_bo(event):
    s3_resource = boto3.resource('s3')
    idv_endpoint = os.getenv('DB_ENDPOINT')
    idv_database = os.getenv("DB_NAME")
    filename = 'staging/aml_bo'
    bucket = os.getenv('BILLING_ETL')
    if 'resources' in event and "psql_billing" in event['resources'][0]:
        try:
            config = VaultService()
            s3_resource = boto3.resource('s3')
            idv_endpoint = os.getenv('DB_ENDPOINT')
            idv_database = os.getenv("DB_NAME")
            filename = 'staging/billing_bo'
            bucket = os.getenv('BILLING_ETL')
            idv_username = config.get_postgres_username()
            idv_password = config.get_postgres_password()
            post_connection = psycopg2.connect(user=idv_username,
                                               password=idv_password,
                                               host=idv_endpoint,
                                               port="5432",
                                               database=idv_database)
            cursor = post_connection.cursor()
            bo_qry = "SELECT uuid, first_name, middle_initial, last_name, date(date_of_birth), mailing_address FROM public.bo"
            # Might not need the next two lines but this should work.
            query = """COPY ({}) TO STDIN WITH (FORMAT csv, DELIMITER '|', QUOTE '"', HEADER TRUE)""".format(bo_qry)
            file = StringIO()
            cursor.copy_expert(query, file)
            s3_resource.Object(bucket, f'{filename}.csv').put(Body=file.getvalue())
            cursor.close()
        except (Exception, psycopg2.Error) as error:
            print("Error connecting to postgres instance", error)
        finally:
            if post_connection:
                cursor.close()
                post_connection.close()
            # return "SUCCESS"
    else:
        # Unknown notification
        # raise Exception(f'Unexpected event notification: {event}')
        print("Cannot make a solid connection to psql instance. Please check code configuration")
def run_bc(event):
    if 'resources' in event and "psql_billing" in event['resources'][0]:
        try:
            config = VaultService()
            s3_resource = boto3.resource('s3')
            idv_endpoint = os.getenv('DB_ENDPOINT')
            idv_database = os.getenv("DB_NAME")
            filename = 'staging/billing_bc'
            bucket = os.getenv('BILLING_ETL')
            idv_username = config.get_postgres_username()
            idv_password = config.get_postgres_password()
            post_connection = psycopg2.connect(user=idv_username,
                                               password=idv_password,
                                               host=idv_endpoint,
                                               port="5432",
                                               database=idv_database)
            cursor = post_connection.cursor()
            bc_qry = "select id, uuid, document_type, image_id, document_id from public.bc"
            # Might not need the next two lines but this should work.
            query = """COPY ({}) TO STDIN WITH (FORMAT csv, DELIMITER '|', QUOTE '"', HEADER TRUE)""".format(bc_qry)
            file = StringIO()
            cursor.copy_expert(query, file)
            s3_resource.Object(bucket, f'{filename}.csv').put(Body=file.getvalue())
            cursor.close()
        except (Exception, psycopg2.Error) as error:
            print("Error connecting to postgres instance", error)
        finally:
            if post_connection:
                cursor.close()
                post_connection.close()
            # return "SUCCESS"
    else:
        # Unknown notification
        # raise Exception(f'Unexpected event notification: {event}')
        print("Cannot make a solid connection to psql instance. Please check code configuration")
I don't understand how my connection is unbound if I am closing the cursor and the connection after each function and then reopening them for the next. I close them at the end when the data is dumped to my file and then create a new connection in the next function.
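For what it's worth, a minimal sketch of a shape that avoids the UnboundLocalError (the conn_kwargs parameter is hypothetical, standing in for the psycopg2.connect arguments above): if connect() raises, post_connection is never assigned, so the finally block cannot assume the name exists unless it is bound before the try.

import psycopg2

def run_bc(event, conn_kwargs):
    post_connection = None
    cursor = None
    try:
        post_connection = psycopg2.connect(**conn_kwargs)
        cursor = post_connection.cursor()
        # ... build the COPY query and upload the result to S3 as in the original ...
    except (Exception, psycopg2.Error) as error:
        print("Error connecting to postgres instance", error)
    finally:
        # both names are always bound here, even when connect() failed
        if cursor is not None:
            cursor.close()
        if post_connection is not None:
            post_connection.close()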
I'm trying to update my Heroku DB from a Python script I have on my computer. I set up my app on Heroku with NodeJS (because I just like Javascript for that sort of thing), and I'm not sure I can add in a Python script to manage everything. I was able to fill out the DB once, with the script, and it had no hangups. When I try to update it, I get the following statement in my console:
Traceback (most recent call last):
  File "/home/alan/dev/python/smog_usage_stats/scripts/DBManager.py", line 17, in <module>
    CONN = pg2.connect(
  File "/home/alan/dev/python/smog_usage_stats/venv/lib/python3.8/site-packages/psycopg2/__init__.py", line 127, in connect
    conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: FATAL: role "alan" does not exist
and this is my script:
# DBManager.py
import os
import zipfile
import psycopg2 as pg2
from os.path import join, dirname
from dotenv import load_dotenv

# -------------------------------
# Connection variables
# -------------------------------
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)

# -------------------------------
# Connection to database
# -------------------------------
# Server connection
CONN = pg2.connect(
    database=os.environ.get('PG_DATABASE'),
    user=os.environ.get('PG_USER'),
    password=os.environ.get('PG_PASSWORD'),
    host=os.environ.get('PG_HOST'),
    port=os.environ.get('PG_PORT')
)

# Local connection
# CONN = pg2.connect(
#     database=os.environ.get('LOCAL_DATABASE'),
#     user=os.environ.get('LOCAL_USER'),
#     password=os.environ.get('LOCAL_PASSWORD'),
#     host=os.environ.get('LOCAL_HOST'),
#     port=os.environ.get('LOCAL_PORT')
# )

print("Connected to POSTGRES!")

global CUR
CUR = CONN.cursor()

# -------------------------------
# Database manager class
# -------------------------------
class DB_Manager:
    def __init__(self):
        self.table_name = "smogon_usage_stats"
        try:
            self.__FILE = os.path.join(
                os.getcwd(),
                "data/statsmaster.csv"
            )
        except:
            print('you haven\'t downloaded any stats')

    # -------------------------------
    # Create the tables for the database
    # -------------------------------
    def construct_tables(self):
        master_file = open(self.__FILE)
        columns = master_file.readline().strip().split(",")
        sql_cmd = "DROP TABLE IF EXISTS " + self.table_name + ";\n"
        sql_cmd += "CREATE TABLE " + self.table_name + " (\n"
        sql_cmd += (
            "id_ SERIAL PRIMARY KEY,\n"
            + columns[0] + " INTEGER,\n"
            + columns[1] + " VARCHAR(50),\n"
            + columns[2] + " FLOAT,\n"
            + columns[3] + " INTEGER,\n"
            + columns[4] + " FLOAT,\n"
            + columns[5] + " INTEGER,\n"
            + columns[6] + " FLOAT,\n"
            + columns[7] + " INTEGER,\n"
            + columns[8] + " VARCHAR(10),\n"
            + columns[9] + " VARCHAR(50));"
        )
        CUR.execute(sql_cmd)
        CONN.commit()

    # -------------------------------
    # Copy data from CSV files created in smogon_pull.py into database
    # -------------------------------
    def fill_tables(self):
        master_file = open(self.__FILE, "r")
        columns = tuple(master_file.readline().strip().split(","))
        CUR.copy_from(
            master_file,
            self.table_name,
            columns=columns,
            sep=","
        )
        CONN.commit()

    # -------------------------------
    # Disconnect from database.
    # -------------------------------
    def close_db(self):
        CUR.close()
        print("Cursor closed.")
        CONN.close()
        print("Connection to server closed.")


if __name__ == "__main__":
    manager = DB_Manager()
    print("connected")
    manager.construct_tables()
    print("table made")
    manager.fill_tables()
    print("filled")
As I said, everything worked fine, but now I'm getting this unexpected error, and I'm not sure how to trace it back. The name "alan" is not in any of my credentials, which is confusing me.
I'm not running it via the CLI, but through my text editor (in this case VS Code).
So the reason this didn't work is that I was pointing to the wrong directory for my .env file. dotenv_path = join(dirname(__file__), '.env') needs to "walk" up one more level to find my .env. I changed it to dotenv_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.env')) and it worked. Just in case someone else has a similar issue, that might be something to check!
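A brief sketch of a guard for that case (the RuntimeError is illustrative, not part of the original script), together with a likely explanation of why the error mentioned "alan": if the .env file is not found, os.environ.get('PG_USER') returns None, psycopg2 drops the missing user from the connection parameters, and libpq falls back to the operating-system username, which is presumably where role "alan" came from.

import os
from os.path import abspath, dirname, join
from dotenv import load_dotenv

# walk up one level from scripts/ to find the .env file
dotenv_path = abspath(join(dirname(__file__), '..', '.env'))
load_dotenv(dotenv_path)

if os.environ.get('PG_USER') is None:
    # without this, psycopg2 would connect as the OS user (e.g. "alan")
    raise RuntimeError('PG_USER not set; .env was not loaded from ' + dotenv_path)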
Might be unrelated, but double check your ports if using multiple instances: I also got psycopg2.OperationalError: FATAL: role "myUser" does not exist when I wanted to log in to one PostgreSQL database running on (default) port 5432 with credentials which I had set up in another instance running on port 5433...
import sshtunnel
import time
import logging
import mysql.connector


class SelectCommand():
    def __init__(self, dbcmd, value=None, mul=False):
        self.dbcmd = dbcmd
        self.value = value
        self.mul = mul

    def execute(self):
        try:
            print("try1")
            connection = mysql.connector.connect(
                user='**myuser**', password='**pass**',
                host='127.0.0.1', port=server.local_bind_port,
                database='**myuser$test**', autocommit=True
            )
            print("try2")
            connection.autocommit = True
            mycursor = connection.cursor()
            sql = self.dbcmd
            val = self.value
            mycursor.execute(sql, val)
            myresult = mycursor.fetchone()
            mycursor.close()
            connection.close()
            if myresult == None or self.mul == True:
                return myresult
            return myresult[0]
        except Exception as e:
            print(e)
            return "server disconnect "


sshtunnel.SSH_TIMEOUT = 5.0
sshtunnel.TUNNEL_TIMEOUT = 5.0


def get_server():
    # sshtunnel.DEFAULT_LOGLEVEL = logging.DEBUG
    server = sshtunnel.SSHTunnelForwarder(
        ('ssh.pythonanywhere.com'),
        ssh_username='**myuser**', ssh_password='**mypass**',
        remote_bind_address=('**myuser.mysql.pythonanywhere-services.com**', 3306))
    return server


server = get_server()
server.start()

while True:
    if server.is_active:
        print("alive... " + (time.ctime()))
        print(SelectCommand("SELECT * FROM A_table WHERE id = %s", (1,), mul=True).execute())
    else:
        print("reconnecting... " + time.ctime())
        server.stop()
        server = get_server()
        server.start()
    time.sleep(8)
Now I want to use sshtunnel to connect to my PythonAnywhere database, and I want to check the state of the tunnel: if it is connected, run the SELECT command, otherwise wait for a new connection. I tried to follow Python: Automatically reconnect ssh tunnel after remote server gone down, but my problem is that when I turn off my WiFi while querying the database,
my console shows the message (Could not establish connection from ('127.0.0.1', 54466) to remote side of the tunnel) followed by Socket exception: An existing connection was forcibly closed by the remote host (10054), and then my program stops. How can I fix this?
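One possible shape for the reconnect loop, sketched under the assumption that any failed query means the tunnel is down. It reuses get_server(), SelectCommand, and the "server disconnect " sentinel that execute() already returns on error.

server = get_server()
server.start()

while True:
    try:
        if not server.is_active:
            raise RuntimeError('tunnel is not active')
        result = SelectCommand("SELECT * FROM A_table WHERE id = %s", (1,), mul=True).execute()
        if result == "server disconnect ":
            # execute() swallows its own exceptions, so treat the sentinel as a failure
            raise RuntimeError('query failed, assuming the tunnel is down')
        print("alive... " + time.ctime())
        print(result)
    except Exception as e:
        print("reconnecting after error: {} ... {}".format(e, time.ctime()))
        try:
            server.stop()
        except Exception:
            pass  # the tunnel may already be gone
        server = get_server()
        server.start()
    time.sleep(8)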
So I've been at it for hours, and there is something really weird going on.
I am looping through code to check whether a table value has changed, and if so, run some code.
When I run the following program, everything works totally fine! The 'lock state' goes from Open to Closed as it should.
import time
import mysql.connector

host = '...'
database = '...'
user = '...'
password = '...'

conn = mysql.connector.connect(host=host, database=database, user=user, password=password)
conn.start_transaction(isolation_level='READ COMMITTED')

state = 'Open'


def get_web_lock_request():
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM `doorlock`.`state`")
        state = ''.join(cursor.fetchone())
        cursor.close()
        return state
    except mysql.connector.Error as e:
        print 'get_lock_state ' + format(e)
        return ''


def set_web_lock_request():
    try:
        cursor = conn.cursor()
        cursor.execute("UPDATE `doorlock`.`state` SET `state`='" + state + "'")
        conn.commit()
        cursor.close()
    except mysql.connector.Error as e:
        print 'set_lock_state: ' + format(e)


while 1:
    dbstate = get_web_lock_request()
    set_web_lock_request()
    if state == 'Open':
        state = 'Closed'
    else:
        state = 'Open'
    time.sleep(2)
    print dbstate
But in my main code this 'listening for a change' runs in a thread, and for some reason the UPDATE statement does not work there, even with conn.commit():
import time
import threading
import mysql.connector

conn = mysql.connector.connect(host=host, database=database, user=user, password=password)
conn.start_transaction(isolation_level='READ COMMITTED')


def get_web_lock_request():
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM `doorlock`.`state`")
        state = ''.join(cursor.fetchone())
        cursor.close()
        return state
    except mysql.connector.Error as e:
        print 'get_lock_state ' + format(e)
        return ''


def set_web_lock_request(state):
    try:
        cursor = conn.cursor()
        cursor.execute("UPDATE `doorlock`.`state` SET `state`='" + state + "'")
        conn.commit()
        cursor.close()
    except mysql.connector.Error as e:
        print 'set_lock_state: ' + format(e)


def web_request_listener():
    global updating
    try:
        while 1:
            print 'get_web_lock_request : ' + get_web_lock_request()
            print 'current_state : ' + current_state()
            if get_web_lock_request() != current_state() and not updating:
                if get_web_lock_request() == 'Open':
                    open_lock()
                    print 'DoorLock: State is now open'
                elif get_web_lock_request() == 'Closed':
                    close_lock()
                    print 'DoorLock: State is now closed'
            time.sleep(1)
    except KeyboardInterrupt:
        exit()


if __name__ == '__main__':
    try:
        button_listener_thread = threading.Thread(target=button_listener)
        web_request_listener_thread = threading.Thread(target=web_request_listener)
    except KeyboardInterrupt:
        exit()
I could really use some help; I literally have nothing else left to try :)
Floris
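Not a confirmed diagnosis, but two things stand out in the threaded version: the threads are created without ever calling .start(), and the module-level connection is shared across threads even though mysql.connector connections are documented as not thread-safe. A minimal sketch of giving the listener thread its own connection (reusing the host/database/user/password variables and the helper functions from the question):

import threading
import time
import mysql.connector

def make_conn():
    conn = mysql.connector.connect(host=host, database=database, user=user, password=password)
    conn.start_transaction(isolation_level='READ COMMITTED')
    return conn

def web_request_listener():
    conn = make_conn()  # this thread's own connection
    while True:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM `doorlock`.`state`")
        state = ''.join(cursor.fetchone())
        cursor.close()
        conn.commit()  # close out the read transaction before sleeping
        # ... compare state with current_state() and drive the lock as in the original ...
        time.sleep(1)

web_request_listener_thread = threading.Thread(target=web_request_listener)
web_request_listener_thread.start()  # note: the original __main__ block never starts its threads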
My kwarg table in **kwargs is not getting recognized when I invoke it.
class Database():
    def set_db_setting(self, host, username, passwd, database):
        try:
            self.host = host
            self.username = username
            self.passwd = passwd
            self.database = database
            self.db = pymysql.connect(host=host, user=username, passwd=passwd, db=database)
            print('connected to: {}'.format(database))
            return self.db
        except:
            print('\nerror connecting to database\n')

    def db_select(self, *selected_fields, **kwargs):
        self.selected_fields = selected_fields = list(selected_fields)
        self.table = (kwargs['table']
                      if 'table' in kwargs
                      else selected_fields.pop())
        try:
            with self.db.cursor() as cursor:
                sql_tld_id_query = Database.query_stmt_list[0] + ', '.join(selected_fields) + Database.query_stmt_list[4] + table + Database.query_stmt_list[5] + where_field + '=' + 'www.website.com'
                print("sql_tld_id_query is {}".format(sql_tld_id_query))
        except Exception as gatherid_err:
            print("exception was {}".format(gatherid_err))
            self.db.rollback()
I'm invoking it like:
dbclass = Database()
dbclass.set_db_setting('localhost', 'root', 'password', 'garbagedb')
dbclass.db_select('id', 'name', table='tld', where_field='name')
I'm getting an error like:
name 'table' is not defined
FULL TRACEBACK
invoked via:
import traceback
traceback.print_stack()
File "dbcrud.py", line 56, in <module>
dbclass.db_select('id', 'name', table='tld', where_field='name')
File "dbcrud.py", line 31, in db_select
traceback.print_stack()
self.selected_fields is ['id', 'name']
exception was name 'table' is not defined
What am I doing wrong here?
I added line continuations to make this fit horizontally...
sql_tld_id_query = Database.query_stmt_list[0]+ ', '.join(selected_fields) + \
Database.query_stmt_list[4] + table + Database.query_stmt_list[5] + \
where_field + '=' + 'www.website.com'
The bare table in that query expression should be self.table.
You blindly catch all exceptions with your try/except Exception block, which was probably hiding the real issue from you. It is better to figure out which specific kind of exception you want to catch, and only filter those out. For example, if I wanted a calculator program that didn't crash when a user tried to divide by zero, I would use try: ... except ZeroDivisionError as e: ..., as in the sketch below.
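A short, generic illustration of that advice (not tied to the pymysql code above):

def divide(a, b):
    try:
        return a / b
    except ZeroDivisionError as e:
        # only this specific error is handled; anything else still raises with a full traceback
        print('cannot divide by zero:', e)
        return None

print(divide(10, 2))  # 5.0
print(divide(10, 0))  # prints the message and returns None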