Unexpected unindent in Python code that executes an SQL file - python

I have this Python code to execute an SQL file:

import mysql.connector

cnx = mysql.connector.connect(user='skynetadmin',
                              password='Skynetadmin1',
                              host='silmysskytest01.sing.micron.com',
                              database='skynet_msa')
cursor = cnx.cursor()

def executeScriptsFromFile(filename):
    fd = open(filename, 'r')
    sqlFile = fd.read()
    fd.close()
    sqlCommands = sqlFile.split(';')
    for command in sqlCommands:
        try:
            if command.strip() != '':
                cursor.execute(command)

executeScriptsFromFile('C:\Users\gsumarlin\Documents\dumps\Dump20220428\Query_for_testingintern.sql')
cnx.commit()
However, I get this error:

executeScriptsFromFile('C:\Users\gsumarlin\Documents\dumps\Dump20220428')
^ unexpected unindent

I tried moving the indents around, but that did not solve it.
Any help is appreciated!

import mysql.connector

def executeScriptsFromFile(Query_for_testingintern):
    fd = open(Query_for_testingintern, 'r')
    sqlFile = fd.read()
    fd.close()
    sqlCommands = sqlFile.split(';')
    for command in sqlCommands:
        try:
            if command.strip() != '':
                cursor.execute(command)
        except:
            # a try block must be closed by an except (or finally) clause;
            # the missing except is what caused the "unexpected unindent" error
            pass

cnx = mysql.connector.connect(user='skynetadmin',
                              password='Skynetadmin1',
                              host='silmysskytest01.sing.micron.com',
                              database='skynet_msa')
cursor = cnx.cursor()
# raw string avoids backslash-escape warnings in the Windows path
executeScriptsFromFile(r"C:\Users\gsumarlin\Documents\dumps\Dump20220428")
cnx.commit()
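As a side note, mysql-connector-python can also run a multi-statement script in one call via the multi=True flag of cursor.execute(), which avoids splitting on ';' (and the corner case of a semicolon inside a string literal). A minimal sketch, assuming the same cursor as above:

def execute_script(cursor, filename):
    with open(filename, 'r') as fd:
        sql_script = fd.read()
    # multi=True yields one result object per statement in the script
    for result in cursor.execute(sql_script, multi=True):
        if result.with_rows:
            result.fetchall()  # drain any result set before the next statement runs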

Do you use Notepad++ or something like that?
You can simply open the file there and select all (Ctrl + A)
Then click on "Edit" -> Non printable characters -> Convert Tabs to Spaces


two for loop in python fileinput

cur = mysqlcon.cursor()
sql_select_query = """select name,AES_DECRYPT(passwd, 'hardpass') as passwd from fullusers WHERE id = %s"""
# set variable in query
cur.execute(sql_select_query, (iduser,))
myresult = cur.fetchone()
nameuser = myresult['name']
passuser = myresult['passwd']
psuser = str(passuser, 'utf-8')
if mysqlcon:
    print("Connected Successfully")
else:
    print("Connection Not Established")
query = """select * from v_ips WHERE uid = %s"""
cur.execute(query, (iduser,))
data = cur.fetchall()
file_name = '/usr/local/etc/3proxy.cfg'
for line in fileinput.FileInput(file_name, inplace=1):
    if 'allow login,' + nameuser + '' in line:
        for x in data:
            line = line.rstrip()
            line = line.replace(line, line + '\rproxy -n -a -p8989 -i' + x['ip'] + ' -e' + x['ip'] + '')
I need to add several lines from the database to the file after a certain word. With the structure above, all the data in the file simply disappears. Maybe I'm doing something wrong?
for line in fileinput.FileInput(file_name, inplace=1):
    if 'allow login,' + nameuser + '' in line:
        line = line.rstrip()
        for x in data:
            line = line.replace(line, line + '\rproxy -n -a -p8989 -i' + x['ip'] + ' -e' + x['ip'] + '')
This structure also deletes all the data in the file; swapping the loops likewise just deletes everything.
I am not familiar with the fileinput library, so this might not be "best practice" by any means; however, I got it working at least.
With the test file test_file.txt containing this:
test
test
no
no
test
no
and with the following code:
import fileinput

file_name = "test_file.txt"
for line in fileinput.FileInput(file_name, inplace=1):
    if 'test' in line:
        print(line.replace(line, "this line has officially been tested"))
    else:
        print(line, end="")
I got the following result:
this line has officially been tested
this line has officially been tested
no
no
this line has officially been tested
no
Using print within the FileInput block prints to the file, not stdout: with inplace=1, fileinput redirects standard output to the file being edited, so whatever you print becomes the file's new contents (and any line you don't re-emit is lost).
Additionally, it is recommended to use context managers whenever operating on files.
Example:
import fileinput

file_name = "test_file.txt"
with fileinput.FileInput(file_name, inplace=1) as file:
    for line in file:
        if 'test' in line:
            print(line.replace(line, "this line has officially been tested"))
        else:
            print(line, end="")
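The same idea applies to the original goal: the file vanished because nothing was printed for the lines that were read, so every line has to be re-emitted. A minimal sketch that prints the extra proxy lines right after the matched line; nameuser and data (rows with an 'ip' key) are assumed to come from the database queries in the question:

import fileinput

file_name = '/usr/local/etc/3proxy.cfg'
with fileinput.FileInput(file_name, inplace=1) as file:
    for line in file:
        print(line, end="")  # re-emit every original line, or it is lost
        if 'allow login,' + nameuser in line:
            for x in data:  # assumed shape: rows with an 'ip' key, as in the question
                print('proxy -n -a -p8989 -i' + x['ip'] + ' -e' + x['ip'])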

Invalid Argument error on the python code

I am a beginner in Python, and I have this code to decompress log files, but I get an "Invalid argument" error. I don't know why; my guess is that the log file is too big, since I am scanning a 2 GB log file, but I have no idea how to fix it. Please help, thank you. Below is my code, followed by the error:
import glob
import gzip
import os
import pymysql
import logging

# path to gz directory
GZ_DIR = '/Users/kiya/Desktop/mysql/csv'

# Database information
DB_HOST = 'locahost'
DB_USER = 'dbuser'
DB_PASS = 'dbPassword'
DB_NAME = 'dbname'
LOGFILE = "exception.log"

def csv_reader(file, header=False):
    import csv
    with open(file, "r") as f:
        reader = csv.reader(f)
        if header:
            next(reader)
        for row in reader:
            yield row

def import_sql(filename, dbHostName, dbUser, dbPassword, databaseName):
    db = pymysql.connect(host=dbHostName,
                         user=dbUser,
                         password=dbPassword,
                         db=databaseName,
                         charset='utf8')
    for row in csv_reader(filename, False):
        # prepare a cursor object using cursor() method
        with db.cursor() as cursor:
            if row[3] == "THREAT" and row[4] == "url":
                sql = ("INSERT INTO PADIAGDB.url ("
                       "Domain,Receive_Time,Serial,Type,Threat_Content_Type,"
                       "Config_Version,Generate_Time,Source_address,Destination_address,"
                       "NAT_Source_IP,NAT_Destination_IP,Rule,Source_User,"
                       "Destination_User,Application,Virtual_System,Source_Zone,"
                       "Destination_Zone,Inbound_Interface,Outbound_Interface,Log_Action,"
                       "Time_Logged,Session_ID,Repeat_Count,Source_Port,Destination_Port,"
                       "NAT_Source_Port,NAT_Destination_Port,Flags,IP_Protocol,Action,"
                       "URL_Filename,Threat_Content_Name,Category,Severity,Direction,"
                       "Sequence_Number,Action_Flags,Source_Country,Destination_Country,"
                       "cpadding,contenttype,pcap_id,filedigest,cloud,url_idx,user_agent,"
                       "filetype,xff,referer,sender,subject,recipient,reportid,"
                       "dg_hier_level_1,dg_hier_level_2,dg_hier_level_3,dg_hier_level_4,"
                       "Virtual_System_Name,Device_Name,file_url )"
                       ""
                       "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,"
                       "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,"
                       "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s"
                       "); ")
            elif row[3] == "SYSTEM":
                sql = ("INSERT INTO PADIAGDB.system ("
                       "Domain,Receive_Time,Serial,Type,Threat_Content_Type,Config_Version,"
                       "Generate_Time,Virtual_System,Event_ID,Object,fmt,id,module,Severity,"
                       "Description,Sequence_Number,Action_Flags,dg_hier_level_1,"
                       "dg_hier_level_2,dg_hier_level_3,dg_hier_level_4,Virtual_System_Name,"
                       "Device_Name )"
                       ""
                       "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,"
                       "%s,%s,%s );")
            else:
                continue
            try:
                cursor.execute('SET foreign_key_checks = 0')
                # Execute the SQL command
                r = cursor.execute(sql, row)
                # Commit your changes in the database
                cursor.execute('SET foreign_key_checks = 1')
                db.commit()
            except Exception as e:
                logging.exception(e)
                db.rollback()
    # disconnect from server
    db.close()

gz_files = (gz for gz in glob.glob(os.path.join(GZ_DIR, '*.gz')))
for gz_file in gz_files:
    with gzip.open(gz_file, 'rb') as in_file:
        s = in_file.read()
    sql_file = gz_file[:-3]
    sql_file = sql_file[:-4] + '.csv'
    with open(sql_file, 'wb') as out_file:
        out_file.write(s)
    import_sql(out_file, DB_HOST, DB_USER, DB_PASS, DB_NAME)
    os.remove(sql_file)
This is the error I got:
Traceback (most recent call last):
File "/Users/kiya/Desktop/mysql/csv/sql3.py", line 180, in <module>
out_file.write(s)
OSError: [Errno 22] Invalid argument
For reading big files, you will have to read and write in chunks; try something like this (draft):
fr = open(input_file, 'rb')
fw = open(output_file, 'wb')
while True:
    chunk = fr.read(1024)
    if not chunk:
        break
    fw.write(chunk)
fr.close()
fw.close()
You can use context managers of course, aka with.
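A with-based sketch of the same idea, applied to the decompression step from the question: shutil.copyfileobj streams between file objects in fixed-size chunks, so the 2 GB file is never read or written in one piece. This is a sketch rather than the asker's code, and the 1 MiB chunk size is an arbitrary choice:

import gzip
import shutil

def decompress_in_chunks(gz_path, csv_path):
    # stream from the gzip file to the output, 1 MiB at a time
    with gzip.open(gz_path, 'rb') as in_file, open(csv_path, 'wb') as out_file:
        shutil.copyfileobj(in_file, out_file, length=1024 * 1024)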

Escaping the quote identifier while reading insert statement from a .sql file in python

I am new to Python.
As part of the script, I am opening a file for reading, adding each line to a list, and executing the inserts into the table.
Snippet:
def convertFileToList(input_file):
    try:
        global ins_list
        ins_list = []
        f = io.open(input_file, 'r', encoding='windows-1252')
    except IOError:
        pcm.write_log(log_fname, 1, "Could not read file:" + input_file + "\n")
        return -1
    with f:
        for line in f:
            line = line.replace('\n', '')
            ins_list.append(line)
    f.close()
    return ins_list
Insert:
def insert(list):
    rows_affected = 0
    try:
        chk_db_conn()
        for query in list:
            db_cursor.execute(query)
            db_conn.commit()
            if (db_cursor.rowcount < 0):
                pcm.write_log(log_fname, 1, "No rows were affected")
            else:
                rows_affected = rows_affected + db_cursor.rowcount
    except:
        return -1
Here, the list is just the insert statements from the SQL file.
After this, I am iterating over the list and calling db_cursor.execute().
But I have a problem: whenever there is a single quote in the insert statement, the execution fails:
Example:
Insert into TABLE (Id,name,ColName,Val1,Val2) values (11,'tableName','name','Inter','Intern's');
Is there a way to check whether there is a quote inside a value, escape it, and then insert that row?
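For what it's worth, standard SQL escapes a single quote inside a string literal by doubling it ('Intern''s'). If you control how the statements are generated, though, the usual way to sidestep the problem entirely is to pass the values as query parameters instead of splicing them into the statement text. A minimal sketch of the parameterized form, reusing db_cursor and db_conn from the snippet above and the table and columns from the example (the %s placeholder style assumes a driver such as MySQLdb or pymysql):

# the driver quotes and escapes each value itself, so "Intern's" is safe
sql = ("INSERT INTO TABLE (Id, name, ColName, Val1, Val2) "
       "VALUES (%s, %s, %s, %s, %s)")
db_cursor.execute(sql, (11, 'tableName', 'name', 'Inter', "Intern's"))
db_conn.commit()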

How to query unicode database with ascii characters

I am currently running a query on my PostgreSQL database that ignores German characters (umlauts). However, I do not want to lose these characters and would rather have the German characters, or at least their equivalents (e.g. ä = ae), in the output of the query. Running Python 2.7.12.
When I change the encode error handler to replace or xmlcharrefreplace, I get the following error:
psycopg2.ProgrammingError: syntax error at or near "?"
LINE 1: ?SELECT
Code Snippet:
# -*- coding: utf-8 -*-
connection_str = r'postgresql://' + user + ':' + password + '#' + host + '/' + database

def query_db(conn, sql):
    with conn.cursor() as curs:
        curs.execute(sql)
        rows = curs.fetchall()
        print("fetched %s rows from db" % len(rows))
        return rows

with psycopg2.connect(connection_str) as conn:
    for filename in files:
        # Read SQL
        sql = u""
        f = codecs.open(os.path.join(SQL_LOC, filename), "r", "utf-8")
        for line in f:
            sql += line.encode('ascii', 'replace').replace('\r\n', ' ')
        rows = query_db(conn, f)
How can I pass a query as a unicode object with German characters?
I also tried decoding the query as UTF-8, but then I get the following error:
UnicodeEncodeError: 'ascii' codec can't encode character u'\xa0' in position 20: ordinal not in range(128)
Here is a solution to obtain their encoded equivalents. You will be able to re-encode them later, and the query will not raise an error:
SELECT convert_from(BYTEA 'foo ᚠ bar'::bytea, 'latin-1');
+----------------+
| convert_from |
|----------------|
| foo á<U+009A>  bar |
+----------------+
SELECT 1
Time: 0.011s
You just need conn.set_client_encoding("utf-8"), and then you can execute unicode strings directly - SQL and results will be encoded and decoded on the fly:
$ cat psycopg2-unicode.py
import sys
import os
import psycopg2
import csv

with psycopg2.connect("") as conn:
    conn.set_client_encoding("utf-8")
    for filename in sys.argv[1:]:
        file = open(filename, "r", encoding="utf-8")
        sql = file.read()
        with conn.cursor() as cursor:
            cursor.execute(sql)
            try:
                rows = cursor.fetchall()
            except psycopg2.ProgrammingError as err:
                # No results
                continue
            with open(filename + ".out", "w", encoding="utf-8", newline="") as outfile:
                csv.writer(outfile, dialect="excel-tab").writerows(rows)
$ cat sql0.sql
create temporary table t(v) as
select 'The quick brown fox jumps over the lazy dog.'
union all
select 'Zwölf große Boxkämpfer jagen Viktor quer über den Sylter Deich.'
union all
select 'Любя, съешь щипцы, — вздохнёт мэр, — кайф жгуч.'
union all
select 'Mężny bądź, chroń pułk twój i sześć flag.'
;
$ cat sql1.sql
select * from t;
$ python3 psycopg2-unicode.py sql0.sql sql1.sql
$ cat sql1.sql.out
The quick brown fox jumps over the lazy dog.
Zwölf große Boxkämpfer jagen Viktor quer über den Sylter Deich.
Любя, съешь щипцы, — вздохнёт мэр, — кайф жгуч.
Mężny bądź, chroń pułk twój i sześć flag.
A Python 2 version of this program is a little more complicated, as we need to tell the driver that we'd like return values as unicode objects. Also, the csv module I used for output does not support unicode, so it needs a workaround. Here it is:
$ cat psycopg2-unicode2.py
from __future__ import print_function
import sys
import os
import csv
import codecs
import psycopg2
import psycopg2.extensions

psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

with psycopg2.connect("") as conn:
    conn.set_client_encoding("utf-8")
    for filename in sys.argv[1:]:
        file = codecs.open(filename, "r", encoding="utf-8")
        sql = file.read()
        with conn.cursor() as cursor:
            cursor.execute(sql)
            try:
                rows = cursor.fetchall()
            except psycopg2.ProgrammingError as err:
                # No results from SQL
                continue
            with open(filename + ".out", "wb") as outfile:
                for row in rows:
                    row_utf8 = [v.encode('utf-8') for v in row]
                    csv.writer(outfile, dialect="excel-tab").writerow(row_utf8)

How to read special characters encoded in UTF-8 in python

I was trying to extract some data from a MySQL database using Python, but I have a problem with special characters (the data are strings in FR, ES, DE, and IT). Whenever a word has a special character (like an accent, á, ñ, etc.), it is not encoded properly in the file (I'm creating a CSV with the extracted data).
This is the code I was using:
import mysql.connector

if __name__ == '__main__':
    cnx = mysql.connector.connect(user='user', password='psswrd',
                                  host='slave',
                                  database='DB',
                                  buffered=True)
    us_id_list = ['496305']
    f = open('missing_cat_mappings.csv', 'w')
    for (us_id) in us_id_list:
        print us_id
        mapping_cursor = cnx.cursor()
        query = (format(user_id=us_id,))
        success = False
        fails = 0
        while not success:
            try:
                print "try" + str(fails)
                mapping_cursor.execute(query)
                success = True
            except:
                fails += 1
                if fails > 10:
                    raise
        for row in mapping_cursor:
            f.write(str(row) + "\n")
        mapping_cursor.close()
    f.close()
    cnx.close()
I added:
#!/usr/bin/python
# vim: set fileencoding=<UTF-8> :
at the beginning, but it didn't make any difference.
Basically, you will need to open the CSV file in binary mode, 'wb', not text mode 'w'.
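A minimal sketch of that suggestion in the Python 2 style of the question (the unicode check below is Python 2 only), reusing mapping_cursor from the question after the query has been executed; whether the driver returns unicode objects depends on the connection settings, so the isinstance check is a hedge:

import csv

# binary mode ('wb') plus an explicit UTF-8 encode keeps á, ñ, etc. intact
with open('missing_cat_mappings.csv', 'wb') as f:
    writer = csv.writer(f)
    for row in mapping_cursor:
        writer.writerow([v.encode('utf-8') if isinstance(v, unicode) else v
                         for v in row])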
