How can I delete certain rows from an SQLite table using Python 3?

Here is the code I wrote, but when I run it, it says that LIMIT and OFFSET are not defined.
def sql_delete(user_id, task_number):
    cur = connection.cursor()
    cur.execute('DELETE FROM tasks WHERE user_id = (SELECT user_id FROM tasks WHERE user_id = ? LIMIT 1 OFFSET ?)', (user_id, task_number))
    connection.commit()

UPDATE
Just now I understood what you were trying to do. Give this a try:
def sql_delete(user_id, task_number):
    cur = connection.cursor()
    cur.execute('DELETE FROM tasks WHERE user_id = (SELECT user_id FROM tasks WHERE user_id = ? LIMIT 1 OFFSET ?)', (user_id, int(task_number)))
    connection.commit()
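Note that the subquery above returns a user_id, so the outer DELETE matches every task belonging to that user, not just the one at the given offset. If the goal is to delete a single task, a minimal sketch keyed on SQLite's implicit rowid (assuming the table was not created WITHOUT ROWID) would be:

def sql_delete(user_id, task_number):
    cur = connection.cursor()
    # Select the one rowid at the requested offset, then delete exactly that row.
    cur.execute(
        'DELETE FROM tasks WHERE rowid = '
        '(SELECT rowid FROM tasks WHERE user_id = ? LIMIT 1 OFFSET ?)',
        (user_id, int(task_number)),
    )
    connection.commit()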

Related

pyodbc - cannot delete from MSSQL tables

Trying to delete some of the table entries using pyodbc results in nothing happening. I know for sure that the database connection is working as intended, since I can select data. Any suggestions as to what could be the cause?
get_user_id = conn.cursor()
get_user_id.execute('''
    SELECT b.UserId
    FROM Bindery b
    INNER JOIN ActiveUser au
        ON au.Id = b.UserId
    WHERE au.UserId = ?
''', user_to_kick)
id_list = [id[0] for id in get_user_id.fetchall()]
delete_user = conn.cursor()
# delete from bindery first
delete_user.execute('''
    DELETE FROM Bindery
    WHERE UserId in (?)
''', id_list)  # one ? binds one value; passing a longer list raises a parameter-count error
conn.commit  # missing parentheses, so commit() is never actually called
# delete from active user list
delete_user.execute('''
    DELETE FROM ActiveUser
    WHERE UserId = ?
''', user_to_kick)
conn.commit  # same here
delete_user.close()
conn.close  # same here: close() is never called
This code block should, imo, trigger the delete queries, but nothing happens. The select query does get the data.
UPDATE:
After some adjustments (actually calling conn.commit() and conn.close(), and building one placeholder per list element instead of passing the whole list to a single ?), the delete query now works as intended.
get_user_id = conn.cursor()
get_user_id.execute('''
    SELECT b.UserId
    FROM Bindery b
    INNER JOIN ActiveUser au
        ON au.Id = b.UserId
    WHERE au.UserId = ?
''', user_to_kick)
id_list = [id[0] for id in get_user_id.fetchall()]
# build one ? placeholder per id in the list
placeholders = ", ".join(["?"] * len(id_list))
sql = 'DELETE FROM Bindery WHERE UserId IN (%s)' % placeholders
delete_user = conn.cursor()
# delete from bindery first
delete_user.execute(sql, id_list)
conn.commit()
# delete from active user list
delete_user.execute('''
    DELETE FROM ActiveUser
    WHERE UserId = ?
''', user_to_kick)
conn.commit()
get_user_id.close()
delete_user.close()
conn.close()
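For what it's worth, a sketch of an alternative that avoids building SQL strings entirely: delete the ids one at a time with pyodbc's executemany, using the same connection and id_list as above.

# Hypothetical alternative: one parameterized DELETE per id, no dynamic SQL.
delete_user.executemany(
    'DELETE FROM Bindery WHERE UserId = ?',
    [(uid,) for uid in id_list],
)
conn.commit()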

Using LOCK to prevent serializable isolation issue in Redshift

I have a Python script that selects from a customer table and then inserts the rows into another table.
# coding: utf-8
import psycopg2
from psycopg2 import sql
from provider import post
from datetime import datetime
from uuid import uuid4
import constants
from utils import get_env, measure_runtime, raise_message, pprint, retry
from error_report import report, get_logger

db_url = get_env('conn_redshift')
conn = psycopg2.connect(db_url)
cur = conn.cursor()

@retry()
@raise_message("Unable to insert tuple to stripe customers")
def insert_tuple(cur, account, dictionary):
    cur.execute(
        sql.SQL("""
            SELECT id, created_at FROM {0}.customers WHERE stripe_id = (%s);
        """).format(sql.Identifier(account)), (dictionary['id'], ))
    conn.commit()
    customers = cur.fetchone()
    print(f" > update customers {dictionary['id']}")
    if customers and len(customers) == 2:
        (id, created_at) = customers
    else:
        id = str(uuid4())
        created_at = datetime.now().isoformat()
    cur.execute(
        """
        INSERT INTO stage_customers (
            stripe_id,
            name_1,
            name_2,
            address,
            postcode,
            city,
            country_id,
            updated_at,
            created_at,
            id
        ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """,
        (dictionary['id'],
         dictionary['name_1'], dictionary['name_2'], dictionary['address'],
         dictionary['postcode'], dictionary['city'], dictionary['country_id'],
         dictionary['updated_at'], created_at, id))
    conn.commit()
@raise_message("Unable to import customers")
def import_customers(account, customers):
    if account != constants.PUBLIC_STRIPE_CH and account != constants.PUBLIC_STRIPE_DE:
        raise Exception('Unknown account -> ' + account)
    if customers:
        # db_url = get_env('conn_redshift')
        # conn = psycopg2.connect(db_url)
        # cur = conn.cursor()
        print(f"{account} customers import: start # {datetime.now()}")
        cur.execute(
            sql.SQL("""
                CREATE temp TABLE stage_customers (LIKE {0}.customers)
            """)
            .format(sql.Identifier(account))
        )
        conn.commit()
        for customer in customers:
            insert_tuple(cur, account, customer)
        cur.execute(
            sql.SQL("""
                DELETE FROM {0}.customers WHERE {0}.customers.stripe_id IN (SELECT stripe_id FROM stage_customers);
            """)
            .format(sql.Identifier(account))
        )
        conn.commit()
        cur.execute(
            sql.SQL("""
                INSERT INTO {0}.customers SELECT * FROM stage_customers;
            """)
            .format(sql.Identifier(account))
        )
        conn.commit()
        cur.execute("DROP TABLE stage_customers;")
        conn.commit()
        print(f"{account} contact import: finished # {datetime.now()}")
@measure_runtime
# @report(get_logger(channel="#dwh", app_name="Stripe Customers"))
def main():
    ch_stripe_customers = post(constants.PUBLIC_STRIPE_CH, "/contact/search",
                               constants.PAYLOAD)
    de_stripe_customers = post(constants.PUBLIC_STRIPE_DE, "/contact/search",
                               constants.PAYLOAD)
    import_customers(constants.PUBLIC_STRIPE_CH, ch_stripe_customers)
    import_customers(constants.PUBLIC_STRIPE_DE, de_stripe_customers)

if __name__ == "__main__":
    main()
    cur.close()
    conn.close()
While running the Python script above, I receive the error:
Serializable isolation violation on table - 20123891, transactions forming the cycle are: 282075744, 238073135
Upon doing some research, I found that Redshift recommends using LOCK to prevent this issue.
Could you please give an example of how I can use the LOCK command in this Python script?
Or does psycopg2 perhaps provide functionality to prevent this issue?
---Update---
If I try:
BEGIN;
LOCK customers;

def insert_tuple(cur, account, dictionary):
    cur.execute(
        sql.SQL("""
            SELECT id, created_at FROM {0}.customers WHERE customer_id = (%s);
        """).format(sql.Identifier(account)), (dictionary['id'], ))
I got the error:
begin;
^
SyntaxError: invalid syntax
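The SyntaxError is expected there: BEGIN; and LOCK customers; are SQL statements, not Python, so they cannot sit at module level in the script. A minimal sketch of one way to place the lock, assuming psycopg2's default behavior of opening a transaction implicitly on the first execute() (so no literal BEGIN is needed):

def insert_tuple(cur, account, dictionary):
    # Acquire the table lock first, inside the same implicit transaction
    # as the SELECT and INSERT that follow.
    cur.execute(sql.SQL("LOCK {0}.customers;").format(sql.Identifier(account)))
    cur.execute(
        sql.SQL("""
            SELECT id, created_at FROM {0}.customers WHERE stripe_id = (%s);
        """).format(sql.Identifier(account)), (dictionary['id'], ))
    # ... inserts as before ...
    # conn.commit() ends the transaction and releases the lock.

Note that this only helps if the intermediate conn.commit() calls are removed, so the lock actually covers the whole read-check-insert cycle.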

cursor.execute Updating Table not updating

I have a table "Users" with a column "g_score". The other column I am storing is "username". I am trying to update g_score via the username I get, but when I send the request, the value does not update. g_score is stored as an INT, and I am looking to increment its value by 1 each time.
The g_score value defaults to 0.
The value is not being updated by the following code.
I'm going to leave some snippets here.
Creating the table:
cursor.execute("CREATE TABLE IF NOT EXISTS Users(username TEXT, hash TEXT, salt TEXT, g_score INT)")
If the user does not exist, we do:
cursor.execute("INSERT INTO Users VALUES(?, ?, ?, ?)", (username, hashed_password, salt, 0))
The following code does not update the g_score:
db = lite.connect('log.db', check_same_thread=False)
cursor = db.cursor()
sql = ("UPDATE Users SET g_score = g_score + 1 WHERE username = ?")
cursor.execute(sql, [g_winner.get_name()])
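The usual culprit when an sqlite3 UPDATE silently changes nothing is a missing commit: the module opens a transaction implicitly, and uncommitted changes are rolled back when the connection is closed. A sketch of the same snippet with the commit added:

import sqlite3 as lite

db = lite.connect('log.db', check_same_thread=False)
cursor = db.cursor()
sql = "UPDATE Users SET g_score = g_score + 1 WHERE username = ?"
cursor.execute(sql, [g_winner.get_name()])
db.commit()  # persist the UPDATE; without this the change is lost on close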

Python - Sequence of interdependent SQL queries

I am running three consecutive, interdependent SQL queries, and I am wondering if my code could be more efficient. I had to create three separate cursors in my method. What can I do to make it more efficient?
What I am doing in that method is:
1. Insert a new contributor into my contributors table, based on the values sent in the form
2. Get the primary key of that new contributor, which is its contributor_id
3. Insert a new question into the questions table, whose foreign key is the contributor_id from the contributors table
I don't want to use an ORM such as SQLAlchemy.
import pymysql
from flask import request  # the Flask app object behind @app.route is created elsewhere

conn = pymysql.connect(
    host='localhost',
    user='root',
    passwd='xxx!',
    db='xxx'
)

@app.route('/add_contributor', methods=['POST', 'GET'])
def add_contributor():
    name = request.form.get('contrib_name')
    question = request.form.get('question')
    sql_1 = "INSERT INTO contributors (name) VALUES (%s)"
    sql_2 = "SELECT contributor_id FROM contributors WHERE name=(%s)"
    sql_3 = "INSERT INTO questions (contributor_id, question_text) VALUES (%s, %s)"
    cursor = conn.cursor()
    cursor.execute(sql_1, name)
    conn.commit()
    cursor_2 = conn.cursor()
    cursor_2.execute(sql_2, name)
    contrib_val = cursor_2.fetchall()
    contrib_id = contrib_val[0][0]
    cursor_3 = conn.cursor()
    cursor_3.execute(sql_3, (contrib_id, question))
    conn.commit()
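A sketch of a tighter version, assuming contributor_id is an AUTO_INCREMENT primary key: pymysql exposes the id generated by the last INSERT as cursor.lastrowid, which removes the SELECT, the extra cursors, and the risk of grabbing the wrong row when two contributors share a name.

@app.route('/add_contributor', methods=['POST', 'GET'])
def add_contributor():
    name = request.form.get('contrib_name')
    question = request.form.get('question')
    with conn.cursor() as cursor:  # one cursor is enough for the whole sequence
        cursor.execute("INSERT INTO contributors (name) VALUES (%s)", (name,))
        contrib_id = cursor.lastrowid  # id generated by the INSERT above
        cursor.execute(
            "INSERT INTO questions (contributor_id, question_text) VALUES (%s, %s)",
            (contrib_id, question),
        )
    conn.commit()  # a single commit makes the two inserts atomic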

what slows down parsing?

I have big XML files to parse (about 200k lines, 10 MB). The structure is the following:
<el1>
  <el2>
    <el3>
      <el3-1>...</el3-1>
      <el3-2>...</el3-2>
    </el3>
    <el4>
      <el4-1>...</el4-1>
      <el4-2>...</el4-2>
    </el4>
    <el5>
      <el5-1>...</el5-1>
      <el5-2>...</el5-2>
    </el5>
  </el2>
</el1>
Here is my code:
from xml.etree import ElementTree

tree = ElementTree.parse(filename)
doc = tree.getroot()

cursor.execute('INSERT INTO first_table() VALUES()')
cursor.execute('SELECT id FROM first_table ORDER BY id DESC LIMIT 1')
row = cursor.fetchone()
v_id1 = row[0]

for el1 in doc.findall('EL1'):
    cursor.execute('INSERT INTO second_table(v_id1) VALUES(%s)', (v_id1,))
    cursor.execute('SELECT id FROM second_table ORDER BY id DESC LIMIT 1')
    row = cursor.fetchone()
    v_id2 = row[0]
    for el2 in el1.findall('EL2'):
        cursor.execute('INSERT INTO third_table(v_id2) VALUES(%s)', (v_id2,))
        cursor.execute('SELECT id FROM third_table ORDER BY id DESC LIMIT 1')
        row = cursor.fetchone()
        v_id3 = row[0]
        for el3 in el2.findall('EL3'):
            cursor.execute('INSERT INTO fourth_table(v_id3) VALUES(%s)', (v_id3,))
            cursor.execute('SELECT id FROM fourth_table ORDER BY id DESC LIMIT 1')
            row = cursor.fetchone()
            v_id4 = row[0]
            for el4 in el3.findall('EL4'):
                cursor.execute('INSERT INTO fifth_table(v_id4) VALUES(%s)', (v_id4,))
                for el5 in el4.findall('EL5'):
                    cursor.execute('INSERT INTO sixth_table(v_id4) VALUES(%s)', (v_id4,))
                    cursor.execute('SELECT id FROM sixth_table ORDER BY id DESC LIMIT 1')
                    row = cursor.fetchone()
                    v_id5 = row[0]
                    ...
conn.commit()
Basically, I get values from attributes and send them to the database. When I need to process nested elements, I have to SELECT the last inserted ID from the database and use it as a foreign key in the next INSERT statement.
The whole process takes about 50 s, but apparently that's too long for the data I have. The SELECT statements surely take some time, but I am already selecting only one column, limited to the last row.
I don't know whether it can be made faster, since I'm not good at programming, so I'm asking you guys.
You have 4 nested for loops. That's why. It is O(n^4).
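Beyond the nesting, every inserted row also pays for an extra SELECT round trip to fetch its id. Assuming a driver that exposes auto-generated keys as cursor.lastrowid (MySQL-style drivers and sqlite3 both do), a sketch that halves the number of statements:

for el1 in doc.findall('EL1'):
    cursor.execute('INSERT INTO second_table(v_id1) VALUES(%s)', (v_id1,))
    v_id2 = cursor.lastrowid  # id of the row just inserted, no SELECT needed
    for el2 in el1.findall('EL2'):
        cursor.execute('INSERT INTO third_table(v_id2) VALUES(%s)', (v_id2,))
        v_id3 = cursor.lastrowid
        # ... same pattern for the deeper levels ...
conn.commit()  # one commit at the end keeps it a single transaction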
