How do I dump a single sqlite3 table in Python? - python
I would like to dump only one table but by the looks of it, there is no parameter for this.
I found this example of the dump but it is for all the tables in the DB:
# Convert file existing_db.db to SQL dump file dump.sql
import sqlite3, os

con = sqlite3.connect('existing_db.db')
try:
    with open('dump.sql', 'w') as f:
        # iterdump() yields one SQL statement per line for the whole database.
        for line in con.iterdump():
            f.write('%s\n' % line)
finally:
    # iterdump() does not close the connection; do it explicitly.
    con.close()
You can copy only the single table in an in memory db:
import sqlite3
def getTableDump(db_file, table_to_dump):
    """Return an SQL text dump (schema + rows) of one table of *db_file*.

    Copies the single table into an in-memory database via ATTACH, then
    dumps that database with Connection.iterdump().

    :param db_file: path to the SQLite database file to read.
    :param table_to_dump: name of the table to dump.
    :returns: the dump as a single newline-joined string of SQL statements.
    :raises ValueError: if the table does not exist in *db_file*.
    """
    conn = sqlite3.connect(':memory:')
    try:
        cu = conn.cursor()
        # The attached file name can be passed as a bound parameter.
        cu.execute("ATTACH DATABASE ? AS attached_db", (db_file,))
        cu.execute(
            "SELECT sql FROM attached_db.sqlite_master "
            "WHERE type = 'table' AND name = ?", (table_to_dump,))
        row = cu.fetchone()
        if row is None:
            raise ValueError("no such table: %s" % table_to_dump)
        cu.execute(row[0])
        # Table names cannot be bound parameters; quote the identifier
        # (doubling any embedded double quotes) instead of raw concatenation.
        ident = '"%s"' % table_to_dump.replace('"', '""')
        cu.execute("INSERT INTO %s SELECT * FROM attached_db.%s"
                   % (ident, ident))
        conn.commit()
        cu.execute("DETACH DATABASE attached_db")
        return "\n".join(conn.iterdump())
    finally:
        conn.close()
TABLE_TO_DUMP = 'table_to_dump'
DB_FILE = 'db_file'
# print() call form works on both Python 2 and Python 3
# (the original `print getTableDump(...)` statement is Python 2 only).
print(getTableDump(DB_FILE, TABLE_TO_DUMP))
Pro:
Simplicity and reliability: you don't have to re-write any library method, and you are more assured that the code is compatible with future versions of the sqlite3 module.
Con:
You need to load the whole table in memory, which may or may not be a big deal depending on how big the table is, and how much memory is available.
The dump implementation lives here: http://coverage.livinglogic.de/Lib/sqlite3/dump.py.html (local path: PythonPath/Lib/sqlite3/dump.py)
You can modify it a little:
# Mimic the sqlite3 console shell's .dump command
# Author: Paul Kippes <kippesp#gmail.com>
def _iterdump(connection, table_name):
"""
Returns an iterator to the dump of the database in an SQL text format.
Used to produce an SQL dump of the database. Useful to save an in-memory
database for later restoration. This function should not be called
directly but instead called from the Connection method, iterdump().
"""
cu = connection.cursor()
table_name = table_name
yield('BEGIN TRANSACTION;')
# sqlite_master table contains the SQL CREATE statements for the database.
q = """
SELECT name, type, sql
FROM sqlite_master
WHERE sql NOT NULL AND
type == 'table' AND
name == :table_name
"""
schema_res = cu.execute(q, {'table_name': table_name})
for table_name, type, sql in schema_res.fetchall():
if table_name == 'sqlite_sequence':
yield('DELETE FROM sqlite_sequence;')
elif table_name == 'sqlite_stat1':
yield('ANALYZE sqlite_master;')
elif table_name.startswith('sqlite_'):
continue
else:
yield('%s;' % sql)
# Build the insert statement for each row of the current table
res = cu.execute("PRAGMA table_info('%s')" % table_name)
column_names = [str(table_info[1]) for table_info in res.fetchall()]
q = "SELECT 'INSERT INTO \"%(tbl_name)s\" VALUES("
q += ",".join(["'||quote(" + col + ")||'" for col in column_names])
q += ")' FROM '%(tbl_name)s'"
query_res = cu.execute(q % {'tbl_name': table_name})
for row in query_res:
yield("%s;" % row[0])
# Now when the type is 'index', 'trigger', or 'view'
#q = """
# SELECT name, type, sql
# FROM sqlite_master
# WHERE sql NOT NULL AND
# type IN ('index', 'trigger', 'view')
# """
#schema_res = cu.execute(q)
#for name, type, sql in schema_res.fetchall():
# yield('%s;' % sql)
yield('COMMIT;')
Now it accepts a table name as the second argument.
You can use it like this:
# Write the single-table dump produced by _iterdump() to dump.sql,
# one SQL statement per line.
with open('dump.sql', 'w') as f:
    f.writelines('%s\n' % line for line in _iterdump(con, 'GTS_vehicle'))
Will get something like:
BEGIN TRANSACTION;
CREATE TABLE "GTS_vehicle" ("id" integer NOT NULL PRIMARY KEY, "name" varchar(20) NOT NULL, "company_id" integer NULL, "license_plate" varchar(20) NULL, "icon" varchar(100) NOT NULL DEFAULT 'baseicon.png', "car_brand" varchar(30) NULL, "content_type_id" integer NULL, "modemID" varchar(100) NULL, "distance" integer NULL, "max_speed" integer NULL DEFAULT 100, "max_rpm" integer NULL DEFAULT 4000, "fuel_tank_volume" integer NULL DEFAULT 70, "max_battery_voltage" integer NULL, "creation_date" datetime NOT NULL, "last_RFID" text NULL);
INSERT INTO "GTS_vehicle" VALUES(1,'lan1_op1_car1',1,'03115','baseicon.png','UFP',16,'lan_op1_car1',NULL,100,4000,70,12,'2011-06-23 11:54:32.395000',NULL);
INSERT INTO "GTS_vehicle" VALUES(2,'lang_op1_car2',1,'03','baseicon.png','ыва',16,'lan_op1_car2',NULL,100,4000,70,12,'2011-06-23 11:55:02.372000',NULL);
INSERT INTO "GTS_vehicle" VALUES(3,'lang_sup_car1',1,'0000','baseicon.png','Fiat',16,'lan_sup_car1',NULL,100,4000,70,12,'2011-06-23 12:32:09.017000',NULL);
INSERT INTO "GTS_vehicle" VALUES(4,'lang_sup_car2',1,'123','baseicon.png','ЗАЗ',16,'lan_sup_car2',NULL,100,4000,70,12,'2011-06-23 12:31:38.108000',NULL);
INSERT INTO "GTS_vehicle" VALUES(9,'lang_op2_car1',1,'','baseicon.png','',16,'1233211234',NULL,100,4000,70,12,'2011-07-05 13:32:09.865000',NULL);
INSERT INTO "GTS_vehicle" VALUES(11,'Big RIder',1,'','baseicon.png','0311523',16,'111',NULL,100,4000,70,20,'2011-07-07 12:12:40.358000',NULL);
COMMIT;
With iterdump(), all of the information is output like this:
INSERT INTO "name" VALUES(1, 'John')
INSERT INTO "name" VALUES(2, 'Jane')
INSERT INTO "phone" VALUES(1, '111000')
INSERT INTO "phone" VALUES(2, '111001')
An easy way is to filter out certain lines with the str.startswith() method.
For example, the table name is 'phone':
# Convert file existing_db.db to SQL dump file dump.sql,
# keeping only the INSERT statements for the 'phone' table.
import sqlite3, os

con = sqlite3.connect('existing_db.db')
try:
    with open('dump.sql', 'w') as f:
        for line in con.iterdump():
            # iterdump() emits the whole database; keep only this table's rows.
            if line.startswith('INSERT INTO "phone"'):
                f.write('%s\n' % line)
finally:
    # iterdump() does not close the connection; do it explicitly.
    con.close()
Not very smart, but it may fit your objective.
Related
Python: Post json to MySql Database
I'm a bit at a loss for how to just push my json data to MySql. I have the cursor. Do I need to create a table first? Is there a method to simply push my json data straight in? with open("trades.json") as f: trades_data = json.load(f) trades_data = reconfigure_data(trades_data) db = mysql.connector.connect( host="localhost", user=os.environ.get('MYSQL_DB_USER'), password=os.environ.get('MYSQL_DB_USER_PASS', database='trades') ) cursor = db.cursor()
I found the syntax that works: cursor = db.cursor() # create table on database sql = "CREATE TABLE IF NOT EXISTS `test` (`id` INT(11) UNSIGNED NOT NULL AUTO_INCREMENT, `objectid` VARCHAR(100), `type` VARCHAR(5), `ownerid` VARCHAR(100), `color` VARCHAR(15), `shape` VARCHAR(15), `size` VARCHAR(15), PRIMARY KEY (`id`) );" print(sql) cursor.execute(sql) with open("trades.json") as f: trades_data = json.load(f) for trade in trades_data: for right in trade['rights']: sql = f"INSERT INTO test (objectid, type, ownerid, color, shape, size) VALUES ('{trade['objectid']}', '{trade['type']}', '{trade['ownerid']}', '{right['color']}', '{right['shape']}', '{right['size']}' )" cursor.execute(sql) db.commit()
How to get columns name in mysqldb with a Python 2.7?
If I am using select * from query it is working well, but when I am trying to query the columns name too, it isnt working (maybe because I have got a column called "FROM" but that's why i used 'FROM!?) Here my code: connection = MySQLdb.connect(host='localhost', user='admin', passwd='', db='database1', use_unicode=True, charset="utf8") cursor = connection.cursor() query = """ select ACTUAL_TIME, 'FROM, ID union all select ACTUAL_TIME, FROM , ID from TEST into outfile '/tmp/test.csv' fields terminated by ';' enclosed by '"' lines terminated by '\n'; """ cursor.execute(query) connection.commit() cursor.close() I get this error message: raise errorvalue _mysql_exceptions.OperationalError: (1054, "Unknown column 'ACTUAL_TIME' in 'field list'") EDIT: SHOW CREATE TABLE TEST; | TEST | CREATE TABLE `TEST` ( `ACTUAL_TIME` varchar(100) DEFAULT NULL, `FROM` varchar(100) DEFAULT NULL, `STATUS` varchar(100) DEFAULT NULL, `ID` int(10) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`ID`) ) ENGINE=InnoDB AUTO_INCREMENT=76287 DEFAULT CHARSET=utf8 |
try this : connection = MySQLdb.connect(host='localhost', user='admin', passwd='', db='database1', use_unicode=True, charset="utf8") cursor = connection.cursor() query = """ select 'ACTUAL_TIME', 'FROM', 'ID' -- add single quotes union all select `ACTUAL_TIME`, `FROM`, `ID` -- add here backtick in column names from TEST into outfile '/tmp/test.csv' fields terminated by ';' enclosed by '"' lines terminated by '\n'; """ cursor.execute(query) connection.commit() cursor.close() or else you can use this to get column names "SHOW columns" or : cursor.execute("SELECT * FROM table_name LIMIT 0") print cursor.description
columns = cursor.description result = [{columns[index][0]:column for index, column in enumerate(value)} for value in cursor.fetchall()] print(result)
No data inserted after successful SQLite statement within Python
Extension from previous question Attempting to insert SQL values into database after pulling from XML file, but none seem to be appearing in database after insert statement embedded in Python code. Without the SQL section included, the entries are printed as expected. I am not getting an error in my Python environment (Anaconda Navigator), so totally lost on how the queries were processed, but nothing was entered! I tried a basic select statement to display the table, but get an empty table back. Select Query %sql SELECT * FROM publication; Main Python code import sqlite3 con = sqlite3.connect("publications.db") cur = con.cursor() from xml.dom import minidom xmldoc = minidom.parse("test.xml") #loop through <pub> tags to find number of pubs to grab root = xmldoc.getElementsByTagName("root")[0] pubs = [a.firstChild.data for a in root.getElementsByTagName("pub")] num_pubs = len(pubs) count = 0 while(count < num_pubs): #get data from each <pub> tag temp_pub = root.getElementsByTagName("pub")[count] temp_ID = temp_pub.getElementsByTagName("ID")[0].firstChild.data temp_title = temp_pub.getElementsByTagName("title")[0].firstChild.data temp_year = temp_pub.getElementsByTagName("year")[0].firstChild.data temp_booktitle = temp_pub.getElementsByTagName("booktitle")[0].firstChild.data temp_pages = temp_pub.getElementsByTagName("pages")[0].firstChild.data temp_authors = temp_pub.getElementsByTagName("authors")[0] temp_author_array = [a.firstChild.data for a in temp_authors.getElementsByTagName("author")] num_authors = len(temp_author_array) count = count + 1 #process results into sqlite pub_params = (temp_ID, temp_title) cur.execute("INSERT INTO publication (id, ptitle) VALUES (?, ?)", pub_params) journal_params = (temp_booktitle, temp_pages, temp_year) cur.execute("INSERT INTO journal (jtitle, pages, year) VALUES (?, ?, ?)", journal_params) x = 0 while(x < num_authors): cur.execute("INSERT OR IGNORE INTO authors (name) VALUES (?)", (temp_author_array[x],)) x = x + 1 #display 
results print("\nEntry processed: ", count) print("------------------\nPublication ID: ", temp_ID) print("Publication Title: ", temp_title) print("Year: ", temp_year) print("Journal title: ", temp_booktitle) print("Pages: ", temp_pages) i = 0 print("Authors: ") while(i < num_authors): print("-",temp_author_array[i]) i = i + 1 print("\nNumber of entries processed: ", count) SQL queries %%sql DROP TABLE IF EXISTS publication; CREATE TABLE publication( id INT PRIMARY KEY NOT NULL, ptitle VARCHAR NOT NULL ); /* Author Entity set and writes_for relationship */ DROP TABLE IF EXISTS authors; CREATE TABLE authors( name VARCHAR(200) PRIMARY KEY NOT NULL, pub_id INT, pub_title VARCHAR(200), FOREIGN KEY(pub_id, pub_title) REFERENCES publication(id, ptitle) ); /* Journal Entity set and apart_of relationship */ DROP TABLE IF EXISTS journal; CREATE TABLE journal( jtitle VARCHAR(200) PRIMARY KEY NOT NULL, pages INT, year INT(4), pub_id INT, pub_title VARCHAR(200), FOREIGN KEY(pub_id, pub_title) REFERENCES publication(id, ptitle) ); /* Wrote relationship b/w journal & authors */ DROP TABLE IF EXISTS wrote; CREATE TABLE wrote( name VARCHAR(100) NOT NULL, jtitle VARCHAR(50) NOT NULL, PRIMARY KEY(name, jtitle), FOREIGN KEY(name) REFERENCES authors(name), FOREIGN KEY(jtitle) REFERENCES journal(jtitle) );
You need to call con.commit() in order to commit the data to the database. If you use the connection as a context manager (with con:), the connection will commit any changes you make (or roll them back if there is an error). Explicitly closing the connection is also a good practice.
It looks like you are forgetting to commit and close the connection. You need to call these two functions in order to properly close the connection and to save the work you have done to the database. conn.commit() conn.close()
Python request from MySQL JSON column returns a string instead of a float
My MySQL table: CREATE TABLE ref_data ( id BIGINT AUTO_INCREMENT NOT NULL, symbol VARCHAR(64) NOT NULL, metadata JSON NOT NULL, PRIMARY KEY (id) ); INSERT INTO ref_data (symbol, metadata) VALUES ('XYZ', '{"currency": "USD", "tick_size": 0.01}'); My Python script: import mysql.connector con = mysql.connector.connect(**config) cur = con.cursor() cur.execute( """ SELECT JSON_EXTRACT(metadata, '$.tick_size') FROM ref_data WHERE symbol = 'CL'; """) And the result is a unicode: cur.fetchall()[0][0] >> u'0.01' When I run the same query within MySQL Workbench I get a double. I know I could convert the string to a float, but the point of using a JSON was flexibility and not having to specify what each column is, etc. Thanks!
Python: How to fill/append data from python loop to SQL table?
I created a python script in Pycharm that works like a charm... But these days I'm sensing that I could have a problem with size of monthly .csv file and also I would like to analyze data using SQL over Python so I could automatize process and make charts, pies from that queries. So instead of exporting to csv I would like to append to SQL table. Here is a part of code that exports data to .csv: for content in driver.find_elements_by_class_name('companychatroom'): people = content.find_element_by_xpath('.//div[#class="theid"]').text if people != "Monthy" and people != "Python" : pass mybook = open(r'C:\Users\Director\PycharmProjects\MyProjects\Employees\archive' + datetime.now().strftime("%m_%y") + '.csv', 'a') mybook.write('%20s,%20s\n'%(people, datetime.now().strftime("%d/%m/%y %H:%M"))) mybook.close() ================== EDIT: ============== I tried over sqlite3 and these is what I could manage to write by now and it works... But how to append data into SQL table it always overwrite previous with INSERT TO and it shouldn't? import sqlite3 from datetime import datetime sqlite_file = (r"C:\Users\Director\PycharmProjects\MyProjects\Employees\MyDatabase.db") conn = sqlite3.connect(sqlite_file) cursor = conn.cursor() table_name = 'Archive' + datetime.now().strftime("%m_%y") sql = 'CREATE TABLE IF NOT EXISTS ' + table_name + '("first_name" varchar NOT NULL, "Currdat"date NOT NULL)' cursor.execute(sql) sql = 'INSERT INTO ' + table_name + '(first_name,Currdat) VALUES ("value1",CURRENT_TIMESTAMP);' cursor.execute(sql) sql = 'SELECT * FROM Archive06_16 ' for row in cursor.execute(sql): print(row) cursor.close() Thanks in advance,