I want to make a call to a function from the finally error detection module with a variable passed. But it shows error as:
TypeError: 'NoneType' object has no attribute '__getitem__'
My code is as follows:
from mysql.connector import MySQLConnection, Error
import MySQLdb
import sys
import time
import signal
#from python_mysql_dbconfig import read_db_config
#from MySQL import row
sys.setrecursionlimit(1500)
def query_with_fetchone():
try:
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="root", # your username
passwd="faheemmcfc", # your password
db="python") # name of the data base
cur=db.cursor()
cursor = db.cursor()
cursor.execute("SELECT * FROM down")
queue=0
data= cursor.fetchone()
lastid=data[0]
print(data[0])
def check(lastid):
print lastid
global last
last=lastid
while True:
cursor.execute("Select * from down where id>%s"%(last))
data = cursor.fetchone()
last=data[0]
print(data[0])
print data[1]
#signal.pause()
check(lastid)
except Error as e:
print(e)
finally:
print'last',last
time.sleep(30)
check(last)
if __name__ == '__main__':
query_with_fetchone()
Here my need is to infinitely run the while loop, so whenever a new entry is made in the database it can be retrieved.
When I call the check from finally, it shows error as:
/usr/bin/python2.7 /home/faheem/PycharmProjects/untitled1/test2.py 1 1
2 CCLEANER 3 FANCONTROL 4 CCLEANER 5 FANCONTROL 6 CCLEANER last 6 6
Traceback (most recent call last): File
"/home/faheem/PycharmProjects/untitled1/test2.py", line 55, in
<module>
query_with_fetchone() File "/home/faheem/PycharmProjects/untitled1/test2.py", line 45, in
query_with_fetchone
check(last) File "/home/faheem/PycharmProjects/untitled1/test2.py", line 34, in check
last=data[0] TypeError: 'NoneType' object has no attribute '__getitem__'
Process finished with exit code 1
Please show me how to correct it or provide another method to run the while loop infinitely without allowing it to go to the finally part.
Please feel free to correct the question, as I am not so familiar with stackoverflow.
Related
I am trying to use a sqlite3 database in python but I get the following error:
Traceback (most recent call last):
File "C:\Users\Angel\Desktop\Proyecto\Src_School_Admin\SchoolAdmin-Director.py", line 4, in <module>
from execute_files.Sqlitedb import FirstUseInfo
File "C:\Users\Angel\Desktop\Proyecto\Src_School_Admin\execute_files\Sqlitedb.py", line 17, in <module>
FirstUseInfo()
File "C:\Users\Angel\Desktop\Proyecto\Src_School_Admin\execute_files\Sqlitedb.py", line 12, in FirstUseInfo
s = cursor.execute("SELECT Use FROM MainData")
sqlite3.OperationalError: no such table: MainData
[Finished in 0.2s]
The FirstUseInfo function is located in a file in the following path:C:\Users\Angel\Desktop\Project\Src_School_Admin\execute_files together with the database
and the file that sends to call the FirstUseInfo function is inC:\Users\Angel\Desktop\Project\Src_School_Admin
but it does not work keeps marking the error
First.py
from PyQt5.QtWidgets import QMainWindow,QApplication
from PyQt5 import uic
from PyQt5 import QtCore
from execute_files.Sqlitedb import FirstUseInfo
class InitWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
#uic.loadUi("UIX/first.ui",self)
#self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
#self.LStatus.setText("Cargando...")
self.FirstUse()
FirstUseInfo()
def FirstUse(self):
pass
app = QApplication([])
iw = InitWindow()
iw.show()
app.exec_()
Sqlitedb.py
import sqlite3
from PyQt5.QtWidgets import QMessageBox
import os
def FirstUseInfo():
r = str(os.getcwd())
final = r.replace("\\","/")
result = None
d=final+"/InfoDB.db"
conexion = sqlite3.connect(d)
cursor = conexion.cursor()
s = cursor.execute("SELECT Use FROM MainData")
for i in s:
result = int(i[0])
return print(result)
conexion.close()
FirstUseInfo()
if I run FirstUseInfo () from Sqlitedb.py there is no problem but if I execute it from another side it throws the error.
I realized that for some reason another database is created in the directory C: \ Users \ Angel \ Desktop \ Project \ Src_School_Admin, which is where the file that sends to call theFirstUseInfo function is located
but if I execute it from another side it throws the error.
So your code
r = str(os.getcwd())
final = r.replace("\\","/")
result = None
d=final+"/InfoDB.db"
gives new filename every time you are running program from somewhere else.
When you run
conexion = sqlite3.connect(d)
SQLite doesn't see the database and it creates there.
You should make some variable where you will store real filename for your database file.
solve it by attaching this line of code
r = os.path.dirname (__ file __)
source = r.replace ('\\'," / ") +" / InfoDB.db "
as I understand this line returns the absolute value of the location of the file
How can I solve it...
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\Mani\AppData\Local\Programs\Python\Python36-32\lib\tkinter\__init__.py", line 1699, in __call__
return self.func(*args)
File "F:\Monu\Work\python\PROJECT\New folder\LMS.py", line 262, in onDoubalclick
cursor.execute("SELECT * FROM `TRANSECTION` WHERE Book_Id=?",(val1.get(),))
AttributeError: 'str' object has no attribute 'get'
I already convert it in string or integer but not working
def onDoubalclick(event):
test=treeview.item(treeview.selection())
print(test)
items=treeview.selection()[0]
val1=str(treeview.item(items)['values'][0])
print(type(val1))
popsearch()
DataBase()
cursor.execute("SELECT * FROM `TRANSECTION` WHERE Book_Id=?",(val1.get(),))
info=cursor.fetchall()
for ROW1 in info:
print(rows)
treeview2.insert("",END,value=ROW1)
I want to get a value that stores in val1 and search that value in the database
The error message is correct Strings do not have a get attribute.
This is the easiest way to prevent this error from crashing your program. I just removed the get() function/method call from the val1 variable.
def onDoubalclick(event):
test=treeview.item(treeview.selection())
print(test)
items=treeview.selection()[0]
val1=str(treeview.item(items)['values'][0])
print(type(val1))
popsearch()
DataBase()
cursor.execute("SELECT * FROM `TRANSECTION` WHERE Book_Id=?",(val1,))
info=cursor.fetchall()
for ROW1 in info:
print(rows)
treeview2.insert("",END,value=ROW1)
Another option is to not fix the error but surround the bug in a try/except block to prevent the program from crashing.
So as an example you could do the following:
#more code above left off to keep things simple
try:
cursor.execute("SELECT * FROM `TRANSECTION` WHERE Book_Id=?", (val1.get(),))
info=cursor.fetchall()
#the rest of your code
except Exception as e:
print "This exception was thrown: %s" % str(e)
#the rest of your code
print "Blah. Print stuff here. Print variable state. Print time."
I am very new to python and I just can't seem to find an answer to this error. When I run the code below I get the error
AttributeError: module 'odbc' has no attribute 'connect'
However, the error only shows in eclipse. There's no problem if I run it via command line. I am running python 3.5. What am I doing wrong?
try:
import pyodbc
except ImportError:
import odbc as pyodbc
# Specifying the ODBC driver, server name, database, etc. directly
cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=PXLstr,17;DATABASE=Dept_MR;UID=guest;PWD=password')
The suggestion to remove the try...except block did not work for me. Now the actual import is throwing the error as below:
Traceback (most recent call last):
File "C:\Users\a\workspace\TestPyProject\src\helloworld.py", line 2, in <module>
import pyodbc
File "C:\Users\a\AppData\Local\Continuum\Anaconda3\Lib\site-packages\sqlalchemy\dialects\mssql\pyodbc.py", line 105, in <module>
from .base import MSExecutionContext, MSDialect, VARBINARY
I do have pyodbc installed and the import and connect works fine with the command line on windows.
thank you
The problem here is that the pyodbc module is not importing in your try / except block. I would highly recommend not putting import statements in try blocks. First, you would want to make sure you have pyodbc installed (pip install pyodbc), preferably in a virtualenv, then you can do something like this:
import pyodbc
cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=PXLstr,17;DATABASE=Dept_MR;UID=guest;PWD=password')
cursor = cnxn.cursor()
cursor.execute('SELECT 1')
for row in cursor.fetchall():
print(row)
If you're running on Windows (it appears so, given the DRIVER= parameter), take a look at virtualenvwrapper-win for managing Windows Python virtual environments: https://pypi.python.org/pypi/virtualenvwrapper-win
Good luck!
Flipper's answer helped to establish that the problem was with referencing an incorrect library in External Libraries list in eclipse. After fixing it, the issue was resolved.
What is the name of your python file? If you inadvertently name it as 'pyodbc.py', you got that error. Because it tries to import itself instead of the intended pyodbc module.
here is the solution!
simply install and use 'pypyodbc' instead of 'pyodbc'!
I have my tested example as below. change your data for SERVER_NAME and DATA_NAME and DRIVER. also put your own records.good luck!
import sys
import pypyodbc as odbc
records = [
['x', 'Movie', '2020-01-09', 2020],
['y', 'TV Show', None, 2019]
]
DRIVER = 'ODBC Driver 11 for SQL Server'
SERVER_NAME = '(LocalDB)\MSSQLLocalDB'
DATABASE_NAME = 'D:\ASPNET\SHOJA.IR\SHOJA.IR\APP_DATA\DATABASE3.MDF'
conn_string = f"""
Driver={{{DRIVER}}};
Server={SERVER_NAME};
Database={DATABASE_NAME};
Trust_Connection=yes;
"""
try:
conn = odbc.connect(conn_string)
except Exception as e:
print(e)
print('task is terminated')
sys.exit()
else:
cursor = conn.cursor()
insert_statement = """
INSERT INTO NetflixMovies
VALUES (?, ?, ?, ?)
"""
try:
for record in records:
print(record)
cursor.execute(insert_statement, record)
except Exception as e:
cursor.rollback()
print(e.value)
print('transaction rolled back')
else:
print('records inserted successfully')
cursor.commit()
cursor.close()
finally:
if conn.connected == 1:
print('connection closed')
conn.close()
I am trying to add a SQLAlchemyJobStore job store (and make it the default job store) and store some jobs on it. I am running mysql which has a database named jobstore.
I have the following program which tries to open a SQLAlchemyJobStore job store to the mysql db that is running:
# sqlalchemy.py
from sqlalchemy import *
from apscheduler.jobstores.sqlalchemy_store import SQLAlchemyJobStore
from apscheduler.scheduler import Scheduler
from datetime import datetime, timedelta
import time
def alarm(time):
print('Alarm! This alarm was scheduled at %s.' % time)
_aps_config = {'standalone': 'True'}
_dbURL = 'mysql://root:<root-password>#localhost/jobstore'
if __name__ == '__main__':
scheduler = Scheduler(_aps_config)
scheduler.add_jobstore(SQLAlchemyJobStore(url=_dbURL), 'default')
alarm_time = datetime.now() + timedelta(seconds=10)
scheduler.add_date_job(alarm, alarm_time, name='alarm1', args=[datetime.now()])
print 'alarms added: ', alarm_time
alarm_time = datetime.now() + timedelta(seconds=15)
scheduler.add_date_job(alarm, alarm_time, name='alarm2', args=[datetime.now()])
print 'alarms added: ', alarm_time
alarm_time = datetime.now() + timedelta(seconds=20)
scheduler.add_date_job(alarm, alarm_time, name='alarm3', args=[datetime.now()])
print 'alarms added: ', alarm_time
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
scheduler.shutdown()
pass
When trying to run the above code I see the following:
NameError: global name 'create_engine' is not defined
$ python sqlalchemy.py
Traceback (most recent call last):
File "sqlalchemy.py", line 19, in <module>
scheduler.add_jobstore(SQLAlchemyJobStore(url=_dbURL), 'default')
File "/usr/lib/python2.7/site-packages/APScheduler-2.1.0-py2.7.egg/apscheduler/jobstores/sqlalchemy_store.py", line 29, in __init__
self.engine = create_engine(url)
NameError: global name 'create_engine' is not defined
$
I see "/usr/lib/python2.7/site-packages/APScheduler-2.1.0-py2.7.egg/apscheduler/jobstores/sqlalchemy_store.py", the __init__ is trying to create_engine and its failing.
20 class SQLAlchemyJobStore(JobStore):
21 def __init__(self, url=None, engine=None, tablename='apscheduler_jobs',
22 metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
23 self.jobs = []
24 self.pickle_protocol = pickle_protocol
25
26 if engine:
27 self.engine = engine
28 elif url:
29 self.engine = create_engine(url)
What is going wrong here?! In other words, how do I create a SQLAlchemyJobStore using APScheduler and successfully store the jobs on them? Any example/code-snippet would be a great help!
There seem no problem in your code. I tried to run it and it successfully completes with the following output:
python jobstore.py
alarms added: 2013-02-07 10:31:10.234000
alarms added: 2013-02-07 10:31:15.240000
alarms added: 2013-02-07 10:31:20.240000
The only change I made was updating _dbURL = 'sqlite:///:memory:' to use sqlite engine.
Please check do you have sqlalchemy installed and it can be found in PYTHONPATH by your script.
Run the following code in python console or better add it at the beginning of your script and check the output.
import sqlalchemy
print sqlalchemy.__version__
UPDATE
I reread your post and realized that my test code had one more difference - I created file with another name: jobstore.py
I tried to rename file to sqlalchemy.py and got same exception:
Traceback (most recent call last):
File "C:/stackoverflow/so/sqlalchemy.py", line 22, in <module>
scheduler.add_jobstore(SQLAlchemyJobStore(url=_dbURL), 'default')
File "C:\Progs\Python27\lib\site-packages\apscheduler\jobstores\sqlalchemy_store.py", line 29, in __init__
self.engine = create_engine(url)
NameError: global name 'create_engine' is not defined
Process finished with exit code 1
Basically the problem is that your python script name has same name as sqlalchemy module name thus python loads your scripts first and cannot access sqlalchemy code.
Try to rename script name to something other then sqlalchemy.py - this would help if you have sqlalchemy module installed.
I am trying to do share a psycopg2 connection between multiple threads. As was mentioned in the docs, I am doing that by creating new cursor objects from the shared connection, whenever I use it in a new thread.
def delete(conn):
while True:
conn.commit()
def test(conn):
cur = conn.cursor()
thread.start_new_thread(delete,(conn,))
i = 1
while True:
cur.execute("INSERT INTO mas(taru,s) values (2,%s)",(i,))
print i
i = i +1
conn.commit()
After running, I get output like,
1
2
...
98
99
Traceback (most recent call last):
File "postgres_test_send.py", line 44, in <module>
cur.execute("INSERT INTO mas(taru,s) values (2,%s)",(i,))
psycopg2.InternalError: SET TRANSACTION ISOLATION LEVEL must be called before any query
What's going on here?
The bug is not in the most recent psycopg2 versions: it has probably been fixed in 2.4.2.