I have a dictionary called users that stores the data a user enters in a Kivy TextInput. It works fine while the program runs, but when I rerun the program the information is all gone and I have to add the user again. It's an ATM system, so I also edit the values in that dictionary — which made me think I can't simply save it to a file.
class Data:
    """In-memory user store keyed by email.

    NOTE(review): ``users`` is a class attribute, so every instance (and
    every subclass, e.g. the Kivy screens that inherit from Data) shares
    the same dict. The data lives only as long as the process runs.
    """

    # email -> [password, name, lastname, country, num,
    #           day, month, year, gender, balance, created]
    users = {}

    def add_user(self, email, password, name, lastname, country, num,
                 day, month, year, gender, balance, created):
        """Register a new user.

        Returns 1 on success, -1 (after printing a notice) when the
        email is already registered. The -1 sentinel is kept because
        callers test against it.
        """
        if email in self.users:
            print("Email exists!")
            return -1
        self.users[email] = [password, name, lastname, country, num,
                             day, month, year, gender, balance, created]
        return 1

    def get_user(self, email):
        """Return the stored record list for *email*, or -1 if unknown."""
        if email in self.users:
            return self.users[email]
        return -1

    def validate(self, email, password):
        """True when *email* exists and its stored password matches."""
        if self.get_user(email) != -1:
            # Index 0 of the record list holds the password.
            return self.users[email][0] == password
        return False
class Depositpage(Screen, Widget, Data):
    """Kivy screen with a text input and an 'Add' button that deposits
    the entered amount into a user's balance (index 9 of the record list).
    """

    def __init__(self, **kwargs):
        super(Depositpage, self).__init__(**kwargs)
        btn1 = Button(text='Add', size_hint=(0.08, 0.06),
                      pos_hint={'x': 0.903, 'top': 0.599},
                      color=(0, 0, 0, 1), background_color=(0, 0, 0, 0))
        btn1.bind(on_release=lambda x: self.add())
        self.txt1 = TextInput(multiline=False, size_hint=(0.45, 0.13),
                              pos_hint={'x': 0.27, 'top': 0.475}, font_size=43)
        # Fixed: the commented-out Label spilled onto a second,
        # uncommented line, which was a syntax error.
        # self.ballabel = Label(text="text", font_size=20,
        #                       pos_hint={'x': -0.04, 'top': 1.27},
        #                       color=(0, 0, 0, 1))
        self.add_widget(self.txt1)
        self.add_widget(btn1)
        # self.add_widget(self.ballabel)

    def add(self, email='mo#gmail.com'):
        """Add the amount typed in txt1 to the balance of *email*.

        The email used to be hard-coded in the body; it is now a
        parameter whose default is the old value, so existing callers
        are unaffected.
        """
        try:
            amount = int(self.txt1.text)
        except ValueError:
            # Ignore empty / non-numeric input instead of crashing the UI.
            print("Invalid amount: {!r}".format(self.txt1.text))
            return
        result = int(self.users[email][9]) + amount
        self.users[email][9] = result
        print(f"add {self.users[email][9]}")
        print(self.users[email][9])
A dictionary is not designed to store data persistently.
You can, however, dump it to a JSON file and then load it back wherever you need it:
import json

# Persist the dict to disk as JSON...
with open('my_dict.json', 'w') as dump_file:
    json.dump(my_dict, dump_file)

# ...and elsewhere, restore it.
with open('my_dict.json') as load_file:
    my_dict = json.load(load_file)
Loading from JSON is fairly efficient.
Another option would be the pickle and marshal modules, but unlike JSON the files they generate aren't human-readable: they turn many Python data types into a stream of bytes and later recreate the objects from those bytes.
Data persistence in Python:
https://docs.python.org/3/library/persistence.html
The code below works. See how you can adopt it to your code.
import json

data = {'x': 7}

# Save the dict to disk.
with open('data.json', 'w') as out_file:
    out_file.write(json.dumps(data))

# Read the JSON back into a dict.
with open('data.json', 'r') as in_file:
    data_from_disk = json.loads(in_file.read())
print('data_from_disk: ' + str(data_from_disk))
output
data_from_disk: {'x': 7}
Related
I hope everyone's having a good day!
So I have this code that loads a text file, reads all the data, assigns each line to a different variable. I want to be able to change (for example) the current_user.config(text=User1) in FileRead function to current_user.config(text=User2) whenever I call the function NextAccount so I can sort of print each set of user and pass on screen (or do something with them).
Edit: Should've mentioned I'm a beginner so I'm probably not doing this the best way. My program is basically supposed to read around 30 combinations of user/pass and I want to display the first one first and then use a button to navigate through (Next account, previous account). I wanted to assign each to a different variable just because I want to use pyautogui to copy paste these combinations to a field in another program
from tkinter import *
from tkinter import filedialog as fd

file_path = ''
datalist = []


def OpenFile():
    """Ask the user for a file, remember its path, and load it."""
    global file_path
    file_path = fd.askopenfilename()
    FileRead()


def FileRead():
    """Read user/pass lines from file_path and show the first pair.

    NOTE(review): assumes the file holds at least six lines laid out as
    user1, pass1, user2, pass2, ... — shorter files raise IndexError.
    """
    with open(file_path) as data:   # `with` replaces the manual close()
        lines = data.readlines()
    User1, Pass1 = lines[0], lines[1]
    User2, Pass2 = lines[2], lines[3]
    User3, Pass3 = lines[4], lines[5]
    # .....so on
    current_user.config(text=User1)
    current_pass.config(text=Pass1)


def NextAccount():
    # TODO: advance to the next user/pass pair (the original body was
    # only a comment, which is a syntax error in Python).
    pass


window = Tk()
window.geometry('600x600')
window.config(bg='black')
file_button = Button(window, text='Select File', command=OpenFile)
file_button.pack()
current_user = Label(window)
current_user.pack()
current_pass = Label(window)
current_pass.pack()
# Fixed: the button referenced the undefined name `NextAcc` and was
# never packed, so it could not appear or work.
next_acc_button = Button(window, text='Next account', command=NextAccount)
next_acc_button.pack()
window.mainloop()
One way of accomplishing what you're after might be for NextAccount to pop the first user/password from the list. This is easier IMO if your OpenFile function gives you a list of [(user1, pass1), ...] rather than [user1, pass1, ...].
I might structure it something like this:
datalist = []


def FileRead(file_path: str) -> list[tuple[str, str]]:
    """Read *file_path* and return its lines paired up as (user, passwd)."""
    with open(file_path) as handle:
        lines = handle.readlines()
    # Even lines are users, odd lines are passwords.
    return list(zip(lines[::2], lines[1::2]))


def OpenFile() -> None:
    """Ask the user for a filename and append every user/password
    pair from that file onto the module-level datalist."""
    chosen = fd.askopenfilename()
    datalist.extend(FileRead(chosen))


def NextAccount() -> None:
    """Print the current user/password pair and drop it from datalist."""
    print(datalist.pop(0))
I'm not sure I understand exactly what you are asking for.
First of all, if you are reading a config file, you should have a look at configparser; your code will be more readable, as it gives you a structured, JSON-like way to access the configuration.
If I understand correctly, you want to iterate over all the users you get from your config file and change which one you use?
If yes, put your users into a list and create an iterator on that list.
# Two sample users and a shared iterator over them; next_item() below
# advances this iterator one user per call.
user1 = {"username": "user1", "password": "1234"}
user2 = {"username": "user2", "password": "4567"}
users = [user1, user2]
itr_users = iter(users)
then, when you call your function, just call next(itr_users) to get the next item of the users list and do your stuff. You should be able to access user information this way:
def next_item():
    """Advance the shared iterator and return the next username.

    Fixed: the original evaluated curr_user["username"] and discarded
    the value; it is now returned. Raises StopIteration once the users
    list is exhausted.
    """
    curr_user = next(itr_users)
    return curr_user["username"]
# First call
# > user1
# Second call
# > user2
In this scenario, I would rather try to:
Give the FileRead function a parameter that indicates which User and Pass to use, like:
def FileRead(n):
    """Display the n-th (user, pass) pair from the file.

    Sketch: the range(...) bound still needs to be filled in by the
    reader. Relies on module-level file_path, current_user and
    current_pass being defined elsewhere.
    """
    data = open(file_path)
    datalist = data.readlines()
    # Pair consecutive lines: (user, pass), (user, pass), ...
    user_pass_list = [(datalist[i], datalist[i+1]) for i in range( ... )]
    #.....so on
    current_user.config(text=user_pass_list[n][0]) #<<<THESE TWO VALUES WHEN function NextAccount is called
    current_pass.config(text=user_pass_list[n][1]) #<<<
    data.close()
Or set a global variable that the FileRead function will use:
n_user_pass = 0  # index of the user/pass pair currently displayed


def FileRead():
    """Display the pair selected by the global n_user_pass.

    Sketch: the range(...) bound still needs to be filled in. Relies on
    module-level file_path, current_user and current_pass.
    """
    data = open(file_path)
    datalist = data.readlines()
    user_pass_list = [(datalist[i], datalist[i+1]) for i in range( ... )]
    #.....so on
    # Fixed: the original indexed with an undefined name `n` instead of
    # the global n_user_pass it declares above.
    current_user.config(text=user_pass_list[n_user_pass][0])
    current_pass.config(text=user_pass_list[n_user_pass][1])
    data.close()


def NextAccount():
    global n_user_pass
    n_user_pass = ...  # e.g. n_user_pass + 1, wrapping at the list length
I changed the way you stored your user and passes, to make it into a list [(user1, pass1), ... ] that you can access through indices
So my confusion comes from how to even begin a setup with MVC. I understand that the View is simply printing info to the user, however I do not understand how I can pass variables from class to class.
For example how can I pass the user input "option" and pass it to the controller/main class. Shown in the third image (mainclass pt 2) I have a basic menu and I need all the functionality done. However #2 is creating a new txt file to print the data from the csv file into it. However when I run the code the new txt file is not created.
Any info on how I should reformat this code to be a proper MVC architecture along with any help to solving why my txt file is not being created would be extremely helpful.
Controller class:
import pandas as pd
import os
import View

# Basic values such as name and file path.
csv_filepath = "C:/Users/Liam/PycharmProjects/assignment1/pipeline-incidents-comprehensive-data.csv"
Fname = "Liam Arscott"

# Read the csv with pandas, ensuring utf-8 encoding.
try:
    df = pd.read_csv(csv_filepath, encoding="utf-8")
except (FileNotFoundError, pd.errors.ParserError) as exc:
    # Fixed: the bare `except` hid the real error, and execution then
    # fell through to the loop below with `df` undefined (NameError).
    print("file did not load or could not be located:", exc)
    df = None

C_list = []
if df is not None:
    # Collect the first 100 rows as tuples of the columns of interest
    # (replaces the `if ind == 99: break` counter in the original;
    # equivalent for the default RangeIndex produced by read_csv).
    for ind in df.index[:100]:
        col = (df['Incident Number'][ind], df['Incident Types'][ind], df['Reported Date'][ind],
               df['Nearest Populated Centre'][ind], df['Province'][ind], df['Company'][ind],
               df['Substance'][ind], df['Significant'][ind], df['What happened category'][ind])
        C_list.append(col)
View class:
import pandas as pd

# csv file path kept handy for quick and easy use.
csv_filepath = "C:/Users/Liam/PycharmProjects/assignment1/pipeline-incidents-comprehensive-data.csv"


def read():
    """Load the dataset from the csv file in the working directory."""
    return pd.read_csv("pipeline-incidents-comprehensive-data.csv", encoding="utf-8")


def load(file):
    """Write the dataframe back to the csv file."""
    file.to_csv("pipeline-incidents-comprehensive-data.csv", index=False)


class Menu:
    # NOTE(review): this body runs once, at class-definition time
    # (i.e. on import), not when Menu() is instantiated — consider
    # moving it into a method.
    print("MENU")
    print(" 1. Reload the data from the dataset")
    print(" 2. Write dataset to new csv file")
    print(" 3. Display 1 or many records")
    print(" 4. Create a new record ")
    print(" 5. Edit an existing record")
    print(" 6. Delete an existing record")
    print(" 7. Display all records")
    option = input("which menu option would you like?")
    # Fixed: input() returns a string, so comparing against the ints
    # 1 and 2 was always False and no branch ever ran (this is why the
    # txt file was never created).
    if option == "1":
        file = read()
        print("refreshing data")
        load(file)
        print("data has been refreshed")
    if option == "2":
        with open("pipeline-incidents-comprehensive-data.csv", "r") as ffile, open("secfile.txt", "a") as secfile:
            for line in ffile:
                secfile.write(line)
        print(" dataset has been loaded to new file ")
Model class:
class Columns:
    """One pipeline-incident record; attributes mirror the csv columns."""

    def __init__(self, incNum, incType, rDate, NPC, Province, Company, Sub, Sig, wtHap):
        self.incNum = incNum      # Incident Number
        self.incType = incType    # Incident Types
        self.rDate = rDate        # Reported Date
        self.NPC = NPC            # Nearest Populated Centre
        self.Province = Province
        self.Company = Company
        self.Sub = Sub            # Substance
        self.Sig = Sig            # Significant
        self.wtHap = wtHap        # What happened category

    def __str__(self):
        # Fixed: the original returned a tuple of sets ({self.incNum}, ...),
        # but __str__ must return a string; interpolate the attributes
        # into one comma-separated line instead.
        return (f"{self.incNum}, {self.incType}, {self.rDate}, {self.NPC}, "
                f"{self.Province}, {self.Company}, {self.Sub}, {self.Sig}, {self.wtHap}")
I want to use a .txt file to store API tokens for an application, but I got stuck trying to find a way to Replace the API key/token if it's found in the file. This is the code tried (Python 3.5):
data_to_save = {
    'savetime': str(datetime.datetime.now())[:19],
    'api_key': key_submitted,
    'user': uniqueid,
}

api_exists = False
user_exists = False
# The file holds one JSON list of {'savetime', 'api_key', 'user'} dicts.
with open("databases/api_keys.txt", 'r') as f:
    database = json.loads(f.read())

for i in database:
    if i['api_key'] == key_submitted:
        send_text_to_user(userid, "[b]Error: That API key is already in use.[/b]", "red")
        api_exists = True
    if i['user'] == uniqueid:
        user_exists = True

if user_exists:
    if api_exists:  # fixed: `if api_exists = True:` was a SyntaxError
        send_text_to_user(userid, "[b]Error: Your API key was already saved at another time.[/b]", "red")
    else:
        # Fixed: writing data_to_save at the current position of the
        # read handle appended a second JSON object and corrupted the
        # file. Update the user's record and rewrite the whole list.
        for record in database:
            if record['user'] == uniqueid:
                record['api_key'] = key_submitted
                record['savetime'] = data_to_save['savetime']
        with open("databases/api_keys.txt", 'w') as f:
            f.write(json.dumps(database))
        send_text_to_user(userid, "[b]Okay, I replaced your API key.[/b]", "green")

if not user_exists:
    # New user: append the record and rewrite the file as valid JSON.
    database.append(data_to_save)
    with open("databases/api_keys.txt", 'w') as f:
        f.write(json.dumps(database))
I also want to know if this is the best way to do it or the code could be optimized and how.
Thank you, it has been done. Final code:
data_to_save = {'savetime': str(datetime.datetime.now())[:19], 'api_key': key_submitted, 'user': uniqueid}

with open("databases/api_keys.txt", 'r') as f:
    database = json.loads(f.read())

api_exists = False  # fixed: was read below but never initialised
# Iterate over a copy: removing items from a list while iterating the
# same list skips elements.
for i in list(database):
    if i['user'] == uniqueid:
        # Drop the user's old record; it is re-added below if the key is new.
        database.remove(i)
    if i['api_key'] == key_submitted:
        send_text_to_user(userid, "[b]Error: That API key is already in use.[/b]", "red")
        api_exists = True
        break

if not api_exists:
    database.append(data_to_save)
    # Fixed: the file handle above was opened in 'r' mode (not writable)
    # and the dumps() call was missing its closing parenthesis; reopen
    # the file for writing instead.
    with open("databases/api_keys.txt", 'w') as f:
        f.write(json.dumps(database))
    send_text_to_user(userid, "[b]Okay, your API key was succesfully stored.[/b]")
With this approach we don't even need separate save paths for whether the user exists or not: if the user's record is found it is deleted first, so the code always just appends a "new" record — except when the API key already belongs to another user.
There are many issues with given code, so let's start from the beginning:
We don't need to create empty dict object to fill it on the next lines
data_to_save = {}
data_to_save['savetime'] = str(datetime.datetime.now())[:19]
data_to_save['api_key'] = key_submitted
data_to_save['user'] = uniqueid
when we can just create it filled like
data_to_save = {'savetime': str(datetime.datetime.now())[:19],
'api_key': key_submitted,
'user': uniqueid}
Assignments are not allowed in if statements (more at docs)
if api_exists = True:
so this line will cause a SyntaxError (I guess this is a typo).
Checks like
if user_exists == True:
...
are redundant, we can just write
if user_exists:
...
and have the same effect.
We don't need to explicitly close a file when using a with statement; that is what context managers are for: they clean up after the with block is exited.
Your databases/api_key.txt file after first iteration will have invalid JSON object, because you are simply writing new serialized data_to_save object at the end of the file, while you should modify database object (which seems to be a list of dictionaries) and write serialized new version of it, so we don't need r+ mode as well.
Let's define utility function which saves new API key data like
def save_database(database, api_keys_file_path="databases/api_keys.txt"):
    """Serialize *database* as JSON and overwrite the api-keys file."""
    serialized = json.dumps(database)
    with open(api_keys_file_path, 'w') as out:
        out.write(serialized)
then we can have something like
data_to_save = {
    'savetime': str(datetime.datetime.now())[:19],
    'api_key': key_submitted,
    'user': uniqueid,
}

api_exists = False
user_exists = False

# The file holds a JSON list of dicts, one per registered user.
with open("databases/api_keys.txt", 'r') as api_keys_file:
    database = json.loads(api_keys_file.read())

for entry in database:
    if entry['api_key'] == key_submitted:
        send_text_to_user(userid, "[b]Error: That API key is already in use.[/b]", "red")
        api_exists = True
    if entry['user'] == uniqueid:
        user_exists = True

if user_exists and api_exists:
    send_text_to_user(userid, "[b]Error: Your API key was already saved at another time.[/b]", "red")
elif user_exists:
    # Known user, new key: update the first matching record and persist.
    for record in database:
        if record['user'] == uniqueid:
            record['api_key'] = key_submitted
            break
    save_database(database)
    send_text_to_user(userid, "[b]Okay, I replaced your API key.[/b]", "green")
else:
    # First time we see this user: append the record and persist.
    database.append(data_to_save)
    save_database(database)
Example
I've created directory databases with api_keys.txt file which contains single line
[]
because at the beginning we have no API keys.
Let's assume our missing objects are defined like
# Stub objects so the snippet above can run stand-alone.
key_submitted = '699aa2c2f9fc41f880d6ec79a9d55f29'  # example API key
uniqueid = 3   # example user id
userid = 42    # example messenger recipient
def send_text_to_user(userid, msg, color):
    """Stand-in for the real messenger: just print the message."""
    print(msg)
so with above code it gives me at first script execution empty output, and on the second one:
[b]Error: That API key is already in use.[/b]
[b]Error: Your API key was already saved at another time.[/b]
Further improvements
Should we break from for-loop if one of conditions (API key or user has been already registered in database) is satisfied?
Maybe it will be better to use already written database instead of reinventing the wheel? If you need JSON objects you should take a look at TinyDB.
I'm pulling commit data from the Gerrit API, and the commit number is in the 226,000 range. Where I have to make a request to an endpoint for each and every commit, this is understandable taking a long time. I was wondering how I could best implement threading into my current process.
I have two classes, a Project class, which drills down and retrieves all commits associated with it, and saves them out as a Commit object that contains all the information necessary to then loop through and get the json associated with it. I am pulling them all into a big list, and then iterating through to call the get_data and write_data methods.
class Project(object):
    """A Gerrit project; collects the Commit objects for all its changes."""

    def __init__(self, name):
        self.name = name
        self.commits = []

    def add_commits(self, changes_list):
        """Create a Commit for every revision of every change given."""
        for change in changes_list:
            # NOTE(review): the trailing comma makes change_id a 1-tuple,
            # which is why Commit.get_data indexes it with [0]. Removing
            # the comma here would require changing Commit as well.
            change_id=change['change_id'],
            revision_list=change['revisions']
            self.commits.extend([Commit(rid, change_id)
                                 for rid in revision_list.keys()])

    def return_results(self, ger_obj, start=0):
        """Page through the project's changes, 500 at a time, until the
        API raises or the last page reports no more changes."""
        self.ger = ger_obj
        while True:
            endpoint = (r'/changes/?q=project:{project}&o=ALL_REVISIONS&'
                        r'S={num}'.format(
                            project=self.name,
                            num=start
                        ))
            logging.info('Endpoint: {}'.format(endpoint))
            try:
                changes = ger_obj.get(endpoint)
                self.add_commits(changes_list=changes)
            except HTTPError:
                # Treat an HTTP failure as the end of the result set.
                break
            start += 500
            try:
                # Stop when the last change on the page does not
                # advertise '_more_changes'.
                if not changes[-1].get('_more_changes'):
                    break
            except IndexError:
                # Empty page: nothing more to fetch.
                break
class Commit(object):
    """One Gerrit revision; fetches its commit JSON and writes it as a csv row."""

    def __init__(self, rev_id, change_id):
        self.rev_id = rev_id
        # NOTE(review): change_id arrives as a 1-tuple from
        # Project.add_commits (trailing comma there), hence the [0] below.
        self.change_id = change_id

    def get_data(self, ger_obj):
        """Fetch the commit JSON for this revision and enrich it with ETL
        metadata. On HTTPError, self.data is never set, so callers see an
        AttributeError when they touch it."""
        endpoint = (r'/changes/{c_id}/revisions/{r_id}/commit'.format(
            c_id=self.change_id[0],
            r_id=self.rev_id
        ))
        try:
            self.data = ger_obj.get(endpoint)
        except HTTPError as e:
            logging.warning('Endpoint: {} did not return data'.format(
                endpoint
            ))
        else:
            # Flatten the nested committer fields into top-level columns.
            self.data['commitid'] = self.data.get('commit')
            self.data['name'] = self.data.get('committer')['name']
            self.data['email'] = self.data.get('committer')['email']
            self.data['date'] = self.data.get('committer')['date']
            # Checksum of the record as it stands before the ETL columns
            # below are added.
            hash = md5()
            hash.update(json.dumps(self.data).encode('utf-8'))
            self.data['etl_checksum_md5'] = hash.hexdigest()
            self.data['etl_process_status'] = ETL_PROCESS_STATUS
            self.data['etl_datetime_local'] = ETL_DATETIME_LOCAL
            self.data['etl_pdi_version'] = ETL_PDI_VERSION
            self.data['etl_pdi_build_version'] = ETL_PDI_BUILD_VERSION
            self.data['etl_pdi_hostname'] = ETL_PDI_HOSTNAME
            self.data['etl_pdi_ipaddress'] = ETL_PDI_IPADDRESS
            # Sanitise the message for a pipe-delimited csv.
            self.data['message'] = self.data['message'].replace('\n', ' ').replace('|', '[pipe]')

    def write_data(self, writer):
        """Write self.data as one row via the given DictWriter."""
        writer.writerow(self.data)
I'm thinking that the best place to implement the threads is once I have all the commits in a list and am ready to iterate over them:
projects = [Project(value['id']) for value in project_data.values()]

# Only the first 10 projects are processed here.
for project in projects[:10]:
    # Normalise known-bad project names before querying.
    if project.name in bad_names.keys():
        project.name = bad_names[project.name]
    project.return_results(rest)
    all_commits.extend(project.commits)

fieldnames = get_fieldnames(
    'ods_gerrit.staging_gerrit_commits',
    REDSHIFT_POSTGRES_INFO)

# NOTE(review): 'wb' plus the print statement below mean this is Python 2.
with open('testfile.csv', 'wb') as outf:
    writer = DictWriter(
        outf,
        fieldnames=fieldnames,
        extrasaction='ignore',
        delimiter='|'
    )
    # Implement Threading?
    for commit in all_commits:
        commit.get_data(rest)
        try:
            commit.write_data(writer=writer)
        except AttributeError:
            # get_data hit an HTTPError and never set commit.data; skip.
            continue
        except Exception:
            print commit.data, 'caused an exception.'
            continue
I've read a few threading tutorials, and am unsure as to how to properly do this. I'm particularly worried about overwriting data due to improper locking.
I've saved a document in the blobstore, and am trying to retrieve it within a handler (which handles a task). I've had a read of the appengine documentation regarding the how to use blobstore, but am struggling to get it to work for my case. I've tried the following within the handler but cannot seem to have the object returned as the saved file (e.g. .pdf or .txt)
class SendDocuments(webapp2.RequestHandler):
    """Task handler; tries to pull a stored document out of the blobstore."""

    def post(self):
        document_key = self.request.get("document_key")
        document_key = Key(str(document_key))
        the_document = DocumentsModel.all().filter("__key__ =", document_key).get()
        file_data = blobstore.BlobInfo.get(str(the_document.blobstore_key)) # returns a blobinfo object
        # NOTE(review): both results below are discarded — nothing is kept
        # or written to the response, which is why this appears not to work.
        file_data.open() # returns a blobreader object
        file_data.open().read() # returns a string
I've also tried
class ServeSavedDocument(blobstore_handlers.BlobstoreDownloadHandler):
    """Download handler meant to stream a blob back as an attachment."""

    def get(self, blob_key):
        self.send_blob(blob_key, save_as=True)
        return
class SendDocuments(webapp2.RequestHandler):
    def post(self):
        document_key = self.request.get("document_key")
        document_key = Key(str(document_key))
        the_document = DocumentsModel.all().filter("__key__ =", document_key).get()
        # NOTE(review): instantiating a handler by hand leaves it without
        # its request/response pair, hence the "'NoneType' object has no
        # attribute 'headers'" failure described below. Handlers are meant
        # to be routed to by the framework, not called directly.
        grab_blob = ServeSavedDocument()
        file_data = grab_blob.get(self, str(the_document.blobstore_key))
But the call to ServeSavedDocument fails with
'NoneType' object has no attribute 'headers'
I've had a look at the files api but the only example that's not saving the file simply seems to return the blob key i.e.
# Returns only the key (a BlobKey), not the file contents.
blob_key = files.blobstore.get_blob_key(file_name)
What is the best way to grab a saved file in the blobstore from within a handler?
EDIT 1:
I'm trying to retrieve the txt file or pdf file from the blobstore in a format / state that can be encoded as a file in a post request using the following code
from google.appengine.api import urlfetch
from poster.encode import multipart_encode

# assuming here that file_data is the file object
# NOTE(review): MultipartParam is used below but not imported here
# (it lives in poster.encode).
payload = {}
payload['user_id'] = '1234123412341234'
payload['test_file'] = MultipartParam('test_file', filename=file_data.filename,
                                      filetype=file_data.type,
                                      fileobj=file_data.file)
data,headers= multipart_encode(payload)
# NOTE(review): 127.0.0.0 looks like a placeholder address — confirm.
send_url = "http://127.0.0.0/"
t = urlfetch.fetch(url=send_url, payload="".join(data), method=urlfetch.POST, headers=headers)
Okay, after a lot of messing around I've found that this works! The key was to simply call open() on the BlobInfo object.
class SendDocuments(webapp2.RequestHandler):
    """Builds a multipart payload from a blobstore file; the fix was to
    call open() on the BlobInfo to get a file-like reader."""

    def post(self):
        document_key = self.request.get("document_key")
        document_key = Key(str(document_key))
        the_document = DocumentsModel.all().filter("__key__ =", document_key).get()
        file_data = blobstore.BlobInfo.get(str(the_document.blobstore_key))
        payload = {}
        payload['user_id'] = '1234123412341234'
        # open() yields a BlobReader, which MultipartParam accepts as a
        # file object.
        payload['test_file'] = MultipartParam('the_file', filename="something",
                                              filetype=file_data.content_type,
                                              fileobj=file_data.open())
Have a look at the BlobReader class, I think that's what you are looking for:
https://developers.google.com/appengine/docs/python/blobstore/blobreaderclass
It allows file-like read access to blobstore data:
# blob_key = ..
# Instantiate a BlobReader for a given Blobstore value.
blob_reader = blobstore.BlobReader(blob_key)
# Read the entire value into memory. This may take a while depending
# on the size of the value and the size of the read buffer, and is not
# recommended for large values.
# (BlobReader is file-like, so large blobs can be read in chunks instead.)
value = blob_reader.read()
Try this: specify the file name and its type in the send_blob() call:
def mime_type(filename):
    """Best-effort MIME type for *filename*, or None if unknown."""
    # Local import: the original snippet used guess_type without
    # importing it from the stdlib mimetypes module.
    from mimetypes import guess_type
    return guess_type(filename)[0]
class ServeSavedDocument(blobstore_handlers.BlobstoreDownloadHandler):
    """Streams a blob to the client with its stored filename and MIME type."""

    def get(self):
        # The blob key arrives as a query parameter, not a URL path segment.
        blob_key = self.request.get("blob_key")
        if blob_key:
            blob_info = blobstore.get(blob_key)
            if blob_info:
                save_as1 = blob_info.filename
                type1=mime_type(blob_info.filename)
                self.send_blob(blob_info,content_type=type1,save_as=save_as1)