This is the data I have in my data.txt file
{"setup": "test", "punchline": "ok", "numOfRatings": 0, "sumOfRatings": 0},
{"setup": "test2", "punchline": "ok2", "numOfRatings": 0, "sumOfRatings": 0}
How would I be able to get only the "setup" value from each of the dictionaries using a loop?
Thanks
I'm not sure how you're getting the dictionaries into your text file in the first place, but if it's possible to drop the trailing commas, i.e.
{"setup": "test", "punchline": "ok", "numOfRatings": 0, "sumOfRatings": 0}
{"setup": "test2", "punchline": "ok2", "numOfRatings": 0, "sumOfRatings": 0}
Something like this may work for you:
def dicts_from_file(file):
    dicts_from_file = []
    with open(file, 'r') as inf:
        for line in inf:
            dicts_from_file.append(eval(line))
    return dicts_from_file

def get_setups(dicts):
    setups = []
    for dict in dicts:
        for key in dict:
            if key == "setup":
                setups.append(dict[key])
    return setups

print(get_setups(dicts_from_file("data.txt")))
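Since each line in the sample data is valid JSON, a json-based variant avoids eval entirely. A minimal sketch, assuming one object per line (the rstrip tolerates an optional trailing comma):
import json

def setups_from_file(path):
    setups = []
    with open(path) as inf:
        for line in inf:
            line = line.strip().rstrip(",")  # drop whitespace and any trailing comma
            if line:
                setups.append(json.loads(line)["setup"])
    return setups

print(setups_from_file("data.txt"))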
import ast

f = open('data')
for line in f:
    d = ast.literal_eval(line)[0]
    print(d['setup'])
For this code you need a ',' after every line, because ast.literal_eval(line) then converts the line into a tuple, and [0] extracts the dictionary from it.
If you do not have a ',' after every dict, use this instead:
f = open('data')
for line in f:
    d = ast.literal_eval(line)
    print(d['setup'])
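A quick interactive check (not from the original answer) shows the difference the trailing comma makes:
>>> import ast
>>> ast.literal_eval('{"setup": "test", "punchline": "ok"},')
({'setup': 'test', 'punchline': 'ok'},)
>>> ast.literal_eval('{"setup": "test", "punchline": "ok"}')
{'setup': 'test', 'punchline': 'ok'}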
You can try this if each line in your file is a standard dict string.
def get_setup_from_file(file_name):
    result = []
    with open(file_name, "r") as f:
        for line in f:
            # or: line_dict = json.loads(line)
            line_dict = eval(line)  # if the line ends with ',', try eval(line[0:-1])
            result.append(line_dict["setup"])
    return result
I hope this helps.
If each line is a standard dict string, try this:
with open(file, 'r') as file_input:
    for line in file_input:
        print(eval(line).get("setup"))
So far I have this code which is creating a dictionary from an input file:
def read_file(filename):
    with open("menu1.csv") as file:
        file.readline()
        for line in file:
            line_strip = [line.rstrip('\n')]
            lines = [line.split(',')]
            result = {key: (float(fl), int(intg), text.strip())
                      for key, fl, intg, text in lines}
            print(result)

read_file("menu1.csv")
I have to keep that code in that def format. However, this outputs 27 different dictionaries. How do I make it so it is all in ONE dictionary?
Also:
I want to alphabetize the keys and put them into a list. I tried something like this but it won't work:
def alphabetical_menu(dict):
    names = []
    for name in d:
        names.append(name)
    names.sort()
    print(names)
What am I doing wrong? Or do you have a way to do it?
Is this what you wanted?
def read_file(filename):
    result = {}
    with open(filename) as file:
        file.readline()
        for line in file:
            line_strip = line.rstrip()
            line_split = line.split(',')
            key, fl, intg, text = tuple(line_split)
            result[key] = (float(fl), int(intg), text.strip())
    return result

def alphabetical_menu(d):
    return sorted(d.keys())

menu_dict = read_file("menu1.csv")
menu_sorted_keys = alphabetical_menu(menu_dict)

# To check the result
print(menu_dict)
print(menu_sorted_keys)
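The contents of menu1.csv aren't shown in the question, but assuming a header row followed by hypothetical lines such as these, the two functions would behave as follows:
# menu1.csv (hypothetical):
#   item,price,calories,description
#   Pizza,9.99,850,Cheese and tomato
#   Burger,7.50,600,Beef patty with lettuce

menu_dict = read_file("menu1.csv")
# {'Pizza': (9.99, 850, 'Cheese and tomato'), 'Burger': (7.5, 600, 'Beef patty with lettuce')}

alphabetical_menu(menu_dict)
# ['Burger', 'Pizza']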
I have a file with multiple dictionaries, one in each line.
They all have the same keys. I want to rename one key from 'id' to 'orderid' in all of them. What is the most efficient way to do so?
Sample data:
{'total_ex_tax': '11.0000', 'is_deleted': False, 'status_id': 5, 'id': 614534}
{'total_ex_tax': '100.0000', 'is_deleted': False, 'status_id': 5, 'id': 614535}
Code so far:
import ast

def popAndMergeDicts(dicts):
    dictLine = ast.literal_eval(dicts)
    tempDict = dictLine['billing_address']
    del dictLine['billing_address']
    for i in tempDict:
        dictLine[i] = tempDict[i]
    # insertOrdersInDatabase(dictLine)
    rename_id(dictLine)
    return dictLine

def rename_id(dictionary):
    pass

def process_orders_file(filename):
    lines = tuple(open(filename))
    for line in lines[0:]:
        popAndMergeDicts(line)

process_orders_file('allOrdersData')
This is trivial:
def rename_id(dictionary):
    try:
        dictionary['new_key'] = dictionary.pop('old_key')
    except KeyError:
        # 'old_key' is not present in dictionary.
        if 'new_key' not in dictionary:
            raise KeyError('This dictionary is missing "old_key": %s' %
                           dictionary)
If KeyError is raised, search your file for the error-causing dictionary and correct it as necessary.
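For instance, applied to the first sample dictionary from the question (with 'id' and 'orderid' substituted for the placeholder key names), the pop-based rename looks like this:
order = {'total_ex_tax': '11.0000', 'is_deleted': False, 'status_id': 5, 'id': 614534}
order['orderid'] = order.pop('id')  # remove 'id' and re-insert its value under 'orderid'
print(order)
# {'total_ex_tax': '11.0000', 'is_deleted': False, 'status_id': 5, 'orderid': 614534}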
Do you want to use the new dict directly? If not, then:
with open("input.txt") as f:
    for line in f:
        print(line.strip().replace("'id'", "'orderid'"))  # match the quoted key so 'status_id' is left alone
If you want to use it as a dict, then you can try:
import ast

with open("input.txt") as f:
    for line in f:
        mydict = ast.literal_eval(line.strip())
        mydict["orderid"] = mydict.pop("id")
        # use mydict
So, I have a text file (a paragraph) and I need to read the file and create a dictionary containing each different word from the file as a key, where the corresponding value for each key is an integer giving the frequency of that word in the text file.
an example of what the dictionary should look like:
{'and':2, 'all':1, 'be':1, 'is':3} etc.
So far I have this:
def create_word_frequency_dictionary():
    filename = 'dictionary.txt'
    infile = open(filename, 'r')
    line = infile.readline()
    my_dictionary = {}
    frequency = 0
    while line != '':
        row = line.lower()
        word_list = row.split()
        print(word_list)
        print(word_list[0])
        words = word_list[0]
        my_dictionary[words] = frequency + 1
        line = infile.readline()
    infile.close()
    print(my_dictionary)

create_word_frequency_dictionary()
Any help would be appreciated, thanks.
The documentation defines the collections module as "High-performance container datatypes". Consider using collections.Counter instead of re-inventing the wheel.
from collections import Counter
filename = 'dictionary.txt'
infile = open(filename, 'r')
text = str(infile.read())
print(Counter(text.split()))
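If only the most frequent words are needed, Counter also provides most_common(); for example:
print(Counter(text.split()).most_common(5))  # the five most frequent words with their counts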
Update:
Okay, I fixed your code and now it works, but Counter is still a better option:
def create_word_frequency_dictionary():
    filename = 'dictionary.txt'
    infile = open(filename, 'r')
    lines = infile.readlines()
    my_dictionary = {}
    for line in lines:
        row = str(line.lower())
        for word in row.split():
            if word in my_dictionary:
                my_dictionary[word] = my_dictionary[word] + 1
            else:
                my_dictionary[word] = 1
    infile.close()
    print(my_dictionary)

create_word_frequency_dictionary()
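As an illustration (the real dictionary.txt isn't shown), if the file contained the single line "All is well and all is good is fine", the fixed function would print:
create_word_frequency_dictionary()
# {'all': 2, 'is': 3, 'well': 1, 'and': 1, 'good': 1, 'fine': 1}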
If you are not using a version of Python which has Counter:
>>> import collections
>>> words = ["a", "b", "a", "c"]
>>> word_frequency = collections.defaultdict(int)
>>> for w in words:
... word_frequency[w] += 1
...
>>> print word_frequency
defaultdict(<type 'int'>, {'a': 2, 'c': 1, 'b': 1})
Just replace my_dictionary[words] = frequency+1 with my_dictionary[words] = my_dictionary.get(words, 0) + 1 (using .get avoids a KeyError the first time a word is seen).
I have a file with the following format.
>abc
qqqwqwqwewrrefee
eededededededded
dededededededd
>bcd
swswswswswswswws
wswswsddewewewew
wrwwewedsddfrrfe
>fgv
wewewewewewewewew
wewewewewewewxxee
wwewewewe
I was trying to create a dictionary with (>abc, >bcd, >fgv) as keys and the strings below them as values. I could extract the keys but am confused about updating the values. Help me, please.
file2 = open("ref.txt", 'r')
for line in file2.readlines():
    if ">" in line:
        print(line)
Not sure what you mean about "updating" the values, but try this:
mydict = {}
with open("ref.txt", "r") as file2:
    current = None
    for line in file2.readlines():
        if line[0] == ">":
            current = line[1:-1]
            mydict[current] = ""
        elif current:
            mydict[current] += line  # use line[:-1] if you don't want the '\n'
In [2]: mydict
Out[2]:
{'abc': 'qqqwqwqwewrrefee\neededededededded\ndededededededd\n',
 'bcd': 'swswswswswswswws\nwswswsddewewewew\nwrwwewedsddfrrfe\n',
 'fgv': 'wewewewewewewewew\nwewewewewewewxxee\nwwewewewe\n'}
When you get a line value with the '>' in it, save the line in a variable. When you read a line without the '>' in it, add it to a dictionary entry keyed by the previously saved variable.
key = None
dict = {}
for line in file2.readlines():
    if ">" in line:
        key = line
        dict[key] = ''     # Initialise dictionary entry
    elif key is not None:
        dict[key] += line  # Append to dictionary entry
dictionary = {}
with open("file.txt", "r") as r:
    for line in r.readlines():
        if ">" in line:
            key = line[1:].strip()
            dictionary[key] = ""
        else:
            dictionary[key] += line
print(dictionary)
d = {}
key = ''
file2 = open("ref.txt", 'r')
for line in file2.readlines():
    if line.startswith('>'):
        key = line.strip()
        d[key] = []
        continue
    d[key].append(line.strip())
file2.close()
I have not tested the above code, but it should work
So I have an input file to the script as follows:
20248109|Generic|1|xxx|2|yyy|LINEA|68.66|68.67|True|2920958141272
.
.
.
21248109|Generic|3|xxx|4|www|LINEB|7618|7622|True|2920958281071.97
I want the Python script to iterate through and put the LINEA data into a dictionary as follows: {{1: [68.66, 68.67]}, {3: [7618, 7622]}}
Here's as far as I've gotten:
Key = ["LINEA", "LINEB"]
fin = open(path)
test = []
for line in fin.readlines():
    if True in [item in line for item in Key]:
        test.append(line)
Any help at all would be fantastic.
First, you should use the csv module:
import csv

with open(path, newline="") as infile:  # on Python 2, use open(path, "rb") instead
    reader = csv.reader(infile, delimiter="|")
Then, you can iterate over the lines:
test = []
for row in reader:  # iterate while the file is still open, i.e. inside the with block above
    if row[6] in Key:
        test.append({int(row[2]): row[7:9]})
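Note that row[7:9] still holds strings; if the numeric values shown in the desired output are wanted as floats, one possible tweak is:
test.append({int(row[2]): [float(x) for x in row[7:9]]})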
I would do this:
keys = ["LINEA", "LINEB"]
with open(path) as fin:
    answer = {line.partition("Generic|")[-1]: line for line in fin if any(key in line for key in keys)}
To edit your answer directly, you're actually quite close:
Key = ["LINEA", "LINEB"]
fin = open(path)
test = {}  # dictionary
for line in fin.readlines():
    if True in [item in line for item in Key]:
        dict_key = line.partition("Generic|")[-1]
        test[dict_key] = line
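If what is actually wanted is a single dictionary mapping the third field to the two numeric fields (as the sample output suggests), a minimal sketch built on a plain split of the pipe-delimited lines would be:
keys = ["LINEA", "LINEB"]
result = {}
with open(path) as fin:
    for line in fin:
        fields = line.strip().split("|")
        if fields[6] in keys:  # LINEA / LINEB sits in the seventh field
            result[int(fields[2])] = [float(fields[7]), float(fields[8])]
print(result)  # e.g. {1: [68.66, 68.67], 3: [7618.0, 7622.0]}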