Create multiple JSON objects - Python

I am trying to build a list in Python as below. I want to call this function multiple times and build a JSON array, but when I try json.dumps with for p in range(0, 10) it adds an extra [] around each JSON object.
import collections
import random
import string

def buildlist():
    objects_list = []
    d = collections.OrderedDict()
    d['batteryLevel'] = random.randint(0, 100)
    d['firmwareVersion'] = "2016-04-16-ENGG"
    d['macId'] = MACprettyprint(randomMAC())
    # Python 2: string.letters and xrange (string.ascii_letters / range in Python 3)
    d['name'] = "".join([random.choice(string.digits + string.letters) for i in xrange(7)])
    d['rssi'] = random.randint(0, 100) * -1
    d['status'] = random.choice([OPEN, LOCKED])
    objects_list.append(d)
    return objects_list

I'm not exactly sure what your question is. But the function you posted will always return a list containing just a single OrderedDict. Why don't you just return the OrderedDict and build the list outside the function?
def builditem():
    d = collections.OrderedDict()
    d['batteryLevel'] = random.randint(0, 100)
    ...
    d['status'] = random.choice([OPEN, LOCKED])
    return d

json.dumps([builditem() for n in range(10)])
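For reference, a minimal self-contained sketch of the same idea; the helpers the question doesn't show (MACprettyprint, randomMAC, OPEN, LOCKED) are replaced with hypothetical stand-ins here:

import collections
import json
import random

# Hypothetical stand-ins for the asker's helpers:
OPEN, LOCKED = "OPEN", "LOCKED"

def random_mac():
    return ":".join("%02x" % random.randint(0, 255) for _ in range(6))

def builditem():
    d = collections.OrderedDict()
    d['batteryLevel'] = random.randint(0, 100)
    d['macId'] = random_mac()
    d['status'] = random.choice([OPEN, LOCKED])
    return d

# One json.dumps over a list of dicts produces a single JSON array,
# with no extra [] around each object:
print(json.dumps([builditem() for n in range(10)]))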

Related

While adding data to a dictionary that holds a list as its value, all my previous keys get updated with the latest value of the list

allWords = {}
groupIterate = []
for j in range(0, 30):
    group = columnsList[j]
    groupIterate.clear()
    for i in range(30):
        word = maal[group][i]
        url = f'https://www.vocabulary.com/dictionary/{word}'
        groupIterate.append(url)
    allWords[group] = groupIterate
All the keys in "allWords" get updated by the latest value of the list "groupIterate". However, I don't want to overwrite but simply add new keys with the respective "groupIterate" list in that iteration.
You're using only one groupIterate instance, which you set as the value of every key. Don't clear it; build a new one on each iteration:
for j in range(0, 30):
    group = columnsList[j]
    groupIterate = []
    for i in range(30):
        word = maal[group][i]
        url = f'https://www.vocabulary.com/dictionary/{word}'
        groupIterate.append(url)
    allWords[group] = groupIterate
Or use a list comprehension:
for j in range(0, 30):
    group = columnsList[j]
    allWords[group] = [f'https://www.vocabulary.com/dictionary/{maal[group][i]}' for i in range(30)]
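To see why the first version fails, here is a small standalone illustration: every value in the dict is the same list object, so clearing and refilling it changes what all keys see.

d = {}
shared = []
for key in ('a', 'b'):
    shared.clear()       # reuses the same list object every time
    shared.append(key)
    d[key] = shared      # every key points at that one list
print(d)   # {'a': ['b'], 'b': ['b']}

d = {}
for key in ('a', 'b'):
    d[key] = [key]       # a fresh list per iteration
print(d)   # {'a': ['a'], 'b': ['b']}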

Creating a new list from the means of lists returned by a function

I've defined a function called CalcspeedAngle
def CalcspeedAngle(k):
    xpos = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    ypos = [10, 11, 12, 13, 14, 15, 16, 17, 18]
    turnrates = [entry/3.099 for entry in xpos][::k]
    vts = [entry/3.099 for entry in ypos][::k]
    return turnrates, vts
then I write
turnrates, vts = CalcspeedAngle(k)
My question is: How can I create two new lists (m1list and m2list) that contain the averages of the turnrates and vts with k = 1,2,3,4,5,6,7,8,9,10,15,20? In other words, how can I create:
m1list = [mean(flattened(turnrates(k=1))), mean(flattened(turnrates(k=2))), ...]
m2list = [mean(flattened(vts(k=1))), mean(flattened(vts(k=2))), ...]
Any help would be greatly appreciated.
One way to simplify your code is to perform a single loop over your k values, unpack the two returned lists, calculate the mean of each, and append the means to your result lists:
import numpy as np

def lists_of_means(ks=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20)):
    m1list = []
    m2list = []
    for k in ks:   # note: k = 0 would be an invalid slice step
        turnrates, vts = CalcspeedAngle(k)
        m1list.append(np.mean(turnrates))
        m2list.append(np.mean(vts))
    return m1list, m2list
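A quick usage sketch (assuming CalcspeedAngle is defined as above):

m1list, m2list = lists_of_means()
print(m1list[0], m2list[0])   # means of turnrates and vts for k = 1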

Problems with the zip function: lists that seem not iterable

I'm having some troubles trying to use four lists with the zip function.
In particular, I'm getting the following error at line 36:
TypeError: zip argument #3 must support iteration
I've already read that this happens with non-iterable objects, but I'm using it on lists! And if I use zip on only the first two lists it works perfectly: I have problems only with the last two.
Does anyone have ideas on how to solve this? Many thanks!
import numpy

# setting initial values
R = 330
C = 0.1
f_T = 1/(2*numpy.pi*R*C)
w_T = 2*numpy.pi*f_T
n = 10
T = 1
w = (2*numpy.pi)/T
t = numpy.linspace(-2, 2, 100)

# making the lists c_k, w_k, A_k, phi_k
c_karray = []
w_karray = []
A_karray = []
phi_karray = []

# populating the lists
for k in range(1, n, 2):
    c_k = 2/(k*numpy.pi)
    w_k = k*w
    A_k = 1/(numpy.sqrt(1+(w_k)**2))
    phi_k = numpy.arctan(-w_k)
    c_karray.append(c_k)
    w_karray.append(w_k)
    A_karray.append(A_k)
    phi_karray.append(phi_k)

# making the function w(t)
w = []

# doing the sum for each t and populating w(t)
for i in t:
    w_i = [A_k*c_k*numpy.sin(w_k*i+phi_k) for c_k, w_k, A_k, phi_k in zip(c_karray, w_karray, A_k, phi_k)]
    w.append(sum(w_i))
You probably mistyped the last two arguments to zip. They should be A_karray and phi_karray, because A_k and phi_k are single values.
My result for w is:
[-0.11741034896740517,
-0.099189027720991918,
-0.073206290274556718,
...
-0.089754003567358978,
-0.10828235682188027,
-0.1174103489674052]
HTH,
Germán.
I believe you want zip(c_karray, w_karray, A_karray, phi_karray). Additionally, you should build this once, not on each iteration of the for loop.
Furthermore, you are not really making use of numpy. Try this instead of your loops.
d = numpy.arange(1, n, 2)
c_karray = 2/(d*numpy.pi)
w_karray = d*w
A_karray = 1/(numpy.sqrt(1+(w_karray)**2))
phi_karray = numpy.arctan(-w_karray)
w = (A_karray*c_karray*numpy.sin(w_karray*t[:,None]+phi_karray)).sum(axis=-1)
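The t[:, None] trick is NumPy broadcasting: a (100, 1) column of times against a length-5 row of frequencies yields a (100, 5) matrix of terms, which is then summed per time point. A small sketch:

import numpy

t = numpy.linspace(-2, 2, 100)            # shape (100,)
w_karray = numpy.arange(1, 10, 2)         # shape (5,)
terms = numpy.sin(w_karray * t[:, None])  # broadcasts to shape (100, 5)
print(terms.sum(axis=-1).shape)           # (100,) -- one summed value per t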

Python: Concatenate similar objects in a list

I have a list containing strings of the form 'Country-Points'.
For example:
lst = ['Albania-10', 'Albania-5', 'Andorra-0', 'Andorra-4', 'Andorra-8', ...other countries...]
I want to calculate the average for each country without creating a new list. So the output would be (in the case above):
lst = ['Albania-7.5', 'Andorra-4.25', ...other countries...]
I would really appreciate it if anyone could help me with this.
EDIT:
This is what I've got so far. "data" is actually a dictionary, where the keys are countries and the values are lists of other countries' points for this country (the one used as key). Again, I'm new at Python so I don't really know all the built-in functions.
for key in self.data:
    lst = []
    index = 0
    score = 0
    cnt = 0
    s = str(self.data[key][0]).split("-")[0]
    for i in range(len(self.data[key])):
        if s in self.data[key][i]:
            a = str(self.data[key][i]).split("-")
            score += int(float(a[1]))
            cnt += 1
        index += 1
        if i+1 != len(self.data[key]) and not s in self.data[key][i+1]:
            lst.append(s + "-" + str(float(score/cnt)))
            s = str(self.data[key][index]).split("-")[0]
            score = 0
    self.data[key] = lst
itertools.groupby with a suitable key function can help:
import itertools

def get_country_name(item):
    return item.split('-', 1)[0]

def get_country_value(item):
    return float(item.split('-', 1)[1])

def country_avg_grouper(lst):
    for ctry, group in itertools.groupby(lst, key=get_country_name):
        values = list(get_country_value(c) for c in group)
        avg = sum(values)/len(values)
        yield '{country}-{avg}'.format(country=ctry, avg=avg)

lst[:] = country_avg_grouper(lst)
The key here is that I wrote a function to do the change out of place and then I can easily make the substitution happen in place by using slice assignment.
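One caveat worth noting (not stated in the answer above): itertools.groupby only merges adjacent equal keys, so if the input isn't already grouped by country, sort it first. A small usage sketch:

lst = ['Andorra-0', 'Albania-10', 'Albania-5', 'Andorra-4']
lst.sort(key=get_country_name)   # groupby needs equal keys to be adjacent
lst[:] = country_avg_grouper(lst)
print(lst)   # ['Albania-7.5', 'Andorra-2.0']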
I would probably do this with an intermediate dictionary.
def country(s):
    return s.split('-')[0]

def value(s):
    return float(s.split('-')[1])

def country_average(lst):
    country_map = {}
    for pair in lst:
        c = country(pair)
        v = value(pair)
        old = country_map.get(c, (0, 0))
        country_map[c] = (old[0]+v, old[1]+1)
    return ['%s-%f' % (name, total/count)
            for (name, (total, count)) in country_map.items()]
It tries hard to traverse the original list only once, at the expense of quite a few tuple allocations.
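A quick usage sketch with the sample data from the question:

lst = ['Albania-10', 'Albania-5', 'Andorra-0', 'Andorra-4', 'Andorra-8']
print(country_average(lst))
# ['Albania-7.500000', 'Andorra-4.000000']  (order follows insertion on Python 3.7+)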

Fast way to remove a few items from a list/queue

This is a follow-up to a similar question, which asked the best way to write
for item in somelist:
    if determine(item):
        code_to_remove_item
and it seems the consensus was on something like
somelist[:] = [x for x in somelist if not determine(x)]
However, I think if you are only removing a few items, most of the items are just being copied into the new list, and perhaps that is slow. In an answer to another related question, someone suggests:
for item in reversed(somelist):
    if determine(item):
        somelist.remove(item)
However, here list.remove will search for the item, which is O(N) in the length of the list. Maybe we are limited in that the list is represented as an array, rather than a linked list, so removing items needs to move everything after them. However, it is suggested here that collections.deque is represented as a doubly linked list. It should then be possible to remove in O(1) while iterating. How would we actually accomplish this?
Update:
I did some time testing as well, with the following code:
import timeit

setup = """
import random
random.seed(1)
b = [(random.random(),random.random()) for i in xrange(1000)]
c = []
def tokeep(x):
    return (x[1]>.45) and (x[1]<.5)
"""

listcomp = """
c[:] = [x for x in b if tokeep(x)]
"""

filt = """
c = filter(tokeep, b)
"""

print "list comp = ", timeit.timeit(listcomp, setup, number = 10000)
print "filtering = ", timeit.timeit(filt, setup, number = 10000)
and got:
list comp = 4.01255393028
filtering = 3.59962391853
The list comprehension is the asymptotically optimal solution:
somelist = [x for x in somelist if not determine(x)]
It only makes one pass over the list, so it runs in O(n) time. Since you need to call determine() on each object, any algorithm will require at least O(n) operations. The list comprehension does have to do some copying, but it's only copying references to the objects, not the objects themselves.
Removing items from a list in Python is O(n), so anything with a remove, pop, or del inside the loop will be O(n**2).
Also, in CPython list comprehensions are faster than for loops.
If you need to remove items in O(1), you can use a hash map (a dict or set).
Since list.remove is equivalent to del list[list.index(x)], you could do:
for idx, item in enumerate(somelist):
    if determine(item):
        del somelist[idx]
But: you should not modify the list while iterating over it. It will bite you, sooner or later. Use filter or list comprehension first, and optimise later.
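To make the hazard concrete, here is a small demonstration of how deleting during iteration skips elements (each deletion shifts the remaining items under the iterator):

L = ['a', 'b', 'b', 'c']
for idx, item in enumerate(L):
    if item == 'b':
        del L[idx]
print(L)   # ['a', 'b', 'c'] -- the second 'b' was skipped, not removed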
A deque is optimized for head and tail removal, not for arbitrary removal in the middle. The removal itself is fast, but you still have to traverse the list to the removal point. If you're iterating through the entire length, then the only difference between filtering a deque and filtering a list (using filter or a comprehension) is the overhead of copying, which at worst is a constant multiple; it's still an O(n) operation. Also, note that the objects in the list aren't being copied -- just the references to them. So it's not that much overhead.
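For illustration, filtering a deque looks just like filtering a list, and costs the same O(n) (a minimal sketch):

from collections import deque

d = deque([1, 2, 3, 4, 5])
d = deque(x for x in d if x % 2)   # rebuild, keeping the odd values
print(d)   # deque([1, 3, 5])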
It's possible that you could avoid copying like so, but I have no particular reason to believe this is faster than a straightforward list comprehension -- it's probably not:
write_i = 0
for read_i in range(len(L)):
    L[write_i] = L[read_i]
    if L[read_i] not in ['a', 'c']:
        write_i += 1
del L[write_i:]
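A quick trace of the compaction above on a small input (a usage sketch; it keeps elements other than 'a' and 'c' and truncates the tail):

L = ['a', 'b', 'c', 'd', 'a']
write_i = 0
for read_i in range(len(L)):
    L[write_i] = L[read_i]
    if L[read_i] not in ['a', 'c']:
        write_i += 1
del L[write_i:]
print(L)   # ['b', 'd']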
I took a stab at this. My solution is slower, but requires less memory overhead (i.e. doesn't create a new array). It might even be faster in some circumstances!
This code has been edited since its first posting
I had problems with timeit, I might be doing this wrong.
import timeit

setup = """
import random
random.seed(1)
global b
setup_b = [(random.random(), random.random()) for i in xrange(1000)]
c = []
def tokeep(x):
    return (x[1]>.45) and (x[1]<.5)

# define and call to turn into psyco bytecode (if using psyco)
b = setup_b[:]
def listcomp():
    c[:] = [x for x in b if tokeep(x)]
listcomp()

b = setup_b[:]
def filt():
    c = filter(tokeep, b)
filt()

b = setup_b[:]
def forfilt():
    marked = (i for i, x in enumerate(b) if tokeep(x))
    shift = 0
    for n in marked:
        del b[n - shift]
        shift += 1
forfilt()

b = setup_b[:]
def forfiltCheating():
    marked = (i for i, x in enumerate(b) if (x[1] > .45) and (x[1] < .5))
    shift = 0
    for n in marked:
        del b[n - shift]
        shift += 1
forfiltCheating()
"""

listcomp = """
b = setup_b[:]
listcomp()
"""

filt = """
b = setup_b[:]
filt()
"""

forfilt = """
b = setup_b[:]
forfilt()
"""

forfiltCheating = '''
b = setup_b[:]
forfiltCheating()
'''

psycosetup = '''
import psyco
psyco.full()
'''

print "list comp = ", timeit.timeit(listcomp, setup, number = 10000)
print "filtering = ", timeit.timeit(filt, setup, number = 10000)
print 'forfilter = ', timeit.timeit(forfilt, setup, number = 10000)
print 'forfiltCheating = ', timeit.timeit(forfiltCheating, setup, number = 10000)
print '\nnow with psyco \n'
print "list comp = ", timeit.timeit(listcomp, psycosetup + setup, number = 10000)
print "filtering = ", timeit.timeit(filt, psycosetup + setup, number = 10000)
print 'forfilter = ', timeit.timeit(forfilt, psycosetup + setup, number = 10000)
print 'forfiltCheating = ', timeit.timeit(forfiltCheating, psycosetup + setup, number = 10000)
And here are the results
list comp = 6.56407690048
filtering = 5.64738512039
forfilter = 7.31555104256
forfiltCheating = 4.8994679451
now with psyco
list comp = 8.0485959053
filtering = 7.79016900063
forfilter = 9.00477004051
forfiltCheating = 4.90830993652
I must be doing something wrong with psyco, because it is actually running slower.
elements are not copied by list comprehension
This took me a while to figure out. See the example code below to experiment with the different approaches yourself.
code
You can specify how long a list element takes to copy and how long it takes to evaluate. The time to copy is irrelevant for list comprehension, as it turned out.
import time
import timeit
import numpy as np

def ObjectFactory(time_eval, time_copy):
    """
    Creates a class

    Parameters
    ----------
    time_eval : float
        time to evaluate (True or False, i.e. keep in list or not) an object
    time_copy : float
        time to (shallow-) copy an object. Used by list comprehension.

    Returns
    -------
    New class with defined copy-evaluate performance
    """
    class Object:
        def __init__(self, id_, keep):
            self.id_ = id_
            self._keep = keep

        def __repr__(self):
            return f"Object({self.id_}, {self.keep})"

        @property
        def keep(self):
            time.sleep(time_eval)
            return self._keep

        def __copy__(self):  # list comprehension does not copy the object
            time.sleep(time_copy)
            return self.__class__(self.id_, self._keep)

    return Object

def remove_items_from_list_list_comprehension(lst):
    return [el for el in lst if el.keep]

def remove_items_from_list_new_list(lst):
    new_list = []
    for el in lst:
        if el.keep:
            new_list += [el]
    return new_list

def remove_items_from_list_new_list_by_ind(lst):
    new_list_inds = []
    for ee in range(len(lst)):
        if lst[ee].keep:
            new_list_inds += [ee]
    return [lst[ee] for ee in new_list_inds]

def remove_items_from_list_del_elements(lst):
    """WARNING: Modifies lst"""
    del_inds = []
    for ee in range(len(lst)):
        if not lst[ee].keep:
            del_inds += [ee]
    for ind in del_inds[::-1]:  # delete back to front so indices stay valid
        del lst[ind]

if __name__ == "__main__":
    ClassSlowCopy = ObjectFactory(time_eval=0, time_copy=0.1)
    ClassSlowEval = ObjectFactory(time_eval=1e-8, time_copy=0)
    keep_ratio = .8
    n_runs_timeit = int(1e2)
    n_elements_list = int(1e2)

    lsts_to_tests = dict(
        list_slow_copy_remove_many = [ClassSlowCopy(ii, np.random.rand() > keep_ratio) for ii in range(n_elements_list)],
        list_slow_copy_keep_many = [ClassSlowCopy(ii, np.random.rand() > keep_ratio) for ii in range(n_elements_list)],
        list_slow_eval_remove_many = [ClassSlowEval(ii, np.random.rand() > keep_ratio) for ii in range(n_elements_list)],
        list_slow_eval_keep_many = [ClassSlowEval(ii, np.random.rand() > keep_ratio) for ii in range(n_elements_list)],
    )

    for lbl, lst in lsts_to_tests.items():
        print()
        for fct in [
            remove_items_from_list_list_comprehension,
            remove_items_from_list_new_list,
            remove_items_from_list_new_list_by_ind,
            remove_items_from_list_del_elements,
        ]:
            lst_loc = lst.copy()
            t = timeit.timeit(lambda: fct(lst_loc), number=n_runs_timeit)
            print(f"{fct.__name__}, {lbl}: {t=}")
output
remove_items_from_list_list_comprehension, list_slow_copy_remove_many: t=0.0064229519994114526
remove_items_from_list_new_list, list_slow_copy_remove_many: t=0.006507338999654166
remove_items_from_list_new_list_by_ind, list_slow_copy_remove_many: t=0.006562008995388169
remove_items_from_list_del_elements, list_slow_copy_remove_many: t=0.0076057760015828535
remove_items_from_list_list_comprehension, list_slow_copy_keep_many: t=0.006243691001145635
remove_items_from_list_new_list, list_slow_copy_keep_many: t=0.007145451003452763
remove_items_from_list_new_list_by_ind, list_slow_copy_keep_many: t=0.007032064997474663
remove_items_from_list_del_elements, list_slow_copy_keep_many: t=0.007690364996960852
remove_items_from_list_list_comprehension, list_slow_eval_remove_many: t=1.2495998149970546
remove_items_from_list_new_list, list_slow_eval_remove_many: t=1.1657221479981672
remove_items_from_list_new_list_by_ind, list_slow_eval_remove_many: t=1.2621939050004585
remove_items_from_list_del_elements, list_slow_eval_remove_many: t=1.4632593330024974
remove_items_from_list_list_comprehension, list_slow_eval_keep_many: t=1.1344162709938246
remove_items_from_list_new_list, list_slow_eval_keep_many: t=1.1323430630000075
remove_items_from_list_new_list_by_ind, list_slow_eval_keep_many: t=1.1354237199993804
remove_items_from_list_del_elements, list_slow_eval_keep_many: t=1.3084568729973398
import collections

list1 = collections.deque(list1)
for i in list2:
    try:
        list1.remove(i)
    except ValueError:
        pass
Instead of checking whether the element is there, this uses try/except. I guess this is faster.
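Note that deque.remove is still O(n) per call. If the elements are hashable and you want to drop every occurrence, a set gives O(1) membership tests, so the whole pass is O(len(list1) + len(list2)). A hedged alternative sketch (unlike remove, which drops only the first occurrence, this removes all of them):

list1 = [1, 2, 3, 2, 5]
list2 = [2, 4]
to_remove = set(list2)
list1 = [x for x in list1 if x not in to_remove]
print(list1)   # [1, 3, 5]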
