list = [[159.2213, 222.2223, 101.2122],
        [359.2222, 22.2210, 301.2144]]
if list[1][0] < list[0][0]:
    avg = (list[1][0] + list[0][0] - 200)/2
else:
    avg = (list[1][0] + list[0][0] + 200)/2
Hello! I want to do this for every column and output the results in another list.
Fix
You can loop over the columns by index:
values = [[159.2213, 222.2223, 101.2122], [359.2222, 22.2210, 301.2144]]
avgs = []
for idx_col in range(len(values[0])):
    if values[1][idx_col] < values[0][idx_col]:
        avg = (values[1][idx_col] + values[0][idx_col] - 200) / 2
    else:
        avg = (values[1][idx_col] + values[0][idx_col] + 200) / 2
    avgs.append(avg)
Simplify
You can use zip to iterate over both rows at once and simplify the if/else condition:
avgs = []
for first_row, second_row in zip(*values):
    factor = -1 if second_row < first_row else 1
    avgs.append((first_row + second_row + (200 * factor)) / 2)
Best with numpy
Easy syntax and best performance
import numpy as np
values = np.array(values)
res = values.sum(axis=0) / 2
res += np.where(values[1] < values[0], -100, 100)
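For reference, a quick check of this snippet against the sample data from the question (a sketch; the exact float formatting may vary):
# Sanity check with the question's sample data (same logic as above)
import numpy as np

values = np.array([[159.2213, 222.2223, 101.2122],
                   [359.2222, 22.2210, 301.2144]])
res = values.sum(axis=0) / 2
res += np.where(values[1] < values[0], -100, 100)
print(res)   # roughly [359.22, 22.22, 301.21]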
A list comprehension would look like this:
avg = [(x + y + (200 if x <= y else -200)) / 2 for x, y in zip(*lst)]
Arguably easier if you use numpy:
import numpy as np

arr = np.array(lst)
avg = 0.5 * (arr.sum(axis=0) + np.copysign(200, np.diff(arr, axis=0)[0]))
lis = [[159.2213, 222.2223, 101.2122],
       [359.2222, 22.2210, 301.2144]]
res = []
for i in range(len(lis[0])):
    if lis[1][i] < lis[0][i]:
        res.append((lis[1][i] + lis[0][i] - 200)/2)
    else:
        res.append((lis[1][i] + lis[0][i] + 200)/2)
This should work; however, using NumPy would be a better solution for this kind of problem.
You can do it like this:
list = [[159.2213, 222.2223, 101.2122],
        [359.2222, 22.2210, 301.2144]]
results = []
for x, y in zip(list[0], list[1]):
    if y < x:
        avg = (y + x - 200)/2
    else:
        avg = (y + x + 200)/2
    results.append(avg)
Thank you very much to everyone who helped answer. These all work as they should and the results can be appended, as now demonstrated below.
I want to print the working out of the multiplication and addition steps.
import numpy as np
# [x, w] including bias
X = [[0.5, 0.15], [0.1, -0.3], [1, 0]]
in_str = 'in = '
for input in X:
    substr = '('+str(input[0])+' x '+str(input[1])+') + '
    in_str += substr
in_str = in_str[:-3]
print(in_str)
calcs = [x * y for x, y in X]
in_str = ' = '
for c in calcs:
    substr = '('+str(c)+') + '
    in_str += substr
in_str = in_str[:-3]
print(in_str)
ans = sum([x * y for x, y in X])
print(' = ' + str(ans))
Output:
in = (0.5 x 0.15) + (0.1 x -0.3) + (1 x 0)
= (0.075) + (-0.03) + (0)
= 0.045
Use list comprehension:
ans = sum([x * y for x, y in X])
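If you also want to print the working, the same comprehension idea extends to building the strings with join. A small sketch (assuming the same X as in the question):
# A sketch using join to build the working strings (assumes the X from the question).
# Note: under Python 3 the float reprs may show more digits than the output shown above.
X = [[0.5, 0.15], [0.1, -0.3], [1, 0]]
products = [x * w for x, w in X]
print('in = ' + ' + '.join('({} x {})'.format(x, w) for x, w in X))
print(' = ' + ' + '.join('({})'.format(p) for p in products))
print(' = ' + str(sum(products)))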
If I understand your question correctly, you need the product of the elements in each sub-list appended to a final list, and then the sum of all elements in that final list. Please find the code below. Traditional way:
multiplier = 1
multiplier_list = []
final_sum = 0
for each_list in X:
    for i in range(len(each_list)):
        multiplier = multiplier * each_list[i]
    #multiplier_list.append(multiplier) #If you need the final list
    final_sum = final_sum + multiplier
    multiplier = 1
#print(multiplier_list)
print(final_sum)
X = [[0.5, 0.15], [0.1, -0.3], [1, 0]] # INPUT HERE
print(f"in X = {X}")
output = ""
ans = 0
for input in X:
    mul = input[0]*input[1]
    print(f"{input[0]} x {input[1]} = {mul}")
    ans += mul
    output = output + f"{mul} + "
output = output[:-2]
output = output + f" = {ans}"
print(output)
I'm fairly new to this but will try and be as clear as possible.
Essentially I have 5 different lists of lists. 4 are imported from txt files and the 5th is a merger of the 4. Each inner list contains a value at index position 3. My objective is to maximize the sum of those values by picking items appropriately.
I also have a couple constraints:
The sum of the values at index position 6 can't exceed 50000.
I pick 2 items from set C, 3 from set W, 2 from set D, 1 from set G, and 1 from set U (the combined set), and I can't pick the same item twice within a set, i.e. each pick in W has to be different.
My code is below. I'm having trouble in that the optimizer just spits out my initial list of picks. Looking at the data, though, I know for sure there are better solutions. I read that the issue may be related to late binding, but I'm not sure if that's right, and if it is, I'm not sure how to fix it either. I appreciate any help. Thanks!
Read: Scipy.optimize.minimize SLSQP with linear constraints fails
import numpy as np
from scipy.optimize import minimize
C = open('C.txt','r').read().splitlines()
W = open('W.txt','r').read().splitlines()
D = open('D.txt','r').read().splitlines()
G = open('G.txt','r').read().splitlines()
def splitdata(file):
    for index, line in enumerate(file):
        file[index] = line.split('\t')
    return file

def objective(x, sign=-1.0):
    x = list(map(int, x))
    pos = 3
    Cvalue = float(C[x[0]][pos]) + float(C[x[1]][pos])
    Wvalue = float(W[x[2]][pos]) + float(W[x[3]][pos]) + float(W[x[4]][pos])
    Dvalue = float(D[x[5]][pos]) + float(D[x[6]][pos])
    Gvalue = float(G[x[7]][pos])
    Uvalue = float(U[x[8]][pos])
    grand_value = sign*(Cvalue + Wvalue + Dvalue + Gvalue + Uvalue)
    #print(grand_value)
    return grand_value

def constraint_cost(x):
    x = list(map(int, x))
    pos = 6
    Ccost = int(C[x[0]][pos]) + int(C[x[1]][pos])
    Wcost = int(W[x[2]][pos]) + int(W[x[3]][pos]) + int(W[x[4]][pos])
    Dcost = int(D[x[5]][pos]) + int(D[x[6]][pos])
    Gcost = int(G[x[7]][pos])
    Ucost = int(U[x[8]][pos])
    grand_cost = 50000 - (Ccost + Wcost + Dcost + Gcost + Ucost)
    #print(grand_cost)
    return grand_cost

def constraint_C(x):
    if x[0] == x[1]:
        return 0
    else:
        return 1

def constraint_W(x):
    if x[2] == x[3] or x[2] == x[4] or x[3] == x[4]:
        return 0
    else:
        return 1

def constraint_D(x):
    if x[5] == init[6]:
        return 0
    else:
        return 1
con1 = {'type':'ineq','fun':constraint_cost}
con2 = {'type':'ineq','fun':constraint_C}
con3 = {'type':'ineq','fun':constraint_W}
con4 = {'type':'ineq','fun':constraint_D}
con = [con1, con2, con3, con4]
c0 = [0,1]
w0 = [0,1,2]
d0 = [0,1]
g0 = [0]
u0 = [0]
init = c0+w0+d0+g0+u0
C = splitdata(C)
W = splitdata(W)
D = splitdata(D)
G = splitdata(G)
U = C + W + D + G
sol = minimize(objective, init, method='SLSQP',constraints=con)
print(sol)
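A likely reason minimize just returns the initial picks is that objective() and the constraint functions round x to integers, so SLSQP's finite-difference gradients come out as zero and the solver never moves off the starting point. For a discrete selection problem like this, directly enumerating the index combinations is often a simpler fit. A rough sketch (hypothetical helper name total, assuming the C, W, D, G and U lists built above are small enough to enumerate):
# A brute-force sketch (an assumption, not the asker's or the linked answer's code).
# Only practical if the input files are small, since it enumerates all combinations.
from itertools import combinations

def total(pick, pos):
    # pick = (c_indices, w_indices, d_indices, g_index, u_index)
    c, w, d, g, u = pick
    rows = [C[i] for i in c] + [W[i] for i in w] + [D[i] for i in d] + [G[g], U[u]]
    return sum(float(row[pos]) for row in rows)

best = None
for c in combinations(range(len(C)), 2):
    for w in combinations(range(len(W)), 3):
        for d in combinations(range(len(D)), 2):
            for g in range(len(G)):
                for u in range(len(U)):
                    pick = (c, w, d, g, u)
                    if total(pick, 6) > 50000:   # cost cap from the question
                        continue
                    value = total(pick, 3)
                    if best is None or value > best[0]:
                        best = (value, pick)
print(best)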
Given the following two lists:
dates = [1,2,3,4,5]
rates = [0.0154, 0.0169, 0.0179, 0.0187, 0.0194]
I would like to generate a list
df = []
of the same length as dates and rates (indices 0 to 4 = 5 elements), in 'pure' Python (without NumPy), as an exercise.
df[i] would be equal to:
df[0] = 1 / (1 + rates[0])
df[1] = (1 - df[0] * rates[1]) / (1 + rates[1])
...
df[4] = (1 - (df[0] + df[1] + ... + df[3]) * rates[4]) / (1 + rates[4])
I was trying:
df = []
df.append(1 + rates[0]) #create df[0]
for date in enumerate(dates, start = 1):
    running_sum_vec = 0
    for i in enumerate(rates, start = 1):
        running_sum_vec += df[i] * rates[i]
df[i] = (1 - running_sum_vec) / (1 + rates[i])
return df
but am getting a TypeError: list indices must be integers. Thank you.
So, enumerate returns two values: the index and the value:
>>> x = ['a', 'b', 'a']
>>> for y_count, y in enumerate(x):
...     print('index: {}, value: {}'.format(y_count, y))
...
index: 0, value: a
index: 1, value: b
index: 2, value: a
It's because of the line for i in enumerate(rates, start = 1). enumerate generates tuples of the index and the object in the list. You should do something like:
for i, rate in enumerate(rates, start=1):
    running_sum_vec += df[i] * rate
You'll need to fix the other loop (for date in enumerate...) as well.
You also need to move df[i] = (1 - running_sum_vec) / (1 + rates[i]) back into the loop (currently it only sets the last value), and change it to an append, since otherwise it will try to assign to an index that is out of bounds.
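Putting those fixes together, a minimal corrected sketch (built from the recurrence stated in the question) could look like:
# A sketch combining the fixes above, using the recurrence from the question
dates = [1, 2, 3, 4, 5]
rates = [0.0154, 0.0169, 0.0179, 0.0187, 0.0194]

df = [1 / (1 + rates[0])]                    # df[0]
for rate in rates[1:]:
    running_sum = sum(df) * rate             # (df[0] + ... + df[i-1]) * rates[i]
    df.append((1 - running_sum) / (1 + rate))
print(df)                                    # 5 elements, same length as dates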
Not sure if this is what you want:
df = []
sum = 0
for ind, val in enumerate(dates):
    df.append((1 - (sum * rates[ind])) / (1 + rates[ind]))
    sum += df[ind]
Enumerate returns both index and entry.
So, assuming the lists contain numbers, your code can be:
df = []
df.append(1 + rates[0]) #create df[0]
for date in dates:
    running_sum_vec = 0
    for i, rate in enumerate(rates[1:], start = 1):
        running_sum_vec += df[i] * rate
df[i] = (1 - running_sum_vec) / (1 + rate)
return df
Although I'm almost positive there's a way with list comprehension. I'll have to think about it for a bit.
For this question http://www.spoj.com/problems/ACPC10D/ on SPOJ, I wrote a python solution as below:
count = 1
while True:
    no_rows = int(raw_input())
    if no_rows == 0:
        break
    grid = [[None for x in range(3)] for y in range(2)]
    input_arr = map(int, raw_input().split())
    grid[0][0] = 10000000
    grid[0][1] = input_arr[1]
    grid[0][2] = input_arr[1] + input_arr[2]
    r = 1
    for i in range(0, no_rows-1):
        input_arr = map(int, raw_input().split())
        _r = r ^ 1
        grid[r][0] = input_arr[0] + min(grid[_r][0], grid[_r][1])
        grid[r][1] = input_arr[1] + min(min(grid[_r][0], grid[r][0]), min(grid[_r][1], grid[_r][2]))
        grid[r][2] = input_arr[2] + min(min(grid[_r][1], grid[r][1]), grid[_r][2])
        r = _r
    print str(count) + ". " + str(grid[(no_rows - 1) & 1][1])
    count += 1
The above code exceeds the time limit. However, when I change the line
grid[r][2] = input_arr[2] + min(min(grid[_r][1], grid[r][1]), grid[_r][2])
to
grid[r][2] = input_arr[2] + min(min(grid[_r][1], grid[_r][2]), grid[r][1])
the solution is accepted. If you notice the difference, the first line compares, grid[_r][1], grid[r][1] for minimum (i.e. the row number are different) and second line compares grid[_r][1], grid[_r][2] for minimum(i.e. the row number are same)
This is a consistent behaviour. I want to understand, how python is processing those two lines - so that one results in exceeding time limit, while other is fine.
I want to apply the following function to multiple instances of a, b, c, but it seems I can't apply this function to a list. The goal is to compute a few inequalities and finally plug the results into a new z = ax + by equation in order to find the lowest or highest ordered pair.
This is cleaner code that omits the use of lists:
xMin,yMin = 0,0
a,b,c = 2,-3,12
def FindVar(object):
    x = (-b*yMin)/a + c/a
    y = (-a*xMin)/b + c/b
    print '(', FindVar.x, ',', yMin, ')'
    print '(', xMin, ',', FindVar.y, ')'
This is longer code that uses lists a bit more sloppily:
xMin = 0
yMin = 0
def i1():
    a,b,c = 2,-3,12
    #Create and append l1
    global l1
    l1 = []
    l1.extend((a,b,c))
    #Find X,Y
    y = (-a*xMin)/b + (c/b)
    x = (-b*yMin)/a + c/a
    #Add to list
    pair = []
    pair.append((xMin,y))
    pair.append((x,yMin))
    print '%sx + %sy = %s' % (a,b,c)
    print 'RETURNS'
    print pair[0], z1
    print pair[1], z2

def i2():
    a,b,c = 1,1,5
    #Create and append l2
    global l2
    l2 = []
    l2.extend((a,b,c))
    #Find X,Y
    y = (-a*xMin)/b + c/b
    x = (-b*yMin)/a + c/a
    #Add to list
    pair = []
    pair.append((xMin,y))
    pair.append((x,yMin))
    print '%sx + %sy = %s' % (a,b,c)
    print 'RETURNS'
    print pair[0], z1
    print pair[1], z2
So with the second bit of code I end up with 4 list items, each of which should be applied to a final equation, z = ax + by where a and b are independent from other functions.
EDIT: The purpose is to take an equation like "z = 2x + 7y" and subject it to the rules:
2x - 3y ≤ 12,
x + y ≤ 5,
3x + 4y ≥ 24,
x ≥ 0,
y ≥ 0.
I take these equations and put them into a list so that a,b,c = [2,-3,12],[1,1,5],[3,4,24] (where a = 2,1,3, b = -3,1,4, and c = 12,5,24). Then I can find (x,y) according to each of the three instances and plug each of those ordered pairs into my initial "z = 2x + 7y". The point of all of this is to take sets of data and find which set is the most efficient.
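To make the plan concrete, here is a small sketch of what is described above (an assumption, not the asker's final code): for each constraint ax + by = c, find the two axis intercepts and evaluate z = 2x + 7y at each.
# A sketch of the plan described above, using the three (a, b, c) triples
constraints = [(2, -3, 12), (1, 1, 5), (3, 4, 24)]

def evaluate(a, b, c):
    x_intercept = (c / float(a), 0.0)   # point where y = 0
    y_intercept = (0.0, c / float(b))   # point where x = 0
    for x, y in (x_intercept, y_intercept):
        print('(%s, %s) -> z = %s' % (x, y, 2 * x + 7 * y))

for a, b, c in constraints:
    evaluate(a, b, c)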
z1 and z2 were used in a prior version of the code to apply the "z=2x+7y" to the first and second ordered pairs of the first equation.
EDIT 2:
This is the much cleaner code I came up with.
xMin = 0
yMin = 0
a = [10,11,1]
b = [7,-8,1]
c = [200,63,42]
def findxy(a,b,c):
    #Finds x,y for ax+by=c
    x = (-b*yMin)/a + c/a
    y = (-a*xMin)/b + c/b
    #The results, followed by the z function "z = 15x + 15y"
    if x >= xMin:
        print '(%s, %s)' % (x,yMin), 15 * x + 15 * yMin
    if y >= yMin:
        print '(%s, %s)' % (xMin,y), 15 * xMin + 15 * y

map(findxy,a,b,c)
Results in
(20, 0) 300
(0, 28) 420
(5, 0) 75
(42, 0) 630
(0, 42) 630
Thanks!
To apply a function to each object in a list you can use the built-in function map.
The list you pass to map can consist of primitives, class instances, tuples or lists.
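For example, a minimal sketch with a hypothetical square function (not from the question):
# map applies the function to every element of the list
def square(n):
    return n * n

numbers = [1, 2, 3, 4]
squares = list(map(square, numbers))   # [1, 4, 9, 16]
print(squares)
When you pass several lists, map takes one element from each list per call, which is how map(findxy, a, b, c) works in the code above.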