l1 = number_of_employees - no_shift + 1
l2 = range(l1)
l3 = range(l1-2)
l4 = []
print("---------l2----value",l2)
shift_time = (24/no_shift)
shift_time1 = (24/no_shift1 )
shift_timing = []
shift_timing1 = []
days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday","Saturday","Sunday"]
weekday = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]
weekend = ["Saturday","Sunday"]
data= [[l3, [1], [2]],
[l3, [1], [2]],
[l2, [1], [2]],
[l2, [1], [2]],
[l2, [1], [2]]]
print(" ------data list-----", data)
data1 = [ [[1], [2]],
[[1], [2]]]
if number_of_employees < no_shift:
print ("Not possible to schedule with the given constraints")
exit()
for day in range(0, no_shift):
t = (start_time + shift_time)
if t > 24 :
t = (t-24)
a = (str(start_time) + '-' + str(t))
shift_timing.append(a)
if t < 24:
start_time = t
elif t >= 24:
start_time = 0
for day in range(0, no_shift1):
t = (start_time1 + shift_time1)
if t > 24 :
t = (t-24)
a = (str(start_time1) + '-' + str(t))
shift_timing1.append(a)
if t < 24:
start_time1 = t
elif t >= 24:
start_time1 = 0
employee_array = []
cfg.read('config.ini')
l = dict(cfg.items('staffData'))
employee_array = list(l.values())
orignal_employee_array = employee_array
print (json.dumps(employee_array))
flag = 1
if number_of_employees % 2 == 0:
employee_count_for_loop = (number_of_employees / 2 + 1)
elif number_of_employees % 2 != 0:
employee_count_for_loop = (number_of_employees + 1)
y = 0
z = 0
while flag < employee_count_for_loop:
print("------------------------------- WEEK", flag, '--------------------------------')
y = 0
for day in range(0, 7):
if day == 5 or day == 6:
z = 0
if number_of_employees < 5:
for shift in range(1, 2):
data1[y][z] = employee_array[shift + no_shift]
z += 1
else:
for shift in range(0, 2):
data1[y][z] = employee_array[shift + number_of_employees - 2]
z += 1
y+=1
elif day == 2 or day == 3 or day == 4:
for shift in range(0, no_shift):
p = shift
if shift == 0:
data[day][shift][shift] = employee_array[shift]
data[day][shift][shift] = json.dumps(data[day][shift][shift])
for shift_number in range(no_shift, number_of_employees ):
data[day][shift][p + 1] = employee_array[shift_number]
data[day][shift][p + 1] = json.dumps(data[day][shift][p + 1])
p = p + 1
else:
data[day][shift] = employee_array[shift]
else:
for shift in range(0, no_shift):
p = shift
if shift == 0:
data[day][shift][shift] = employee_array[shift]
data[day][shift][shift]= json.dumps(data[day][shift][shift])
for shift_number in range(no_shift, number_of_employees-2):
data[day][shift][p + 1] = employee_array[shift_number]
data[day][shift][p + 1] = json.dumps(data[day][shift][p + 1])
p = p + 1
else:
data[day][shift] = employee_array[shift]
print("------EmployeeArray---", employee_array)
employee_array = employee_array[-2:] + employee_array[:-2]
if employee_array == orignal_employee_array:
if no_shift == 2:
employees_array = employee_array[-1:] + employee_array[:-1]
orignal_employee_array = employee_array
employee_array = employee_array[-4:] + employee_array[:-4]
orignal_employee_array = employee_array
flag = flag + 1
print ("*****WEEKDAY*****")
midx = pd.MultiIndex.from_product([weekday, shift_timing])
test = pd.DataFrame(data, index=weekday ,columns=shift_timing)
test1 = tabulate(test, headers=shift_timing1, tablefmt='orgtbl')
l4.append(test)
print ("*****WEEKEND*****")
midx = pd.MultiIndex.from_product([weekend, shift_timing1])
test = pd.DataFrame(data1, index=weekend, columns=shift_timing1)
test2 = (tabulate(test,headers=shift_timing1,tablefmt='orgtbl'))
l4.append(test)
pd.concat(l4,ignore_index = False).to_csv('file1.csv')
I tried implementing it using pandas and the csv writer.
I want each DataFrame/table to have its own index rather than a common index, and I was not able to find a solution for this anywhere on the internet.
I also tried the csv writer, but the way it gets printed in the CSV file is not correct.
I got the answer by combining the functionality of both csv writer and pandas,
with the following line:
df.to_csv(path_or_buf=csv_file)
You can refer to this link for more information: https://www.journaldev.com/33511/pandas-to_csv-convert-dataframe-to-csv
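If the goal is a single CSV containing several tables, each keeping its own index, one simple approach is to open the file once and call to_csv on the same handle for every DataFrame. A minimal sketch (weekday_df and weekend_df are placeholders for the weekday and weekend frames built above):

import pandas as pd

weekday_df = pd.DataFrame({'8-16': ['emp1'], '16-24': ['emp2']}, index=['Monday'])
weekend_df = pd.DataFrame({'0-12': ['emp3'], '12-24': ['emp4']}, index=['Saturday'])

with open('file1.csv', 'w') as csv_file:
    for df in (weekday_df, weekend_df):
        df.to_csv(path_or_buf=csv_file)   # each frame writes its own index and header
        csv_file.write('\n')              # blank line between the tables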
I have a certain number of matrices in a list. When I try to append a matrix to the NumPy array, it only appends the specified row, and when I edit the code to append the whole matrix it keeps returning the following error:
Traceback (most recent call last):
File "so.py", line 129, in <module>
parents = selection(cal_fitness(t1, t2, t3, matlist, 300, arr), 2, matlist) #
File "so.py", line 124, in selection
parents[i, :] = population[max_fitness_idx[0][0]]
ValueError: could not broadcast input array from shape (3,5) into shape (5,)
The function related to the error:
def selection(fitness, num_parents, population):
    fitness = list(fitness)
    parents = numpy.empty((num_parents, len(population)))
    for i in range(num_parents):
        max_fitness_idx = numpy.where(fitness == numpy.max(fitness))
        # parents[i, :] = population[max_fitness_idx[0][0]][2]
        parents[i, :] = population[max_fitness_idx[0][0]]
        fitness[max_fitness_idx[0][0]] = -999999
    return parents.astype(int)
parents = selection(cal_fitness(t1, t2, t3, matlist, 300, arr), 2, matlist)
print(parents)
The full code:
import numpy
import random
import pandas
wsn = numpy.arange(1, 6)
taskn = 3
t1 = numpy.random.randint(30, 200, size=len(wsn))
t2 = numpy.random.randint(30, 200, size=len(wsn))
t3 = numpy.random.randint(30, 200, size=len(wsn))
# print('\nGenerated Data:\t\n\nNumber \t Task 1 \t Task 2 \t Task 3\n')
ni = min(len(t1), len(t2), len(t3))
# for i in range(ni):
# print('\t {0} \t {1} \t {2} \t\t {3}\n'.format(wsn[i], t1[i], t2[i], t3[i]))
# print('\n\n')
qmin = 50
qmax = 140
for i in range(len(t1)):
if t1[i] <= qmin or t1[i] >= qmax:
# t1=numpy.delete(t1,i)
t1[i] = 0
for i in range(len(t2)):
if t2[i] <= qmin or t2[i] >= qmax:
# t2=numpy.delete(t2,i)
t2[i] = 0
for i in range(len(t3)):
if t3[i] <= qmin or t3[i] >= qmax:
# t3=numpy.delete(t3,i)
t3[i] = 0
i = 0
m = max(len(t1), len(t2), len(t3))
if t1[i] == 0 and t2[i] == 0 and t3[i] == 0:
t1 = numpy.delete(t1, i)
t2 = numpy.delete(t2, i)
t3 = numpy.delete(t3, i)
i += 1
solperpop = len(wsn)
gen = 20
j = 0
pop_size = (taskn, solperpop)
# print('population size: {}'.format(pop_size))
# for j in range(ni):
# pop_size=list(solperpop,taskn)
matlist = list()
# print('\n\n')
i = 0
k = 0
nbrofindv = 5
arr = []
for i in range(nbrofindv):
init_pop = numpy.zeros(pop_size, dtype=int)
init_pop = init_pop.astype(int)
k = 0
l = 0
for k in range(taskn):
l = random.randrange(solperpop - 1)
init_pop[k][l] = 1
arr.append(l)
matlist.append(init_pop)
pandas.set_option('display.max_columns', None)
pandas.set_option('display.width', None)
zipped = pandas.DataFrame(list(zip(*matlist)))
# , columns=['Individual 1', 'Individual 2', 'Individual 3', 'Individual 4', 'Individual 5'])
print(zipped)
print('\n\n')
i = 0
for i in range(len(wsn)):
if t1[i] == 0:
if init_pop[0][i] != 0:
init_pop[0][i] == 0
if t2[i] == 0:
if init_pop[1][i] != 0:
init_pop[1][i] == 0
if t3[i] == 0:
if init_pop[2][i] != 0:
init_pop[2][i] == 0
def cal_fitness(task1, task2, task3, matix, mmax, array):
fitness = numpy.empty(len(matix))
S1 = numpy.empty(len(matix), dtype=int)
z = 0
for i in range(len(matix)):
S1[i] = task1[array[0 + z]] + task2[array[1 + z]] + task3[array[2 + z]]
z += 3
if S1[i] <= mmax:
fitness[i] = S1[i]
else:
fitness[i] = 0
return fitness.astype(int)
fitness = cal_fitness(t1, t2, t3, matlist, 300, arr)
def selection(fitness, num_parents, population):
fitness = list(fitness)
parents = numpy.empty((num_parents, len(population)))
for i in range(num_parents):
max_fitness_idx = numpy.where(fitness == numpy.max(fitness))
#parents[i, :] = population[max_fitness_idx[0][0]][2]
parents[i, :] = population[max_fitness_idx[0][0]]
fitness[max_fitness_idx[0][0]] = -999999
return parents.astype(int)
parents = selection(cal_fitness(t1, t2, t3, matlist, 300, arr), 2, matlist)
print(parents)
print('\n\n')
def crossover(parents, num_offsprings):
offsprings = numpy.empty((num_offsprings, parents.shape[1]))
crossover_point = int(parents.shape[1] / 2)
crossover_rate = 0.5
i = 0
while parents.shape[0] < num_offsprings:
parent1_index = i % parents.shape[0]
parent2_index = (i + 1) % parents.shape[0]
x = random.random()
if x > crossover_rate:
continue
parent1_index = i % parents.shape[0]
parent2_index = (i + 1) % parents.shape[0]
offsprings[i, 0:crossover_point] = parents[parent1_index, 0:crossover_point]
offsprings[i, crossover_point:] = parents[parent2_index, crossover_point:]
i += 1 # <== modified
return offsprings # <== modified
print(crossover(parents, 2)) # <== modified
This issue is related to the dimensions specified for parents, which is 2-D now and must be made 3-D. So, IIUC, you can achieve this with:
def selection(fitness, num_parents, population):
    fitness = list(fitness)
    # parents shape is modified (a new axis with the needed length is added)
    parents = numpy.empty((num_parents, len(population[0]), len(population[0][0])))  # <==
    for i in range(num_parents):
        max_fitness_idx = numpy.where(fitness == numpy.max(fitness))
        parents[i, :] = population[max_fitness_idx[0][0]]
        fitness[max_fitness_idx[0][0]] = -999999
    return parents.astype(int)
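To see why the extra axis matters, here is a minimal sketch with the same shapes as the 3x5 matrices in matlist (the names below are only for the demo):

import numpy as np

population = [np.zeros((3, 5), dtype=int) for _ in range(5)]     # five 3x5 matrices, like matlist

bad = np.empty((2, len(population)))                             # shape (2, 5)
# bad[0, :] = population[0]  -> ValueError: a (3, 5) matrix cannot broadcast into a (5,) row

good = np.empty((2, len(population[0]), len(population[0][0])))  # shape (2, 3, 5)
good[0, :] = population[0]                                       # the whole matrix fits, no error
print(good.shape)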
I am trying to create an indicator that will find all the divergences between two signals.
The output of the function so far looks like this.
The problem is that it is painfully slow when I try to use it with long signals. Could any of you help me make it faster, if that is possible?
My code:
def find_divergence(price: pd.Series, indicator: pd.Series, width_divergence: int, order: int):
    div = pd.DataFrame(index=range(price.size), columns=[
        f"Bullish_{width_divergence}_{order}",
        f"Berish_{width_divergence}_{order}"
    ])
    div[f'Bullish_idx_{width_divergence}_{order}'] = False
    div[f'Berish_idx_{width_divergence}_{order}'] = False

    def calc_argrelextrema(price_: np.ndarray):
        return argrelextrema(price_, np.less_equal, order=order)[0]

    price_ranges = []
    for i in range(len(price)):
        price_ranges.append(price.values[0:i + 1])

    f = []
    with ThreadPoolExecutor(max_workers=16) as exe:
        for i in price_ranges:
            f.append(exe.submit(calc_argrelextrema, i))

    prices_lows = SortedSet()
    for r in concurrent.futures.as_completed(f):
        data = r.result()
        for d in reversed(data):
            if d not in prices_lows:
                prices_lows.add(d)
            else:
                break

    price_lows_idx = pd.Series(prices_lows)

    for idx_1 in range(price_lows_idx.size):
        min_price = price[price_lows_idx[idx_1]]
        min_indicator = indicator[price_lows_idx[idx_1]]
        for idx_2 in range(idx_1 + 1, idx_1 + width_divergence):
            if idx_2 >= price_lows_idx.size:
                break
            if price[price_lows_idx[idx_2]] < min_price:
                min_price = price[price_lows_idx[idx_2]]
            if indicator[price_lows_idx[idx_2]] < min_indicator:
                min_indicator = indicator[price_lows_idx[idx_2]]
            consistency_price_rd = min_price == price[price_lows_idx[idx_2]]
            consistency_indicator_rd = min_indicator == indicator[price_lows_idx[idx_1]]
            consistency_price_hd = min_price == price[price_lows_idx[idx_1]]
            consistency_indicator_hd = min_indicator == indicator[price_lows_idx[idx_2]]
            diff_price = price[price_lows_idx[idx_1]] - price[price_lows_idx[idx_2]]  # should be neg
            diff_indicator = indicator[price_lows_idx[idx_1]] - indicator[price_lows_idx[idx_2]]  # should be pos
            is_regular_divergence = diff_price > 0 and diff_indicator < 0
            is_hidden_divergence = diff_price < 0 and diff_indicator > 0
            if is_regular_divergence and consistency_price_rd and consistency_indicator_rd:
                div.at[price_lows_idx[idx_2], f'Bullish_{width_divergence}_{order}'] = (price_lows_idx[idx_1], price_lows_idx[idx_2])
                div.at[price_lows_idx[idx_2], f'Bullish_idx_{width_divergence}_{order}'] = True
            elif is_hidden_divergence and consistency_price_hd and consistency_indicator_hd:
                div.at[price_lows_idx[idx_2], f'Berish_{width_divergence}_{order}'] = (price_lows_idx[idx_1], price_lows_idx[idx_2])
                div.at[price_lows_idx[idx_2], f'Berish_idx_{width_divergence}_{order}'] = True
    return div
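One observation rather than a full rewrite: most of the time is likely spent calling argrelextrema once per prefix of the price series (len(price) calls on arrays of growing size). If the incremental, prefix-by-prefix confirmation of lows is not strictly required, a single call over the full array yields the candidate lows far more cheaply. A hedged sketch under that assumption:

import numpy as np
from scipy.signal import argrelextrema

# Candidate local lows over the whole series in one call
# (assumption: the per-prefix "confirmed low" logic above is not strictly needed).
def local_lows(price: np.ndarray, order: int) -> np.ndarray:
    return argrelextrema(price, np.less_equal, order=order)[0]

# usage: lows = local_lows(price.values, order)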
I parsed a list of crew members, where each entry looks like:
20;mechanic;0;68
21;cook;0;43
22;scientist;0;79
23;manager;1;65
24;mechanic;1;41
etc
Now I'm trying to figure out how to count the number of workers who have 60 or more stamina (the last element of each employee record).
Here is my code:
with open('employee.txt', 'r') as employee_list:
    count = 0
    for employee in employee_list.readlines():
        employee_data = employee.rstrip().split(';')
        if int(employee_data[3]) >= 60:
            count += 1
            print(count)
Output from the terminal:
1
2
3
...
90
The last value is the right answer, I think, but is there any way to get only one 'total' count instead of 90 lines?
Just print one line after the loop is done.
with open('employee.txt', 'r') as employee_list:
    count = 0
    for employee in employee_list.readlines():
        employee_data = employee.rstrip().split(';')
        if int(employee_data[3]) >= 60:
            count += 1
    print(count)
But I would also recommend using pandas for data manipulation. For example (header=None because the file has no header row):
df = pd.read_csv('employee.txt', sep=';', header=None)
df.columns = ['col1', 'col2', 'col3', 'stamina']
Then just filter and count the matching rows:
len(df[df.stamina >= 60])
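Equivalently, since True counts as 1, summing the boolean mask gives the same count directly:

(df['stamina'] >= 60).sum()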
After a day of thinking I wrote this and got the right answer (maybe someone will find it helpful):
def total_resist_count():
    # with open('employee.txt', 'r') as employee_list:
    employee_list = [input() for i in range(120)]
    candidates = []
    for employee in employee_list:
        employee_data = employee.rstrip().split(';')
        if int(employee_data[3]) >= 60:
            candidates.append(employee_data)
    return candidates
required_professionals = {
'computers specialist': 5,
'cook': 3,
'doctor': 5,
'electrical engineer': 4,
'manager': 1,
'mechanic': 8,
'scientist': 14
}
expedition_total = 40
female_min = 21
male_min = 12
def validate_solution(cur_team, num_females, num_males):
    global expedition_total, female_min, male_min
    if sum(cur_team) != expedition_total or num_females < female_min or num_males < male_min:
        return False
    num_of_free_vacancies = 0
    for k in required_professionals:
        num_of_free_vacancies += required_professionals[k]
    if num_of_free_vacancies > 0:
        return False
    return True
TEAM = None
def backtrack(candidates, cur_team, num_females, num_males):
    global required_professionals, expedition_total, TEAM
    if sum(cur_team) > expedition_total or TEAM is not None:
        return
    if validate_solution(cur_team, num_females, num_males):
        team = []
        for i, used in enumerate(cur_team):
            if used == 1:
                team.append(candidates[i])
        TEAM = team
        return
    for i in range(len(candidates)):
        if cur_team[i] == 0 and required_professionals[candidates[i][1]] > 0:
            cur_team[i] = 1
            required_professionals[candidates[i][1]] -= 1
            if candidates[i][2] == '1':
                backtrack(candidates, cur_team, num_females, num_males + 1)
            else:
                backtrack(candidates, cur_team, num_females + 1, num_males)
            required_professionals[candidates[i][1]] += 1
            cur_team[i] = 0
if __name__ == '__main__':
    ec = decode_fcc_message()
    candidates = total_resist_count(ec)
    cur_team = [0] * len(candidates)
    backtrack(candidates, cur_team, 0, 0)
    s = ""
    for t in TEAM:
        s += str(t[0]) + ';'
    print(s)
import os
import sys
import math
import cvxopt as cvx
import picos as pic
import pandas as pd
import matplotlib.pyplot as plt
from gurobipy import *
from statsmodels.tsa.arima_model import ARIMA
import numpy as np
from scipy import *
#import DeferableLoad
OPTmodel = Model('OPTIMIZER')
#general parameters
Tamb =22
N = 1440 # maximum iteration
i = range(1, N)
COP= 3.4 # Coeffient of performance
'''
Prediction need to be added here
'''
# Datacenter room defintion
R = 10 #length of room
B = 7
H = 9 #Height of room
L = 10
dT = 60
A = 2*((L*B)+(B*H)+(H*L))
Thick = 0.33 # thickness of wall
k = 0.7 # thermal conductivity of wall
mAir = 1.2 * (L * B * H)
C = 718
landa = k * A / Thick
a0 = 0.05 / dT
a1 = 1
ki = math.exp(-(landa * 60) / (mAir * C)) # value that constant and its related to property of room
kc = (1 - ki) * a0
ko = (1 - ki) * a1
kp = (1 - ki) * (COP / landa)
Tmin= 18
Tmax= 27
Tamb= 22
PcoolingRated = 100
Pbess_rated = 30.462
Pbess_ratedN = -30.462
Ebess_min = 0
Ebess_max = 300
with open ('Pcooling.csv','r') as f:
Pcooling = []
for line in f:
Pcooling.append(line)
f.close()
with open ('ITpower.csv','r') as f1:
ITload = []
for line1 in f1:
ITload.append(line1)
f1.close()
with open ('DR.csv','r') as f2:
DR =[]
for line2 in f2:
DR.append(line2)
f2.close()
print ITload
print Pcooling
print DR
for i in range(1,200):
for it in range(1, 1440):
Tm = np.empty(1440)
Tm.fill(18)
TmA = np.empty(1440)
TmA.fill(27)
Phvac_flex = {}
Phvac_up = {}
Phvac_down_= {}
Phvac_up_ = {}
Pbess_out_ = {}
Pbess_in_ = {}
Phvac_down = {}
Pbess_flex_ = {}
Pbess_flex = {}
Phvac_flex_ = {}
Pbess_in = {}
Pdc = {}
Pdc_base = {}
Pflex_i = {}
Tdc_i = {}
Pbess_out ={}
Ebess_i = {}
Phvac_flex[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS,name="PHVAC_flex"+str(i))
Phvac_up[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PHVAC_up" + str(i))
Phvac_up_[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PHVAC_up_" + str(i))
Phvac_down_[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PHVAC_down_" + str(i))
Pbess_out_[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PBESS_out_" + str(i))
Pbess_in_[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PBESS_in_" + str(i))
Phvac_down[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PHVAC_down" + str(i))
Pbess_flex_[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PBESS_flex_" + str(i))
Pbess_flex[i] = OPTmodel.addVar(lb=-GRB.INFINITY,ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PBESS_flex" + str(i))
Phvac_flex_[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PHVAC_flex_" + str(i))
Pbess_in[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PBESS_in" + str(i))
Pdc[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PDC" + str(i))
Pdc_base[i] = OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PDC_base" + str(i))
Pflex_i[i]= OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="Pflex_i" + str(i))
Tdc_i[i]= OPTmodel.addVar(ub=GRB.INFINITY,vtype = GRB.CONTINUOUS, name = "Tdc_i" + str(i))
Pbess_out[i] = OPTmodel.addVar(lb=-GRB.INFINITY,ub=GRB.INFINITY,vtype=GRB.CONTINUOUS, name="PBESS_out" + str(i))
Ebess_i[i]= OPTmodel.addVar(ub=GRB.INFINITY,vtype=GRB.CONTINUOUS,name="Ebess_i" + str(i))
Pflex_i[1] = 0
Pflex_i[1] = 0
Tdc_i[0] = 18
Phvac_flex[1] = 0
# Phvac_flex_[1] = 0
Phvac_down[1] = 0
Phvac_up[1] = 0
Phvac_down_[1] = 0
Phvac_up_[1] = 0
# Phvac_down_pos[1] = 0
# Phvac_up_pos(1) = 0;
Pbess_flex[1] = 0
# Pbess_flex_[1] = 0
Pbess_out[1] = 0
Pbess_in[1] = 0
# Pbess_out_[1] = 0
Pbess_in_[1] = 0
# Pbess_out_pos[1] = -250
# Pbess_in_pos(1) = 250;
Ebess_i[1] = 150
OPTmodel.update()
'''
if float(DR[i]) > 0:
Phvac_down_[i] = 0
Phvac_up_[i] = float(DR[i])
Pbess_out_[i] = 0
Pbess_in_[i] = float(DR[i])
#Pbess_flex_[i] = Pbess_in_[i] + Pbess_out_[i]
#Phvac_flex_[i] = Phvac_down_[i] + Phvac_up_[i]
OPTmodel.update()
elif float(DR[i]) < 0:
Phvac_down_[i] = float(DR[i])
Phvac_up_[i] = 0
#Phvac_flex_[i] = Phvac_down_[i] + Phvac_up_[i]
Pbess_out_[i] = float(DR[i])
Pbess_in_[i] = 0
#Pbess_flex_[i] = Pbess_in_[i] + Pbess_out_[i]
OPTmodel.update()
else:
Phvac_down_[i] = 0
Phvac_up_[i] = 0
Phvac_flex_[i] = Phvac_down_[i] + Phvac_up_[i]
Pbess_out_[i] = 0
Pbess_in_[i] = 0
Pbess_flex_[i] = Pbess_in_[i] + Pbess_out_[i]
OPTmodel.update()
'''
#print Phvac_up.values()
#print Phvac_flex_[i]
print OPTmodel
OPTmodel.update()
ConHVAC1 = OPTmodel.addConstr(Phvac_flex[i] == Phvac_up[i] + Phvac_down[i], name='ConHVAC1')
ConHVAC2 = OPTmodel.addConstr(0 <= Phvac_flex[i] , name='ConHVAC2')
ConHVAC3 = OPTmodel.addConstr(Phvac_flex[i] <= PcoolingRated, name='ConHVAC3')
PH = pd.read_csv('Pcooling.csv')
PHVAC = PH.values
newList2 = map(lambda x: x / 1000, PHVAC)
p=[]
p=PcoolingRated-newList2[i]
#CONHVAC4 = OPTmodel.addConstr(Phvac_up[i]==np.minimum((Phvac_up_[i]),(float(newList2[i]))))
#Phvac_u(1:MaxIter) == min(Phvac_u_(1:MaxIter), (repelem(Phvac_max, MaxIter) - (Pcooling(1:MaxIter)'/1000)))
ConTemp1 = OPTmodel.addConstr(Tm[it] <= Tdc_i[i] <= TmA[it], name='ConTemp1')
ConBESS1 = OPTmodel.addConstr(Pbess_ratedN <= Pbess_flex[i] <= Pbess_rated, name='ConBESS1')
ConBESS2 = OPTmodel.addConstr(Pbess_flex[i] == Pbess_in[i] + Pbess_out[i], name='ConBESS2')
ConBESS3 = OPTmodel.addConstr(0 <= Pbess_in[i] <= min(Pbess_rated, Pbess_in_[i]), name='ConBESS3')
ConBESS4 = OPTmodel.addConstr(np.maximum(Pbess_ratedN,Pbess_out_[i]) <= Pbess_out[i]<=0 , name='ConBESS4') # need to modifty
ConEBESS1 = OPTmodel.addConstr(Ebess_min <= Ebess_i[i], name='ConEBESS1')
ConEBESS2 = OPTmodel.addConstr(Ebess_i[i] <= Ebess_max, name='ConEBESS2')
D = pd.read_csv('DR.csv').values
DRN = map(lambda x: x / 1000, D)
PDRN=map(lambda x: x / 4.8, DRN)
if float((PDRN[i])) > 0:
CON1 = OPTmodel.addConstr(Pbess_flex_[i] == Pbess_in_[i] + Pbess_out_[i],'CON1')
CON2 = OPTmodel.addConstr(Phvac_flex_[i] == Phvac_up_[i] + Phvac_down_[i],'CON2')
CON3=OPTmodel.addConstr(Phvac_down_[i] == 0, name='CON3')
CON4=OPTmodel.addConstr(Phvac_up_[i] == float((PDRN[i])),name='CON4')
CON5=OPTmodel.addConstr(Pbess_out_[i] == 0,name='CON5')
CON6=OPTmodel.addConstr(Pbess_in_[i] == float((PDRN[i])),name='CON6')
elif float(np.transpose(PDRN[i])) < 0:
CON7=OPTmodel.addConstr(Phvac_down_[i] == float(np.transpose(PDRN[i])),name='CON7')
CON8=OPTmodel.addConstr(Phvac_up_[i] == 0,name='CON8')
# Phvac_flex_[i] = Phvac_down_[i] + Phvac_up_[i]
CON9=OPTmodel.addConstr(Pbess_out_[i] == float((PDRN[i])),name='CON9')
CON10=OPTmodel.addConstr(Pbess_in_[i] == 0,name='CON10')
else:
CON11=OPTmodel.addConstr(Phvac_down_[i] == 0,name='CON11')
CON12=OPTmodel.addConstr(Phvac_up_[i] == 0,name='CON12')
CON13=OPTmodel.addConstr(Phvac_flex_[i] == Phvac_down_[i] + Phvac_up_[i],name='CON13')
CON14=OPTmodel.addConstr(Pbess_out_[i] == 0)
CON15=OPTmodel.addConstr(Pbess_in_[i] == 0,name='CON15')
CON16=OPTmodel.addConstr(Pbess_flex_[i] == Pbess_in_[i] + Pbess_out_[i],name='CON16')
OPTmodel.update()
ConPDC = OPTmodel.addConstr(Pdc[i] == Pflex_i[i] + float(ITload[i]), name='ConPDC')
# OPTmodel.addConstr(Tdc_i[i]==(ki*Tdc_i[i-1]+(ko*Tamb)))
#for x in Ebess_i:
#ConEBESS2 = OPTmodel.addConstr(Ebess_i[i] ==((Pbess_in[i] / 0.75) + (Pbess_out[i] * 0.75)))
cooling = np.array(pd.read_csv('Pcooling.csv'))
DRR = pd.read_csv('DR.csv')
DR = DRR.values
IT = pd.read_csv('ITpower.csv')
ITload = IT.values
newList = map(lambda x: x / 1000, ITload)
PH = pd.read_csv('Pcooling.csv')
PHVAC = PH.values
newList2 = map(lambda x: x / 1000, PHVAC)
#for y in Tdc_i:
T=pd.read_csv('TT.csv').values
OPTmodel.addConstr(Tdc_i[i]==((ki*float(T[i]))+(ko*Tamb)+(kc*float(newList[i]))-((kp*(float(newList2[i])))+(Phvac_flex[i]*3.14))))
print Tdc_i.values()
OPTmodel.addConstr(Pbess_out_[i]<=Phvac_flex[i] + Pbess_flex[i]<=Pbess_in_[i])
# Tdc_i[1:len(i)]==(Ki*Tdc_i[1:1438])+(Kc*array2[1:1438])+(Ko*Tamb))
ConBESS5 = OPTmodel.addConstr(Pbess_flex[i] == Pbess_in[i] + Pbess_out[i], name='ConBESS5')
#OPTmodel.addConstr(defIT[i]==DeferableLoad.j2 + DeferableLoad.j3)
# OPTmodel.addConstr(Pdc_base[i]==predictions[i])
ConFLEX = OPTmodel.addConstr(Pflex_i[i] == Pbess_flex[i] + Phvac_flex[i], name='ConFLEX')
PcoolingPredicted = pd.read_csv('PcoolingPredictionResult.csv')
PcoolingPredictedValue = PcoolingPredicted.values
ITPredicted = pd.read_csv('ITpredictionResult.csv')
ITPredictedValue = ITPredicted.values
ConPDCbase = OPTmodel.addConstr(Pdc_base[i] == np.transpose(ITPredictedValue[i]) + np.transpose(PcoolingPredictedValue[i]))
OPTmodel.update()
# OPTmodel.addConstr(Pdc_base[i]==prediction[i])
OPTmodel.setObjective((np.transpose(Pdc_base[i])-float(DR[i]) - (Pdc[i]) ), GRB.MINIMIZE)
OPTmodel.update()
OPTmodel.optimize()
print Pdc_base[i].X
#print Ebess_i[i].X
#print Phvac_flex[i].X
print Tdc_i[i]
print Pdc[i]
print Phvac_flex[i]
print Pbess_flex[i]
print Pbess_out[i]
print Pbess_in[i]
print Ebess_i[i]
print Pbess_flex_[i]
print Phvac_down[i]
print Phvac_up[i]
'''
def get_results(self):
"""
This function gets the results of the current optimization model
Returns
-------
"""
HVACresult = np.zeros(1,N)
BatteryResult = np.zeros(1,N)
SOC = np.zeros(1,N)
#r_Q_dot = np.zeros((self.gp.N_H, self.N_S))
#r_P = np.zeros((self.gp.N_H, self.N_S))
#r_P_self = np.zeros((self.gp.N_H, self.N_S))
#r_P_ex = np.zeros((self.gp.N_H, self.N_S))
#r_Q_dot_gas = np.zeros((self.gp.N_H, self.N_S))
#Load = np.zeros((self.gp.N_H, self.N_S))
try:
for t in range(1,N):
HVACresult[t]= Phvac_flex[t].X
BatteryResult[t]=Pbess_flex[t].X
SOC[t] = Ebess_i[t].X / Ebess_max
except:
pass
return { 'SOC' : SOC , 'BatteryResult': BatteryResult }
print OPTmodel.getVars()
# get results
Temp = {}
Battery = {}
Ebess_result = {}
ITloadd = {}
for t in range(1,N):
Temp[t] = OPTmodel.getVarByName("Tdc_i" )
Battery[t] = OPTmodel.getVarByName("PBESS_flex" )
Ebess_result[t] = OPTmodel.getVarByName("Ebess_i" )
#r_P_e[t] = model.getVarByName("P_export_%s_0" % t).X
fig, axes = plt.subplots(4, 1)
# plot elctricity
ax5 = axes[2]
ax6 = ax5.twinx()
ax5.plot( [Temp[t] for t in range(1,N)], 'g-')
ax6.plot([Ebess_result[t] for t in range(1,N)], 'b-')
ax5.set_xlabel('Time index')
ax5.set_ylabel('Power Import [W]', color='g')
ax6.set_ylabel('Power CHP [W]', color='b')
ax7 = axes[3]
ax7.plot([Battery[t] for t in range(1,N)], 'g-')
ax7.set_ylabel('Power Export [W]', color='g')
'''
print Pflex_i.values()
# print OPTmodel.getVars()
print OPTmodel.feasibility()
print OPTmodel.getObjective()
print Pdc_base.values()
'''
b = map(float, Phvac_flex)
plt.plot(b)
plt.show()
'''
#c = map(float, Pbess_flex_)
#plt.plot(c)
#plt.show()
print OPTmodel
print Tdc_i.values()
# get results
print OPTmodel.getVars()
# print OPTmodel.getAttr('EBESS_i')
status = OPTmodel.status
print status
# print Con10,Con12
print Phvac_flex.values()
print Pbess_flex.values()
print Ebess_i.values()
print OPTmodel.objval
print Tdc_i
print Pbess_in
print Pbess_out.values()
# print Pbess_flex
# print Phvac_flex
# print Ebess_i
print Pflex_i.values()
print Pbess_flex_.values()
#print OPTmodel.getVars()
print OPTmodel.feasibility()
print OPTmodel.getObjective()
print Ebess_i.values()
if OPTmodel.status == GRB.Status.INF_OR_UNBD:
# Turn presolve off to determine whether model is infeasible
# or unbounded
OPTmodel.setParam(GRB.Param.Presolve, 0)
OPTmodel.optimize()
OPTmodel.write("mymodel.lp")
if OPTmodel.status == GRB.Status.OPTIMAL:
print('Optimal objective: %g' % OPTmodel.objVal)
OPTmodel.write('model.sol')
exit(0)
elif OPTmodel.status != GRB.Status.INFEASIBLE:
print('Optimization was stopped with status %d' % OPTmodel.status)
exit(0)
# Model is infeasible - compute an Irreducible Inconsistent Subsystem (IIS)
print('')
print('Model is infeasible')
OPTmodel.computeIIS()
OPTmodel.write("model.ilp")
print("IIS written to file 'model.ilp'")
I want to plot the computed values from Gurobi, but when I try to get the X attribute of a Gurobi variable it says AttributeError: it has no attribute 'X', and when I cast the value from float to int it just shows me an empty plot, even though in the LP file I can see the result of each iteration.
I am anxiously waiting for your response.
Cheers
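Two things worth checking, stated as observations rather than a definitive fix: the X attribute exists only on gurobipy Var objects and only after a successful optimize(), and several dictionary entries in the code above (for example Pflex_i[1] = 0 and Tdc_i[0] = 18) replace the Var with a plain Python number, which has no X attribute. Also, getVarByName needs the full variable name including the index suffix (for example "Tdc_i5"), not just the prefix. A minimal sketch of retrieving and plotting values after an optimal solve (a toy model; the names are only for the demo):

from gurobipy import Model, GRB
import matplotlib.pyplot as plt

m = Model('demo')
x = {t: m.addVar(lb=0.0, ub=10.0, vtype=GRB.CONTINUOUS, name='x' + str(t)) for t in range(5)}
m.setObjective(sum(x[t] for t in range(5)), GRB.MAXIMIZE)
m.optimize()

if m.status == GRB.OPTIMAL:
    values = [x[t].X for t in range(5)]                        # .X only on Var objects, after the solve
    same = [m.getVarByName('x' + str(t)).X for t in range(5)]  # full name, including the index suffix
    plt.plot(values)
    plt.show()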
I would like to simulate a seven-game baseball playoff series. Let's say I have the win probabilities for each game in the series. I would like to know the probabilities for each possible series outcome, i.e. Team A in 4 games, Team B in 4 games, Team A in 5 games, etc.
This is what I came up with, and it seems to work, but I think it could be done better.
winPercGM1 = .5
winPercGM2 = .56
winPercGM3 = .47
winPercGM4 = .55
winPercGM5 = .59
winPercGM6 = .59
winPercGM7 = .38
winPercs = [winPercGM1, winPercGM2, winPercGM3, winPercGM4, winPercGM5, winPercGM6, winPercGM7]
def WinSeries():
    teamAwins = 0
    teamBwins = 0
    for perc in winPercs:
        if teamAwins == 4:
            break
        elif teamBwins == 4:
            break
        elif perc > np.random.random():
            teamAwins += 1
        else:
            teamBwins += 1
    return teamAwins, teamBwins

def RunFun(n):
    teamAWins = []
    teamBWins = []
    for i in xrange(n):
        result = WinSeries()
        teamAWin = result[0]
        teamBWin = result[1]
        teamAWins.append(teamAWin)
        teamBWins.append(teamBWin)
    return teamAWins, teamBWins

n = 500000
results = RunFun(n)
teamAwinSeries = results[0]
teamBwinSeries = results[1]
teamBin4 = teamAwinSeries.count(0) / n
teamBin5 = teamAwinSeries.count(1) / n
teamBin6 = teamAwinSeries.count(2) / n
teamBin7 = teamAwinSeries.count(3) / n
teamAin4 = teamBwinSeries.count(0) / n
teamAin5 = teamBwinSeries.count(1) / n
teamAin6 = teamBwinSeries.count(2) / n
teamAin7 = teamBwinSeries.count(3) / n
This can be done easily with numpy (Python 2.7):
import numpy as np

probs = np.array([.5, .56, .47, .55, .59, .59, .38])
nsims = 500000

chance = np.random.uniform(size=(nsims, 7))
teamAWins = (chance > probs[None, :]).astype('i4')
teamBWins = 1 - teamAWins

teamAwincount = {}
teamBwincount = {}
for ngames in range(4, 8):
    afilt = teamAWins[:, :ngames].sum(axis=1) == 4
    bfilt = teamBWins[:, :ngames].sum(axis=1) == 4
    teamAwincount[ngames] = afilt.sum()
    teamBwincount[ngames] = bfilt.sum()
    teamAWins = teamAWins[~afilt]
    teamBWins = teamBWins[~bfilt]

teamAwinprops = {k: 1. * count / nsims for k, count in teamAwincount.iteritems()}
teamBwinprops = {k: 1. * count / nsims for k, count in teamBwincount.iteritems()}
Output:
>>> sum(teamAwinprops.values()) + sum(teamBwinprops.values())
1.0
>>> teamAwincount
{4: 26186, 5: 47062, 6: 59222, 7: 95381}
>>> teamBwincount
{4: 36187, 5: 79695, 6: 97802, 7: 58465}
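For seven games, the same outcome distribution can also be computed exactly rather than simulated, by enumerating all 2**7 win/loss sequences. A small sketch, using the question's convention that probs[i] is the probability that Team A wins game i:

from itertools import product

def exact_series_probs(probs):
    outcome = {}  # e.g. ('A', 5) -> probability that Team A wins the series in 5 games
    for seq in product([0, 1], repeat=len(probs)):         # 1 means Team A wins that game
        p, a_wins, b_wins, result = 1.0, 0, 0, None
        for game, a_won in enumerate(seq):
            p *= probs[game] if a_won else 1 - probs[game]
            a_wins += a_won
            b_wins += 1 - a_won
            if result is None and (a_wins == 4 or b_wins == 4):
                result = ('A' if a_wins == 4 else 'B', game + 1)
        outcome[result] = outcome.get(result, 0.0) + p      # games after the clinch sum out to 1
    return outcome

# usage: exact_series_probs([.5, .56, .47, .55, .59, .59, .38]) -> dict whose values sum to 1.0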