I want to build a system using an AutoRules fuzzy controller, where the rules are created from real process data.
My problem is that when I use new data to simulate the fuzzy controller with the rules extracted from the older data, I get an error saying the crisp output cannot be calculated because there are not enough rules. That is expected: my fuzzy system needs more rules, and that's exactly my point! I want to implement a new routine that analyzes the crisp input/output, creates new rules from this data, and then goes back and runs the simulation again.
Is there a way to stop the AssertionError from halting the code and redirect execution to another function or to an earlier line of code?
I tried to find a library with a function that lets me redirect the error instead of stopping the code, but had no success. I think I will have to change the skfuzzy defuzz code to make this possible.
Thank you very much.
''' python
step = stp_ini
k = 0
delay = stp_ini
end = stp_fim - stp_ini
Tout_sim = pd.DataFrame(columns=['val'])
Vent_sim = pd.DataFrame(columns=['val'])
start = timeit.default_timer()
for step in range(end - k):
    clear_output(wait=True)
    simulation.input['PCA1'] = comp1n[step + delay]
    simulation.input['PCA2'] = comp2n[step + delay]
    simulation.input['PCA3'] = comp3n[step + delay]
    simulation.input['PCA4'] = comp4n[step + delay]
    simulation.input['Vent'] = dataoutf.NumVentOn[step + delay]
    simulation.compute()
    Tout_sim = Tout_sim.append({'val': simulation.output['Tout']}, ignore_index=True)
    stop = timeit.default_timer()
    if ((step / (stp_fim - k - 1)) * 100) < 5:
        expected_time = "Calculating..."
    else:
        time_perc = timeit.default_timer()
        expected_time = np.round(((time_perc - start) / (step / (end - k - 1))) / 60, 2)
'''
~\AppData\Local\Continuum\anaconda3\lib\site-packages\skfuzzy\control\controlsystem.py in defuzz(self)
587 self.var.defuzzify_method)
588 except AssertionError:
--> 589 raise ValueError("Crisp output cannot be calculated, likely "
590 "because the system is too sparse. Check to "
591 "make sure this set of input values will "
ValueError: Crisp output cannot be calculated, likely because the system is too sparse. Check to make sure this set of input values will activate at least one connected Term in each Antecedent via the current set of Rules.
Edit:
I tried to wrap the line that raises the ValueError in a try block, but the ValueError is still raised:
def defuzz(self):
    """Derive crisp value based on membership of adjective(s)."""
    if not self.sim._array_inputs:
        ups_universe, output_mf, cut_mfs = self.find_memberships()

        if len(cut_mfs) == 0:
            raise ValueError("No terms have memberships. Make sure you "
                             "have at least one rule connected to this "
                             "variable and have run the rules calculation.")

        try:
            return defuzz(ups_universe, output_mf,
                          self.var.defuzzify_method)
        except AssertionError:
            new_c1 = []
            new_c2 = []
            new_c3 = []
            new_c4 = []
            new_vent = []
            new_tout = []
            newcondition1 = []
            newcondition2 = []
            newcondition3 = []
            newcondition4 = []
            newcondition5 = []
            newcondition6 = []

            # Inputs: membership degree of the current crisp inputs in each term
            for n in range(len(namespca)):
                new_c1.append(fuzz.interp_membership(PCA1.universe, PCA1[namespcapd.name.loc[n]].mf, comp1n[step]))
                new_c2.append(fuzz.interp_membership(PCA2.universe, PCA2[namespcapd.name.loc[n]].mf, comp2n[step]))
                new_c3.append(fuzz.interp_membership(PCA3.universe, PCA3[namespcapd.name.loc[n]].mf, comp3n[step]))
                new_c4.append(fuzz.interp_membership(PCA4.universe, PCA4[namespcapd.name.loc[n]].mf, comp4n[step]))
            for n in range(len(namesvent)):
                new_vent.append(fuzz.interp_membership(Vent.universe, Vent[namesventpd.name.loc[n]].mf, dataoutf.NumVentOn[step]))
            # Output
            for n in range(len(namestemp)):
                new_tout.append(fuzz.interp_membership(Tout.universe, Tout[namestemppd.name.loc[n]].mf, dataoutf.TsaidaHT[step]))

            new_c1_conv = pd.DataFrame(new_c1)
            new_c2_conv = pd.DataFrame(new_c2)
            new_c3_conv = pd.DataFrame(new_c3)
            new_c4_conv = pd.DataFrame(new_c4)
            new_vent_conv = pd.DataFrame(new_vent)
            new_tout_conv = pd.DataFrame(new_tout)

            # For each variable, find the term with the highest membership
            for i in range(pcamf):
                newcondition1.append([new_c1_conv.idxmax(axis=0) == i])
                newcondition2.append([new_c2_conv.idxmax(axis=0) == i])
                newcondition3.append([new_c3_conv.idxmax(axis=0) == i])
                newcondition4.append([new_c4_conv.idxmax(axis=0) == i])
            for i in range(ventmf):
                newcondition5.append([new_vent_conv.idxmax(axis=0) == i])
            for i in range(tempmf):
                newcondition6.append([new_tout_conv.idxmax(axis=0) == i])

            choicelistpca = namespca
            choicelistvent = namesvent
            choicelisttout = namestemp
            new_c1_rules = np.select(newcondition1, choicelistpca)
            new_c2_rules = np.select(newcondition2, choicelistpca)
            new_c3_rules = np.select(newcondition3, choicelistpca)
            new_c4_rules = np.select(newcondition4, choicelistpca)
            new_vent_rules = np.select(newcondition5, choicelistvent)
            new_tout_rules = np.select(newcondition6, choicelisttout)
            new_rules = np.vstack([new_c1_rules, new_c2_rules, new_c3_rules, new_c4_rules, new_vent_rules, new_tout_rules])
            new_rules = new_rules.T
            new_rulespd = pd.DataFrame(new_rules, columns=['PCA1', 'PCA2', 'PCA3', 'PCA4', 'Vent', 'Tout'])

            # Check whether the new rule is already in the current fuzzy rule set
            if pd.merge(new_rulespd, AutoRules, on=['PCA1', 'PCA2', 'PCA3', 'PCA4', 'Vent', 'Tout'], how='inner').empty:
                print('New rule not found in the current fuzzy rule set!')
            else:
                pd.merge(new_rulespd, AutoRules, on=['PCA1', 'PCA2', 'PCA3', 'PCA4', 'Vent', 'Tout'], how='inner')

            # Original skfuzzy behavior, commented out:
            # except AssertionError:
            #     raise ValueError("Crisp output cannot be calculated, likely "
            #                      "because the system is too sparse. Check to "
            #                      "make sure this set of input values will "
            #                      "activate at least one connected Term in each "
            #                      "Antecedent via the current set of Rules.")
    else:
        # Calculate using array-aware version, one cut at a time.
        output = np.zeros(self.sim._array_shape, dtype=np.float64)
        it = np.nditer(output, ['multi_index'], [['writeonly', 'allocate']])
        for out in it:
            universe, mf = self.find_memberships_nd(it.multi_index)
            out[...] = defuzz(universe, mf, self.var.defuzzify_method)
        return output
Wrap the line of code that raises the ValueError in a try block, and decide what to do in its except ValueError: clause. Perhaps continue-ing on to the next iteration would be reasonable.
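For instance, wrapping simulation.compute(), which is what ultimately raises the ValueError shown in the traceback, keeps the rule-creation logic outside of skfuzzy entirely. A minimal sketch, reusing the variable names from the question; create_rules_from_sample is a hypothetical helper that would hold the rule-building code from the question's edit:

for step in range(end - k):
    simulation.input['PCA1'] = comp1n[step + delay]
    simulation.input['PCA2'] = comp2n[step + delay]
    simulation.input['PCA3'] = comp3n[step + delay]
    simulation.input['PCA4'] = comp4n[step + delay]
    simulation.input['Vent'] = dataoutf.NumVentOn[step + delay]
    try:
        simulation.compute()
    except ValueError:
        # No rule covered this sample: derive a new rule from the crisp
        # input/output pair, rebuild the ControlSystem so it picks the
        # rule up, then skip (or retry) this step.
        create_rules_from_sample(step)  # hypothetical helper
        continue
    Tout_sim = Tout_sim.append({'val': simulation.output['Tout']}, ignore_index=True)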
I want to create comments from a dataset that details the growth rate, market share, etc. for various markets and products. The dataset is in the form of a pd.DataFrame(). I would like the comment to include keywords like increase/decrease based on the calculations; for example, if 2020 Jan has a sale of 1000 and 2021 Jan has a sale of 1600, that necessarily means an increase of 60%.
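(For reference, the growth rate behind that example is just the relative change:)

annual_gr = (1600.0 - 1000.0) / 1000.0   # 0.6, which the "{:.1%}" format below renders as 60.0%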
I defined a function outside as such, and I would like to ask whether this method is too clumsy and, if so, how I should improve it.
from collections import namedtuple

GrowthIncDec = namedtuple('gr_tuple', ['annual_growth_rate', 'quarterly_growth_rate'])

def increase_decrease(annual_gr, quarter_gr):
    if annual_gr > 0:
        annual_growth_rate = 'increased'
    elif annual_gr < 0:
        annual_growth_rate = 'decreased'
    else:
        annual_growth_rate = 'stayed the same'

    if quarter_gr > 0:
        quarterly_growth_rate = 'increased'
    elif quarter_gr < 0:
        quarterly_growth_rate = 'decreased'
    else:
        quarterly_growth_rate = 'stayed the same'

    gr_named_tuple = GrowthIncDec(annual_growth_rate=annual_growth_rate, quarterly_growth_rate=quarterly_growth_rate)
    return gr_named_tuple
myfunc = increase_decrease(5, -1)
myfunc.annual_growth_rate
output: 'increased'
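One way to cut the duplication (a sketch, not part of the original question: it factors the sign-to-word mapping into a single hypothetical helper) would be:

from collections import namedtuple

GrowthIncDec = namedtuple('gr_tuple', ['annual_growth_rate', 'quarterly_growth_rate'])

def _trend(rate):
    """Map a growth rate's sign to a descriptive word."""
    if rate > 0:
        return 'increased'
    if rate < 0:
        return 'decreased'
    return 'stayed the same'

def increase_decrease(annual_gr, quarter_gr):
    return GrowthIncDec(annual_growth_rate=_trend(annual_gr),
                        quarterly_growth_rate=_trend(quarter_gr))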
A snippet of my main code is as follows to illustrate the use of the above function:
def get_comments(grp, some_dict: Dict[str, List[str]]):
    # .......
    try:
        subdf = ...  # the dataframe
        annual_gr = subdf['Annual_Growth'].values[0]
        quarter_gr = subdf['Quarterly_Growth'].values[0]
        inc_dec_named_tup = increase_decrease(annual_gr, quarter_gr)
        inc_dec_annual_gr = inc_dec_named_tup.annual_growth_rate
        inc_dec_quarterly_gr = inc_dec_named_tup.quarterly_growth_rate
        comment = "The {} has {} by {:.1%} in {} {} compared to {} {}"\
            .format(market, inc_dec_annual_gr, annual_gr, timeperiod, curr_date, timeperiod, prev_year)
        comments_df = pd.DataFrame(columns=['Date', 'Comments'])
        # comments_df['Date'] = [curr_date]
        comments_df['Comments'] = [comment]
        return comments_df
    except (IndexError, KeyError) as e:
        # this is for all those nan values which are empty
        annual_gr = 0
        quarter_gr = 0
I'm using third-party modules and fighting with an error raised while calling those modules.
Here is what the traceback shows:
C:\Users\Dmitry\AppData\Local\Enthought\Canopy\edm\envs\User\lib\site-packages\backtrader\feeds\csvgeneric.py in _loadline(self, linetokens)
148 # get it from the token
149 csvfield = linetokens[csvidx]
--> 150 print(csvidx)
151
152 if csvfield == '':
IndexError: list index out of range
I deliberately added print(csvidx) to see the value of csvidx, but it's not showing up on the console. What am I doing wrong? Thanks a lot.
Here is the code:
def _loadline(self, linetokens):
    # Datetime needs special treatment
    dtfield = linetokens[self.p.datetime]
    if self._dtstr:
        dtformat = self.p.dtformat
        if self.p.time >= 0:
            # add time value and format if it's in a separate field
            dtfield += 'T' + linetokens[self.p.time]
            dtformat += 'T' + self.p.tmformat
        dt = datetime.strptime(dtfield, dtformat)
    else:
        dt = self._dtconvert(dtfield)

    if self.p.timeframe >= TimeFrame.Days:
        # check if the expected end of session is larger than parsed
        if self._tzinput:
            dtin = self._tzinput.localize(dt)  # pytz compatible-ized
        else:
            dtin = dt
        dtnum = date2num(dtin)  # utc'ize
        dteos = datetime.combine(dt.date(), self.p.sessionend)
        dteosnum = self.date2num(dteos)  # utc'ize
        if dteosnum > dtnum:
            self.lines.datetime[0] = dteosnum
        else:
            # Avoid reconversion if already converted dtin == dt
            self.l.datetime[0] = date2num(dt) if self._tzinput else dtnum
    else:
        self.lines.datetime[0] = date2num(dt)

    # The rest of the fields can be done with the same procedure
    for linefield in (x for x in self.getlinealiases() if x != 'datetime'):
        # Get the index created from the passed params
        csvidx = getattr(self.params, linefield)
        if csvidx is None or csvidx < 0:
            # the field will not be present, assign the "nullvalue"
            csvfield = self.p.nullvalue
        else:
            # get it from the token
            print(csvidx)
            csvfield = linetokens[csvidx]
        if csvfield == '':
            # if empty ... assign the "nullvalue"
            csvfield = self.p.nullvalue
        # get the corresponding line reference and set the value
        line = getattr(self.lines, linefield)
        line[0] = float(float(csvfield))
    return True
csvidx = getattr(self.params, linefield)
if csvidx is None or csvidx < 0:
    # the field will not be present, assign the "nullvalue"
    csvfield = self.p.nullvalue
else:
    # get it from the token
    print(csvidx)
    csvfield = linetokens[csvidx]
csvidx is being sought in self.params and it's obviously being found.
And it seems to be neither None nor < 0, so it seems to have a numeric value >= 0.
The IndexError: list index out of range clearly indicates that linetokens doesn't contain as many items as csvidx expects.
Since the name self.params seems to indicate user input, it would seem that whatever value you have given is greater than the actual number of tokens available in linetokens.
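If you want to confirm that quickly, a defensive guard around the lookup would show it (a diagnostic sketch, not backtrader's actual code):

csvidx = getattr(self.params, linefield)
if csvidx is None or csvidx < 0:
    csvfield = self.p.nullvalue
elif csvidx >= len(linetokens):
    # the configured column index points past the tokens in this row
    raise ValueError('%s wants column %d but the row only has %d tokens'
                     % (linefield, csvidx, len(linetokens)))
else:
    csvfield = linetokens[csvidx]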
The code seems to be executed in one of those canned Python environments:
--> 150 print(csvidx)
Because that's for sure not the usual Python console output. If that canned environment (and not the 3rd party packages) really allows you to print to the console, it would seem advisable to actually do it a lot sooner, as in:
for linefield in (x for x in self.getlinealiases() if x != 'datetime'):
    # Get the index created from the passed params
    csvidx = getattr(self.params, linefield)
    print('linefield {} -> csvidx {}'.format(linefield, csvidx))
    if csvidx is None or csvidx < 0:
        # the field will not be present, assign the "nullvalue"
        csvfield = self.p.nullvalue
    else:
        # get it from the token
        csvfield = linetokens[csvidx]
You should see each relationship linefield -> csvidx before the exception is triggered.
If your environment allows it, run everything with python -u, which uses unbuffered output. (Strongly recommended under Windows, where flushing on newline is known to either not work or have problems.)
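If you cannot change how the interpreter is launched, explicitly flushing stdout after each print is an alternative (a sketch that works on the Python 2.7 used here):

import sys

print('linefield {} -> csvidx {}'.format(linefield, csvidx))
sys.stdout.flush()  # push the buffered output to the console immediately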
Here is my code, taken mostly from the myhdl sample:
from myhdl import Signal, intbv, delay, always, now, Simulation, toVerilog

__debug = True

def ClkDriver(clk):
    halfPeriod = delay(10)

    @always(halfPeriod)
    def driveClk():
        clk.next = not clk

    return driveClk

def HelloWorld(clk, outs):
    counts = intbv(3)[32:]

    @always(clk.posedge)
    def sayHello():
        outs.next = not outs
        if counts >= 3 - 1:
            counts.next = 0
        else:
            counts.next = counts + 1
        if __debug__:
            print "%s Hello World! outs %s %s" % (
                now(), str(outs), str(outs.next))

    return sayHello

clk = Signal(bool(0))
outs = Signal(intbv(0)[1:])
clkdriver_inst = ClkDriver(clk)
hello_inst = toVerilog(HelloWorld, clk, outs)
sim = Simulation(clkdriver_inst, hello_inst)
sim.run(150)
I expect it to generate a Verilog program that contains an initial block, something like:
module HelloWorld(...)
reg [31:0] counts;
initial begin
counts = 32'h3
end
always #(...
How can you get the initial block generated?
Note that the Google cache for old.myhdl.org/doku.php/dev:initial_values links to the example https://bitbucket.org/cfelton/examples/src/tip/ramrom/, so it looks like the feature should be supported. However, the rom sample generates static case statements, and that's not what I'm looking for.
Three steps to resolve it:
1. Update to the latest myhdl from master, or any version that contains commit 87784ad, which added the feature under issue #105 or #150. With a virtualenv, for example, run a git clone followed by pip install -e <path-to-myhdl-dir>.
2. Change the signal to a list.
3. Set toVerilog.initial_values=True before calling toVerilog.
Code snippet follows.
def HelloWorld(clk, outs):
    counts = [Signal(intbv(3)[32:])]

    @always(clk.posedge)
    def sayHello():
        outs.next = not outs
        if counts[0] >= 3 - 1:
            counts[0].next = 0
        else:
            counts[0].next = counts[0] + 1
        if __debug__:
            print "%s Hello World! outs %s %s %d" % (
                now(), str(outs), str(outs.next), counts[0])

    return sayHello

clk = Signal(bool(0))
outs = Signal(intbv(0)[1:])
clkdriver_inst = ClkDriver(clk)
toVerilog.initial_values = True
hello_inst = toVerilog(HelloWorld, clk, outs)
sim = Simulation(clkdriver_inst, hello_inst)
sim.run(150)
I'm trying to figure out a way to create unified diffs with line numbers, showing only N lines of context. I have been unable to do this with difflib.unified_diff. I need to show changes in both files.
The closest I can come is using diff on the command line, like so:
/usr/bin/diff \
    --unchanged-line-format=' %.2dn %L' \
    --old-line-format="-%.2dn %L" \
    --new-line-format="+%.2dn %L" \
    file1.py file2.py
But I only want to show N lines of context, and /usr/bin/diff doesn't seem to support context with a custom line format (e.g. -U2 is not compatible with --line-format: "conflicting output style options").
Below is an example of what I'd like to accomplish (the same output as the above diff, but only showing 1 line of context surrounding changes):
+01: def renamed_function()
-01: def original_function():
02:
+03: """ Neat stuff here """
04:
21:
+22: # Here's a new comment
23:
85: # Output the value of foo()
+86: print "Foo is %s"%(foo())
-86: print foo()
87:
I was able to figure out something very close to what I wanted to do. It's slower than regular diff, though. Here's the entire code, from my project GitGate.
import difflib
import os
import re

def unified_diff(to_file_path, from_file_path, context=1):
    """ Returns a list of differences between two files based
    on some context. This is probably over-complicated. """
    # Matches difflib's hunk headers, e.g. "@@ -1,3 +1,4 @@"
    pat_diff = re.compile(r'@@ (.[0-9]+,[0-9]+) (.[0-9]+,[0-9]+) @@')

    from_lines = []
    if os.path.exists(from_file_path):
        from_fh = open(from_file_path, 'r')
        from_lines = from_fh.readlines()
        from_fh.close()

    to_lines = []
    if os.path.exists(to_file_path):
        to_fh = open(to_file_path, 'r')
        to_lines = to_fh.readlines()
        to_fh.close()

    diff_lines = []
    lines = difflib.unified_diff(to_lines, from_lines, n=context)
    for line in lines:
        # Skip the "---" / "+++" file headers
        if line.startswith('--') or line.startswith('++'):
            continue

        m = pat_diff.match(line)
        if m:
            # Hunk header: reset the line counters for both files
            left = m.group(1)
            right = m.group(2)
            lstart = left.split(',')[0][1:]
            rstart = right.split(',')[0][1:]
            diff_lines.append("@@ %s %s @@\n" % (left, right))
            to_lnum = int(lstart)
            from_lnum = int(rstart)
            continue

        code = line[0]

        lnum = from_lnum
        if code == '-':
            lnum = to_lnum
        diff_lines.append("%s%.4d: %s" % (code, lnum, line[1:]))

        # Advance the counter(s) of whichever file(s) the line belongs to
        if code == '-':
            to_lnum += 1
        elif code == '+':
            from_lnum += 1
        else:
            to_lnum += 1
            from_lnum += 1

    return diff_lines
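Usage is then just a matter of iterating over the returned lines (a small example; the file names are placeholders, and the lines already carry their newlines):

import sys

for line in unified_diff('file2.py', 'file1.py', context=1):
    sys.stdout.write(line)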
Issue description and environments
The OpenOPC library is friendly and easy to use, and its API is simple too, but I have found two issues while developing a tool that records real-time OPC item data.
The development environment is: Windows 8.1, Python 2.7.6, wxPython 2.8 unicode
The testing environment is: Windows XP SP3, Python 2.7.6, wxPython 2.8 unicode, Rockwell's SoftLogix as OPC server
The deploy environment is: Windows XP SP3, connected to Rockwell's real PLC, with RSLogix 5000 and RSLinx Classic Gateway installed
Questions
1. The opc.list function doesn't list all the items of the specified node, in both the testing and deploy environments. The question is: how do I list 't' from the OPC server?
An int array 'dint100' and a dint 't' were added with RSLogix 5000 at the scope of soft_1.
With the default OPC client test tool from Rockwell, the newly added 't' is listed.
With the OpenOPC library I couldn't find a way to list the item 't', but I could read its value via opc.read('[soft_1]t') using its tag.
If 't' could be listed, it could be added into the IO tree of my tool.
2. The opc.servers function raises an OPCError in the deploy environment, even though the client can connect to the 'RSLinx OPC Server' directly by its name. Does the opc.servers function depend on some special DLL or service?
Any suggestions will be appreciated! Thanks in advance!
Consider that the browsing problems ("opc.list") may not be on your side. RSLinx is notorious for its broken OPC browsing. Try some test/simulation server from a different vendor to test this hypothesis.
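A quick way to test that hypothesis is to point the same browsing call at another vendor's server (a sketch; 'Matrikon.OPC.Simulation' is just a commonly installed example server and may not exist on your machine):

import OpenOPC

opc = OpenOPC.client()
opc.connect('Matrikon.OPC.Simulation')  # example/hypothetical server name
# If recursive browsing works here but not against RSLinx,
# the problem is on the server side.
print(opc.list('*', recursive=True))
opc.close()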
I realize that I'm really late to this game. I found what was causing this issue. OpenOPC.py assumes that there cannot be both a "Leaf" and a "Branch" on the same level. Replace the function ilist with this:
def ilist(self, paths='*', recursive=False, flat=False, include_type=False):
    """Iterable version of list()"""
    try:
        self._update_tx_time()
        pythoncom.CoInitialize()

        try:
            browser = self._opc.CreateBrowser()
        # For OPC servers that don't support browsing
        except:
            return

        paths, single, valid = type_check(paths)
        if not valid:
            raise TypeError("list(): 'paths' parameter must be a string or a list of strings")

        if len(paths) == 0:
            paths = ['*']
        nodes = {}

        for path in paths:

            if flat:
                browser.MoveToRoot()
                browser.Filter = ''
                browser.ShowLeafs(True)

                pattern = re.compile('^%s$' % wild2regex(path), re.IGNORECASE)
                matches = filter(pattern.search, browser)
                if include_type:
                    matches = [(x, node_type) for x in matches]

                for node in matches:
                    yield node
                continue

            queue = []
            queue.append(path)

            while len(queue) > 0:
                tag = queue.pop(0)

                browser.MoveToRoot()
                browser.Filter = ''
                pattern = None

                path_str = '/'
                path_list = tag.replace('.', '/').split('/')
                path_list = [p for p in path_list if len(p) > 0]
                found_filter = False
                path_postfix = '/'

                for i, p in enumerate(path_list):
                    if found_filter:
                        path_postfix += p + '/'
                    elif p.find('*') >= 0:
                        pattern = re.compile('^%s$' % wild2regex(p), re.IGNORECASE)
                        found_filter = True
                    elif len(p) != 0:
                        pattern = re.compile('^.*$')
                        browser.ShowBranches()

                        # Branch node, so move down
                        if len(browser) > 0:
                            try:
                                browser.MoveDown(p)
                                path_str += p + '/'
                            except:
                                if i < len(path_list) - 1:
                                    return
                                pattern = re.compile('^%s$' % wild2regex(p), re.IGNORECASE)

                        # Leaf node, so append all remaining path parts together
                        # to form a single search expression
                        else:
                            ################ JG Edit - Flip the next two rows commented/uncommented
                            p = '.'.join(path_list[i:])
                            # p = string.join(path_list[i:], '.')
                            pattern = re.compile('^%s$' % wild2regex(p), re.IGNORECASE)
                            break

                ################ JG Edit - Comment this to return to original
                browser.ShowBranches()

                node_types = ['Branch', 'Leaf']
                if len(browser) == 0:
                    lowest_level = True
                    node_types.pop(0)
                else:
                    lowest_level = False

                for node_type in node_types:
                    if node_type == 'Leaf':
                        browser.ShowLeafs(False)
                    matches = filter(pattern.search, browser)

                    if not lowest_level and recursive:
                        queue += [path_str + x + path_postfix for x in matches]
                    else:
                        ################ JG Edit - Flip the next two rows commented/uncommented
                        if lowest_level or node_type == 'Leaf':
                            matches = [exceptional(browser.GetItemID, x)(x) for x in matches]
                        # if lowest_level: matches = [exceptional(browser.GetItemID,x)(x) for x in matches]
                        if include_type:
                            matches = [(x, node_type) for x in matches]
                        for node in matches:
                            if node not in nodes:
                                yield node
                            nodes[node] = True

                ################ Uncomment this to return to original
                # browser.ShowBranches()
                # if len(browser) == 0:
                #     browser.ShowLeafs(False)
                #     lowest_level = True
                #     node_type = 'Leaf'
                # else:
                #     lowest_level = False
                #     node_type = 'Branch'
                # matches = filter(pattern.search, browser)
                # if not lowest_level and recursive:
                #     queue += [path_str + x + path_postfix for x in matches]
                # else:
                #     if lowest_level: matches = [exceptional(browser.GetItemID,x)(x) for x in matches]
                #     if include_type: matches = [(x, node_type) for x in matches]
                #     for node in matches:
                #         if not node in nodes: yield node
                #         nodes[node] = True

    except pythoncom.com_error as err:
        error_msg = 'list: %s' % self._get_error_str(err)
        raise OPCError(error_msg)
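With the replacement in place, browsing the same level should yield both branch and leaf nodes (a hypothetical usage sketch based on the question's [soft_1]t example; the exact path syntax depends on your RSLinx topic configuration):

opc = OpenOPC.client()
opc.connect('RSLinx OPC Server')
# 't' should now show up next to the branch nodes at the same level
for node, kind in opc.list('[soft_1]*', include_type=True):
    print(node, kind)
opc.close()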