Odoo Selection that depends on another Selection - Python

I have a model like:
class MyDict(models.Model):
    _name = 'my_module.my_dict'
    field1 = fields.Char()
    field2 = fields.Char()
    field3 = fields.Char()
The data looks like this (field1 | field2 | field3):
obj_1 | attr_1 | val_1
obj_1 | attr_2 | val_1
obj_1 | attr_2 | val_2
obj_2 | attr_1 | val_1
obj_2 | attr_1 | val_2
From another model I want to use this data step by step:
class NewModel(models.Model):
    _name = 'my_module.new_model'

    selection1 = fields.Selection(selection='_get_selection1')
    selection2 = fields.Selection(selection='_get_selection2')
    selection3 = fields.Selection(selection='_get_selection3')

    def _get_selection1(self):
        my_list = []
        selection_list = []
        full_list = self.env['my_module.my_dict'].search([])
        for record in full_list:
            if record.field1 not in my_list:
                my_list.append(record.field1)
        for list_item in my_list:
            selection_list.append((str(list_item), str(list_item)))
        return selection_list
The second selection needs to depend on the first:
    @api.onchange('selection1')
    def _get_selection2(self):
        my_list = []
        selection_list = []
        full_list = self.env['my_module.my_dict'].search([('field1', '=', self.selection1)])
        for record in full_list:
            if record.field2 not in my_list:
                my_list.append(record.field2)
        for list_item in my_list:
            selection_list.append((str(list_item), str(list_item)))
        return selection_list
but I get this error:
File "/opt/odoo13/odoo/odoo/models.py", line 5817, in process
    if res.get('value'):
AttributeError: 'list' object has no attribute 'get'
How can I make selection_list depend on the first selection?

It looks like the only way is to create one model per field and link them with parent_id and child_ids fields:
class MyDict1(models.Model):
    _name = 'my_module.my_dict1'
    name = fields.Char()
    child_ids = fields.One2many('my_module.my_dict2', 'parent_id')

class MyDict2(models.Model):
    _name = 'my_module.my_dict2'
    name = fields.Char()
    parent_id = fields.Many2one('my_module.my_dict1')
    child_ids = fields.One2many('my_module.my_dict3', 'parent_id')
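For completeness, here is a minimal sketch of how the chain could be finished and consumed from the new model. MyDict3, the Many2one fields, and the onchange methods are assumptions (the question only shows the first two models); dependent filtering is done with onchange-returned domains instead of dependent Selection fields, and the usual from odoo import api, fields, models is assumed:

class MyDict3(models.Model):
    _name = 'my_module.my_dict3'
    name = fields.Char()
    parent_id = fields.Many2one('my_module.my_dict2')

class NewModel(models.Model):
    _name = 'my_module.new_model'

    # Many2one fields replace the dependent Selection fields
    level1_id = fields.Many2one('my_module.my_dict1')
    level2_id = fields.Many2one('my_module.my_dict2')
    level3_id = fields.Many2one('my_module.my_dict3')

    @api.onchange('level1_id')
    def _onchange_level1(self):
        # Restrict the second dropdown to children of the first choice
        self.level2_id = False
        return {'domain': {'level2_id': [('parent_id', '=', self.level1_id.id)]}}

    @api.onchange('level2_id')
    def _onchange_level2(self):
        self.level3_id = False
        return {'domain': {'level3_id': [('parent_id', '=', self.level2_id.id)]}}

This also explains the traceback above: an onchange handler must return a dict (e.g. {'value': ...} or {'domain': ...}), which is why returning selection_list (a plain list) from a method decorated with @api.onchange fails with 'list' object has no attribute 'get'.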

Related

I want to pass a side input with AsDict but I am getting the error "ValueError: dictionary update sequence element #0 has length 101; 2 is required"

class load_side_input(beam.DoFn):
    def process(self, pubsub_message):
        message = pubsub_message.decode("utf8")
        output: typing.Dict = {}
        for key in message.keys():
            output[key] = self.tag_model[key]
        return [output]

side_input = (p
    | "AMM Events" >> beam.io.ReadFromPubSub(subscription=opts.ammSub)
    | "Trigger event" >> beam.WindowInto(window.GlobalWindows(),
                                         trigger=trigger.Repeatedly(trigger.AfterCount(1)),
                                         accumulation_mode=trigger.AccumulationMode.DISCARDING)
    | "Parse and Update Cache" >> beam.ParDo(load_side_input())
)
enrichment = (rows
    | 'Data Validation and Enrichment' >> beam.ParDo(validation(), y_side=AsDict(side_input))
)
File "/usr/local/lib/python3.9/site-packages/apache_beam/runners/worker/bundle_processor.py", line 434, in __getitem__
self._cache[target_window] = self._side_input_data.view_fn(raw_view)
ValueError: dictionary update sequence element #0 has length 101; 2 is required [while running 'Data Enrichment-ptransform-128']
You are feeding beam.pvalue.AsDict the wrong input format. According to the documentation:
Parameters: pcoll – Input pcollection. All elements should be key-value pairs (i.e. 2-tuples) with unique keys.
Here is a minimal working example, which can be run in the Apache Beam Playground:
import apache_beam as beam

def use_side_input(main, side_input):
    return side_input[main]

class BadSideInputCreator(beam.DoFn):
    def process(self, element):
        output = {}
        output['1'] = 'value1'
        output['2'] = 'value2'
        yield [output]  # this is a list containing a dict, not a 2-tuple

class GoodSideInputCreator(beam.DoFn):
    def process(self, element):
        output = {}
        output['1'] = 'value1'
        output['2'] = 'value2'
        for key, value in output.items():
            yield (key, value)  # this is a 2-tuple

with beam.Pipeline() as pipeline:
    main = (
        pipeline
        | "init main" >> beam.Create(['1', '2'])
    )
    side = (
        pipeline
        | "init side" >> beam.Create(['dummy'])
        | beam.ParDo(BadSideInputCreator())  # replace with GoodSideInputCreator
    )
    (
        main
        | "use side input" >> beam.Map(use_side_input, side_input=beam.pvalue.AsDict(side))
        | "print" >> beam.Map(print)
    )
Running with BadSideInputCreator throws your error
ValueError: dictionary update sequence element #0 has length 1; 2 is required
while with GoodSideInputCreator we get the expected result
value1
value2
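Applied to the original load_side_input, the fix is to emit individual key-value pairs instead of a list-wrapped dict. A hedged sketch (assuming the Pub/Sub payload is JSON, since the original calls .keys() on the decoded message; tag_model comes from the question and is assumed to be set elsewhere on the DoFn):

import json
import apache_beam as beam

class load_side_input(beam.DoFn):
    def process(self, pubsub_message):
        # Parse the payload into a dict (a plain decoded string has no .keys())
        message = json.loads(pubsub_message.decode("utf8"))
        for key in message.keys():
            # Emit 2-tuples so beam.pvalue.AsDict can assemble the side-input dict
            yield (key, self.tag_model[key])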

Recursive relation search between 2 columns in a table [using Python list/dict]

I am trying to optimize a solution that I created to find recursive relations between 2 columns in a table. I need to find all accIDs for a bssID and recursively find all the bssIDs for those accIDs and so on till I find all the related bssIDs.
bssIDs | accIDs
-------+-------
ABC    | 4424
ABC    | 56424
ABC    | 2383
A100BC | 2383
A100BC | 4943
A100BC | 4880
A100BC | 6325
A100BC | 4424
XYZ    | 123
The solution below works for an initial table of 100K rows, but it runs for more than 16 hours on a dataset of 20 million rows. I am trying to use dicts instead of lists, but unlike with a list, I am unable to change a dict while iterating over it.
import time

accIds = {4880: ['A100BC'], 6325: ['A100BC'], 2383: ['A100BC','ABC'], 4424: ['A100BC','ABC'], 4943: ['A100BC'], 56424: ['ABC'], 123: ['XYZ']}
bssIds = {'ABC': [4424,56424,2383], 'A100BC': [2383,4943,4880,6325,4424], 'XYZ': [123]}

def findBIDs(aID):
    return accIds[aID]

def findAIDs(bID):
    return bssIds[bID]

def getList(Ids):
    return Ids.keys()

def checkList(inputList, value):
    return (value in inputList)

def addToList(inputList, value):
    return inputList.append(value)

def removeFromList(inputList, value):
    return inputList.remove(value)

aIDlist = list(getList(accIds))
bIDlist = list(getList(bssIds))
bRelations = {}
runningList = list()
for x in bIDlist:
    if not checkList(runningList, x):
        aList = list()
        bList = list()
        addToList(bList, x)
        for y in bList:
            for c in findAIDs(y):
                if not checkList(aList, c):
                    addToList(aList, c)
            for z in aList:
                for a in findBIDs(z):
                    if not checkList(bList, a):
                        addToList(bList, a)
        bRelations.update({time.time_ns(): bList})
        runningList.extend(bList)
print(bRelations)
Output : {1652374114032173632: ['ABC', 'A100BC'], 1652374114032180888: ['XYZ']}
Please suggest whether there is a way to update a dict while iterating over it, or whether a recursive solution can be applied here.
This is the fastest I could think of:
accIds = {4880: frozenset(['A100BC']), 6325: frozenset(['A100BC']), 2383: frozenset(['A100BC','ABC']), 4424: frozenset(['A100BC','ABC']), 4943: frozenset(['A100BC']), 56424: frozenset(['ABC']), 123: frozenset(['XYZ'])}
bssIds = {'ABC': frozenset([4424,56424,2383]), 'A100BC': frozenset([2383,4943,4880,6325,4424]), 'XYZ': frozenset([123])}

def search_bssid(bssId):
    traversed_accIds = set()
    traversed_bssIds = {bssId}
    accIds_to_check = []
    bssIds_to_check = [bssId]
    while bssIds_to_check:
        bssId = bssIds_to_check.pop()
        new_accids = bssIds[bssId] - traversed_accIds
        traversed_accIds.update(new_accids)
        accIds_to_check.extend(new_accids)
        while accIds_to_check:
            accId = accIds_to_check.pop()
            new_bssids = accIds[accId] - traversed_bssIds
            traversed_bssIds.update(new_bssids)
            bssIds_to_check.extend(new_bssids)
    return traversed_bssIds

print(search_bssid("ABC"))

‘PicklingError’ raised when applying functions inside a class with PySpark

I'm trying to use pandas functions in Spark with applyInPandas. When I run the transformation from inside a class, it raises: pickle.PicklingError: Could not serialize object: Exception: It appears that you are attempting to reference SparkContext from a broadcast variable, action, or transformation. SparkContext can only be used on the driver, not in code that it run on workers. For more information, see SPARK-5063.
My script runs fine when written as plain functions:
from scipy.stats import kendalltau
import numpy as np
import pandas as pd

def kendall(dat, a, b):
    kentmp = []
    ken = [np.nan, np.nan]
    if type(a) is list:
        if dat.shape[0] > 3:
            for item in a:
                kentmp.append(kendalltau(dat[item], dat[b])[0])
            tmp = pd.Series(kentmp, index=a).dropna()
            if tmp.shape[0] > 0:
                cato = tmp.idxmax()
                if (tmp < 0).any():
                    cato = tmp.abs().idxmax()
                ken = [cato, tmp[cato]]
        index = ['category', 'corr']
    else:
        if dat.shape[0] >= 10:
            ken = [kendalltau(dat[a], dat[b])[0], dat.shape[0]]
        index = ['corr', 'N']
    return pd.Series(ken, index=index)

def kendall_process(pdf):
    result = pdf.groupby(['step_id','unit_id']).apply(kendall, 'process', 'label')
    result = pd.DataFrame(result).reset_index()
    #result.columns = ['step_id','unit_id','corr','N']
    pdf['label'] = pdf.label.astype('int')
    result_ = pdf.groupby(['step_id','unit_id'])['label'].mean().reset_index()
    result = pd.merge(result, result_, on=['step_id','unit_id'], how='left')
    result.columns = ['step_id','unit_id','corr','N','ratio']
    return result

result = datInOut.groupBy('step_id','unit_id').applyInPandas(kendall_process, schema='step_id string,\
                                                                                      unit_id string,\
                                                                                      corr float,\
                                                                                      N long,\
                                                                                      ratio float')
result.show(5)
+--------------+--------+-----------+----+-----+
| step_id| unit_id| corr| N|ratio|
+--------------+--------+-----------+----+-----+
|10303_A2AOI300|A2AOI300| null|null| 0.0|
|17613_A2AOI500|A2AOI500|-0.13477948| 14| 0.5|
|1B304_A2MAC100|A2MAC100| null|null| 1.0|
|1A106_A2SPR100|A2SPR100| null|null| 1.0|
|19103_A2AOI800|A2AOI800| null|null| 0.5|
+--------------+--------+-----------+----+-----+
only showing top 5 rows
but when I move it into a class, it raises the PicklingError:
@staticmethod
def kendall(dat, a, b):
    kentmp = []
    ken = [np.nan, np.nan]
    if type(a) is list:
        if dat.shape[0] > 3:
            for item in a:
                kentmp.append(kendalltau(dat[item], dat[b])[0])
            tmp = pd.Series(kentmp, index=a).dropna()
            if tmp.shape[0] > 0:
                cato = tmp.idxmax()
                if (tmp < 0).any():
                    cato = tmp.abs().idxmax()
                ken = [cato, tmp[cato]]
        index = ['category', 'corr']
    else:
        if dat.shape[0] >= 10:
            ken = [kendalltau(dat[a], dat[b])[0], dat.shape[0]]
        index = ['corr', 'N']
    return pd.Series(ken, index=index)

@staticmethod
def kendall_delay(pdf):
    result = pdf.groupby(['step_id','equip_id']).apply(QTWorker.kendall, 'delay', 'label')
    result = pd.DataFrame(result).reset_index()
    pdf['label'] = pdf.label.astype('int')
    result_ = pdf.groupby(['step_id', 'equip_id'])['label'].mean().reset_index()
    result = pd.merge(result, result_, on=['step_id', 'equip_id'], how='left')
    result.columns = ['step_id', 'equip_id', 'corr', 'N', 'ratio']
    return result

ret = datQ.groupBy(self.step, self.equip).applyInPandas(self.kendall_delay, schema='step_id string,equip_id string,corr float,N long,ratio float')
As you can see, I've already decorated the functions with @staticmethod, but it still doesn't work. I really want to know how to fix it!
I don't know why, but I've solved it by putting the kendall function inside the function passed to applyInPandas (kendall_process below).
I'd really like to figure out the reason!
@staticmethod
def kendall_process(pdf):
    def kendall(dat, a, b):
        kentmp = []
        ken = [np.nan, np.nan]
        if type(a) is list:
            if dat.shape[0] > 3:
                for item in a:
                    kentmp.append(kendalltau(dat[item], dat[b])[0])
                tmp = pd.Series(kentmp, index=a).dropna()
                if tmp.shape[0] > 0:
                    cato = tmp.idxmax()
                    if (tmp < 0).any():
                        cato = tmp.abs().idxmax()
                    ken = [cato, tmp[cato]]
            index = ['category', 'corr']
        else:
            if dat.shape[0] >= 10:
                ken = [kendalltau(dat[a], dat[b])[0], dat.shape[0]]
            index = ['corr', 'N']
        return pd.Series(ken, index=index)

    result = pdf.groupby(['step_id','equip_id']).apply(kendall, 'process', 'label')
    result = pd.DataFrame(result).reset_index()
    pdf['label'] = pdf.label.astype('int')
    result_ = pdf.groupby(['step_id', 'equip_id'])['label'].mean().reset_index()
    result = pd.merge(result, result_, on=['step_id', 'equip_id'], how='left')
    result.columns = ['step_id', 'equip_id', 'corr', 'N', 'ratio']
    return result
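A likely explanation, offered as an assumption rather than something confirmed in the thread: referencing QTWorker.kendall from inside kendall_delay forces pickle to serialize the QTWorker class itself, and if the class or its instances hold a reference to the SparkSession/SparkContext, serialization fails with the SPARK-5063 error. Nesting kendall removes that class reference from the pickled closure. Defining both functions at module level should avoid the problem for the same reason; a minimal sketch:

# Module-level functions carry no reference to QTWorker (or to any
# SparkContext stored on it), so they pickle cleanly for the workers.
def kendall(dat, a, b):
    ...  # same body as the kendall method above

def kendall_process(pdf):
    ...  # same body as above, but calling the module-level kendall

# Inside the class, pass the module-level function to Spark:
# ret = datQ.groupBy(self.step, self.equip).applyInPandas(
#     kendall_process,
#     schema='step_id string,equip_id string,corr float,N long,ratio float')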

Read file to determine the rules

I have an Excel file with user-defined business rules as below:
Column_Name|Operator|Column_Value1|Operand|RuleID|Result
ABC | Equal| 12| and| 1| 1
CDE | Equal| 10| and| 1| 1
XYZ | Equal| AD| | 1| 1.5
ABC | Equal| 11| and| 2| 1
CDE | Equal| 10| | 2| 1.2
and so on (the | symbols are just for formatting).
Input file (CSV) will look like below:
ABC,CDE,XYZ
12,10,AD
11,10,AD
The goal is to derive an output column called Result by looking up each input row against the user-defined business rules.
Output Expected:
ABC,CDE,XYZ,Result
12,10,AD,1.5
11,10,AD,1.2
So far I have tried to dynamically generate an if/elif chain and assign it to a function, so that I can pass it to the statement below to apply the rules:
output_df['result'] = input_df.apply(result_func, axis=1)
When I hand-code the rules in the function, it works as shown below:
def result_func(input_df):
    if (input_df['ABC'] == 12):
        return '1.25'
    elif (input_df['ABC'] == 11):
        return '0.25'
    else:
        return '1'
Is this the right way to handle this scenario? If so, how do I pass the dynamically generated if/elif chain to the function?
Code
import pandas as pd
import csv

# Load rules table
rules_table = []
with open('rules.csv') as csvfile:
    reader = csv.DictReader(csvfile, delimiter='|')
    for row in reader:
        rules_table.append([x.strip() for x in row.values()])

# Load CSV file into DataFrame
df = pd.read_csv('data.csv', sep=",")

def rules_eval(row, rules):
    " Steps through rules table for appropriate value "
    def operator_eval(op, col, value):
        if op == 'Equal':
            return str(row[col]) == str(value)
        else:
            # Currently only Equal supported
            raise ValueError(f"Unsupported Operator Value {op}, only Equal allowed")

    prev_rule = '~'
    for col, op, val, operand, rule, res in rules:
        # loop through rows of rule table
        if prev_rule != rule:
            # rule ID changed so we can follow rule chains again
            ignore_rule = False
        if not ignore_rule:
            if operator_eval(op, col, val):
                if operand != 'and':
                    return res
            else:
                # Rule didn't work for an item in group
                # ignore subsequent rules with this id
                ignore_rule = True
        prev_rule = rule
    return None

df['results'] = df.apply(lambda row: rules_eval(row, rules_table), axis=1)
print(df)
Output
   ABC  CDE XYZ results
0   12   10  AD     1.5
1   11   10  AD     1.2
Explanation
df.apply - applies the rules_eval function to each row of the DataFrame.
The output is placed into column 'result' via
df['result'] = ...
Handling Rule Priority
Change
A Priority column was added to the rules table so rules with the same RuleID are processed in order of priority.
Priority order is decided by the tuple ordering added to the heap, currently:
Priority, Column_Name, Operator, Column_Value, Operand, RuleID, Result
Code
import pandas as pd
import csv
from collections import namedtuple
from heapq import (heappush, heappop)

# Load CSV file into DataFrame
df = pd.read_csv('data.csv', sep=",")

class RulesEngine():
    ###########################################
    # Static members
    ###########################################
    # Named tuple for rules
    fieldnames = 'Column_Name|Operator|Column_Value1|Operand|RuleID|Priority|Result'
    Rule = namedtuple('Rule', fieldnames.replace('|', ' '))
    number_fields = fieldnames.count('|') + 1

    ###########################################
    # Members
    ###########################################
    def __init__(self, table_file):
        # Load rules table
        rules_table = []
        with open(table_file) as csvfile:
            reader = csv.DictReader(csvfile, delimiter='|')
            for row in reader:
                fields = [self.convert(x.strip()) for x in row.values() if x is not None]
                if len(fields) != self.number_fields:
                    # Incorrect number of values
                    error = f"Rules require {self.number_fields} fields per row, was given {len(fields)}"
                    raise ValueError(error)
                rules_table.append([self.convert(x.strip()) for x in row.values()])
                #rules_table.append([x.strip() for x in row.values()])
        self.rules_table = rules_table

    def convert(self, s):
        " Convert string to (int, float, or leave current value) "
        try:
            return int(s)
        except ValueError:
            try:
                return float(s)
            except ValueError:
                return s

    def operator_eval(self, row, rule):
        " Determines value for a rule "
        if rule.Operator == 'Equal':
            return str(row[rule.Column_Name]) == str(rule.Column_Value1)
        else:
            # Currently only Equal supported
            error = f"Unsupported Operator {rule.Operator}, only Equal allowed"
            raise ValueError(error)

    def get_rule_value(self, row, rule_queue):
        " Value of a rule or None if no matching rule "
        found_match = True
        while rule_queue:
            priority, rule_to_process = heappop(rule_queue)
            if not self.operator_eval(row, rule_to_process):
                found_match = False
                break
        return rule_to_process.Result if found_match else None

    def rules_eval(self, row):
        " Steps through rules table for appropriate value "
        rule_queue = []
        for index, r in enumerate(self.rules_table):
            # Create named tuple with current rule values
            current_rule = self.Rule(*r)
            if not rule_queue or \
               rule_queue[-1][1].RuleID == current_rule.RuleID:
                # note: rule_queue[-1][1].RuleID is previous rule
                # Within same rule group or last rule of group
                priority = current_rule.Priority
                # heap orders rules by priority
                # (lowest numbers are processed first)
                heappush(rule_queue, (priority, current_rule))
                if index < len(self.rules_table)-1:
                    continue  # not at last rule, so keep accumulating
            # Process rules in the rules queue
            rule_value = self.get_rule_value(row, rule_queue)
            if rule_value:
                return rule_value
            else:
                # Starting over with new rule group
                rule_queue = []
                priority = current_rule.Priority
                heappush(rule_queue, (priority, current_rule))
        # Process final queue if not empty
        return self.get_rule_value(row, rule_queue)

# Init rules engine with rules from CSV file
rules_engine = RulesEngine('rules.csv')

df['results'] = df.apply(rules_engine.rules_eval, axis=1)
print(df)
Data Table
ABC,CDE,XYZ
12,10,AD
11,10,AD
12,12,AA
Rules Table
Column_Name|Operator|Column_Value1|Operand|RuleID|Priority|Result
ABC | Equal| 12| and| 1| 2|1
CDE | Equal| 10| and| 1| 1|1
XYZ | Equal| AD| and| 1| 3|1.5
ABC | Equal| 11| and| 2| 1|1
CDE | Equal| 10| foo| 2| 2|1.2
ABC | Equal| 12| foo| 3| 1|1.8
Output
   ABC  CDE XYZ results
0   12   10  AD     1.5
1   11   10  AD     1.2
2   12   12  AA     1.8
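For completeness, a quick end-to-end sketch of how the engine is driven, using the file names from the answer (writing the sample tables to disk with pathlib is my addition, just to make the example self-contained):

from pathlib import Path

# Write the sample inputs shown above (names as used in the answer)
Path('data.csv').write_text("ABC,CDE,XYZ\n12,10,AD\n11,10,AD\n12,12,AA\n")
Path('rules.csv').write_text(
    "Column_Name|Operator|Column_Value1|Operand|RuleID|Priority|Result\n"
    "ABC | Equal| 12| and| 1| 2|1\n"
    "CDE | Equal| 10| and| 1| 1|1\n"
    "XYZ | Equal| AD| and| 1| 3|1.5\n"
    "ABC | Equal| 11| and| 2| 1|1\n"
    "CDE | Equal| 10| foo| 2| 2|1.2\n"
    "ABC | Equal| 12| foo| 3| 1|1.8\n")

# Then the answer's driver code runs unchanged:
rules_engine = RulesEngine('rules.csv')
df = pd.read_csv('data.csv', sep=",")
df['results'] = df.apply(rules_engine.rules_eval, axis=1)
print(df)  # rows resolve to 1.5, 1.2 and 1.8 as shown above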

Access a field by name from query results in Django (Python)

DB table
select * from AAA;
id | name | grade
--------------------
1 | john | A
2 | cavin | B
django
grade_list = AAA.objects.all()
for item in grade_list:
    print item.name
result ->
john
cavin
============================================
I want to change this code (keeping the same behavior):
grade_list = AAA.objects.all()
print_field = 'name'
for item in grade_list:
    print item.( print_field... )
result ->
john
cavin
Q) Please fill in the blank. Is it possible?
Yes, you can do this with the built-in getattr function.
grade_list = AAA.objects.all()
print_field = 'name'
for item in grade_list:
    print getattr(item, print_field)
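If only that single field is needed, the QuerySet can also return it directly. A small sketch using Django's values_list (a standard QuerySet method; shown with a Python 3 print, unlike the Python 2 style above):

# Fetch just the 'name' column; flat=True yields bare values instead of 1-tuples
for name in AAA.objects.values_list('name', flat=True):
    print(name)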
