component object has no attribute 'ln_solver' - python

I'm building a large new OpenMDAO component. When I run it, OpenMDAO crashes with AttributeError: 'myNewComponent' object has no attribute 'ln_solver' during the setup stage. What does this message mean?
import numpy as np
from openmdao.api import Group, Component, Problem, IndepVarComp, ParallelGroup
from openmdao.api import ScipyOptimizer
from openmdao.core.mpi_wrap import MPI

if MPI:
    from openmdao.core.petsc_impl import PetscImpl as impl
else:
    from openmdao.api import BasicImpl as impl

class WindSEComp(Component):
    def __init__(self, nTurbs, rotor_diameter):
        super(WindSEComp, self).__init__()
        self.add_param('turbineX', val=np.ones(nTurbs), units='m', desc='x positions of turbines in original ref. frame')
        self.add_output('AEP', shape=1)

    def solve_nonlinear(self, params, unknowns, resids):
        mx_opt = params['turbineX']
        unknowns['AEP'] = np.sum(mx_opt)

    def linearize(self, params, unknowns, resids):
        mx_opt = params['turbineX']
        J = {}
        J['AEP', 'turbineX'] = 3 * mx_opt
        return J

prob = Problem(impl=impl, root=WindSEComp(nTurbs=4, rotor_diameter=126.0))
#prob.driver = ScipyOptimizer()
#prob.driver.add_desvar('turbineX')
#prob.driver.add_objective('AEP')
prob.setup()
prob.run()

You're trying to use a Component as if it were a Group: they are not the same thing. During setup, OpenMDAO expects the root of a Problem to be a Group, which carries attributes such as ln_solver; a bare Component has no ln_solver, hence the AttributeError. Add your component to a Group instead:
top = Problem()
root = top.root = Group()
root.add('g', WindSEComp(nTurbs=4, rotor_diameter=126.0))
top.setup()
top.run()
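If you later uncomment the driver lines, the design variable also has to come from an IndepVarComp connected to the component input. A rough, untested sketch of that wiring (the 'p' and 'g' names are just placeholders that follow the snippet above):

import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ScipyOptimizer

top = Problem()
root = top.root = Group()
root.add('p', IndepVarComp('turbineX', np.ones(4)))          # source for the design variable
root.add('g', WindSEComp(nTurbs=4, rotor_diameter=126.0))
root.connect('p.turbineX', 'g.turbineX')                      # wire the independent variable into the component input

top.driver = ScipyOptimizer()
top.driver.options['optimizer'] = 'SLSQP'
top.driver.add_desvar('p.turbineX')                           # desvars must point at an IndepVarComp output
top.driver.add_objective('g.AEP')

top.setup()
top.run()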

Related

Python HDBScan class always fails on second iteration before even entering first function

I am attempting to look at conglomerated outlier information, using several different scikit-learn, HDBSCAN, and custom outlier-detection classes. However, for some reason any class that uses HDBSCAN cannot be iterated over, while all the other scikit-learn and custom classes can. The failure consistently occurs on the second pass over the HDBSCAN class and happens immediately at algorithm.fit(tmp). When debugging the script, the error appears to be thrown before the first line of the class is even reached.
Any help? Below is a minimal reproducible example:
import numpy as np
import pandas as pd
import hdbscan
from sklearn.datasets import make_blobs
from sklearn.svm import OneClassSVM
from sklearn.ensemble import IsolationForest
from sklearn.covariance import EllipticEnvelope

class DBClass():

    def __init__(self, random = None):
        self.random = random

    def fit(self, data):
        self.train_data = data
        cluster = hdbscan.HDBSCAN()
        cluster.fit(self.train_data)
        self.fit = cluster

    def predict(self, data):
        self.predict_data = data
        if self.train_data.equals(self.predict_data):
            return self.fit.probabilities_

def OutlierEnsemble(df, anomaly_algorithms = None, num_slices = 5, num_columns = 7, outliers_fraction = 0.05):
    if isinstance(df, np.ndarray):
        df = pd.DataFrame(df)
    assert isinstance(df, pd.DataFrame)

    if not anomaly_algorithms:
        anomaly_algorithms = [
            ("Robust covariance",
             EllipticEnvelope(contamination=outliers_fraction)),
            ("One-Class SVM",
             OneClassSVM(nu=outliers_fraction,
                         kernel="rbf")),
            ("Isolation Forest",
             IsolationForest(contamination=outliers_fraction)),
            ("HDBScan LOF",
             DBClass()),
        ]

    data = []
    for i in range(1, num_slices + 1):
        data.append(df.sample(n = num_columns, axis = 1, replace = False))

    predictions = []
    names = []

    for tmp in data:
        counter = 0
        for name, algorithm in anomaly_algorithms:
            algorithm.fit(tmp)
            predictions.append(algorithm.predict(tmp))
            counter += 1
            names.append(f"{name}{counter}")

    return predictions

blobs, labels = make_blobs(n_samples=3000, n_features=12)
OutlierEnsemble(blobs)
The error provided is not the most helpful.
Traceback (most recent call last):
  File "<ipython-input-4-e1d4b63cfccd>", line 75, in <module>
    OutlierEnsemble(blobs)
  File "<ipython-input-4-e1d4b63cfccd>", line 66, in OutlierEnsemble
    algorithm.fit(tmp)
TypeError: 'HDBSCAN' object is not callable
Inside DBClass.fit, the line self.fit = cluster rebinds the name fit on the instance. On the second pass, algorithm.fit is no longer the method but the fitted HDBSCAN object, so calling it raises TypeError: 'HDBSCAN' object is not callable.
You could perhaps use something like,
class DBClass():

    def __init__(self, random = None):
        self.random = random

    def fit(self, data):
        self.train_data = data
        cluster = hdbscan.HDBSCAN()
        cluster.fit(self.train_data)
        self.myfit = cluster  # save calculated cluster

    def predict(self, data):
        self.predict_data = data
        if self.train_data.equals(self.predict_data):
            return self.myfit.probabilities_  # use calculated cluster
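The underlying pitfall is plain Python attribute lookup: an instance attribute shadows a class-level method of the same name. A tiny, self-contained illustration (hypothetical class, nothing to do with hdbscan itself):

class Demo:
    def fit(self, x):
        self.fit = x              # rebinds the name 'fit' on the instance

d = Demo()
d.fit(42)                         # first call resolves to the class method
try:
    d.fit(42)                     # second call finds the instance attribute (an int) instead
except TypeError as e:
    print(e)                      # 'int' object is not callable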

Python unknown operand type - for custom class

I have a custom class in my Python code that handles k-means clustering. The class takes some arguments to customize the clustering; however, when subtracting two values from a list passed to the class, I get the following error:
Traceback (most recent call last):
  File "/home/dev/PycharmProjects/KMeans/KMeansApplication.py", line 22, in <module>
    application()
  File "/home/dev/PycharmProjects/KMeans/KMeansApplication.py", line 16, in application
    opt_num_clusters = cluster_calculator.calculate_optimum_clusters()
  File "/home/dev/PycharmProjects/KMeans/ClusterCalculator.py", line 19, in calculate_optimum_clusters
    self.init_opt_line()
  File "/home/dev/PycharmProjects/KMeans/ClusterCalculator.py", line 33, in init_opt_line
    self. m = (self.sum_squared_dist[0] - self.sum_squared_dist[1]) / (1 - self.calc_border)
TypeError: unsupported operand type(s) for -: 'KMeans' and 'KMeans'
Here is the code of my custom class:
import KMeansClusterer
from math import sqrt, fabs
from matplotlib import pyplot as plp

class ClusterCalculator:
    m = 0
    b = 0
    sum_squared_dist = []
    derivates = []
    distances = []
    line_coordinates = []

    def __init__(self, calc_border, data):
        self.calc_border = calc_border
        self.data = data

    def calculate_optimum_clusters(self):
        self.calculate_squared_dist()
        self.init_opt_line()
        self.calc_distances()
        self.calc_line_coordinates()
        opt_clusters = self.get_optimum_clusters()
        print("Evaluated", opt_clusters, "as optimum number of clusters")
        return opt_clusters

    def calculate_squared_dist(self):
        for k in range(1, self.calc_border):
            kmeans = KMeansClusterer.KMeansClusterer(k, self.data)
            self.sum_squared_dist.append(kmeans.calc_custom_params(self.data, k))

    def init_opt_line(self):
        # here the error is thrown
        self.m = (self.sum_squared_dist[0] - self.sum_squared_dist[1]) / (1 - self.calc_border)
        self.b = (1 * self.sum_squared_dist[0] - self.calc_border * self.sum_squared_dist[0]) / (1 - self.calc_border)

    def calc_y_value(self, x_calc):
        return self.m * x_calc + self.b

    def calc_line_coordinates(self):
        for i in range(1, self.calc_border):
            self.line_coordinates.append(self.calc_y_value(i))

    def calc_distances(self):
        for i in range(1, self.calc_border):
            self.distances.append(sqrt(fabs(self.calc_y_value(i))))
        print("For border", self.calc_border, ", calculated the following distances: \n", self.distances)

    def get_optimum_clusters(self):
        return self.distances.index((max(self.distances)))

    def plot_results(self):
        plp.plot(range(1, self.calc_border), self.sum_squared_dist, "bx-")
        plp.plot(range(1, self.calc_border), self.line_coordinates, "bx-")
        plp.xlabel("Number of clusters")
        plp.ylabel("Sum of squared distances")
        plp.show()
I'm attaching the KMeansClusterer as well, because sum_squared_dist is filled with values from it:
from sklearn.cluster import KMeans
from matplotlib import pyplot as plp

class KMeansClusterer:

    def __init__(self, clusters, data):
        self.clusters = clusters
        self.data = data

    def cluster(self):
        kmeans = KMeans(n_clusters=self.cluster(), random_state=0).fit(self.data)
        print("Clustered", len(kmeans.labels_), "GTINs")
        for i, cluster_center in enumerate(kmeans.cluster_centers_):
            plp.plot(cluster_center, label="Center {0}".format(i))
        plp.legend(loc="best")
        plp.show()

    def calc_custom_params(self, data_frame, clusters):
        kmeans = KMeans(n_clusters=clusters, random_state=0).fit(data_frame)
        return kmeans

    def cluster_without_plot(self):
        return KMeans(n_clusters=self.cluster(), random_state=0).fit(self.data)
I cannot imagine why '-' should be unsupported; I am trying to subtract two list values that I expect to be integers, and to subtract an integer variable from 1.
Python cannot automatically subtract arbitrary class instances. You need to implement the __sub__ method on your class so Python knows how to handle subtraction for it. You can find the full operator reference here: https://docs.python.org/3/library/operator.html
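For completeness, a minimal sketch of what implementing __sub__ looks like (a hypothetical class, not your KMeans objects):

class Measurement:
    def __init__(self, value):
        self.value = value

    def __sub__(self, other):
        # defines what 'a - b' means for instances of this class
        return Measurement(self.value - other.value)

print((Measurement(5) - Measurement(2)).value)   # prints 3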
KMeans.fit() returns the fitted estimator itself, which means calc_custom_params() returns a KMeans instance, so your list sum_squared_dist does not contain integers: its elements are objects of the sklearn.cluster.KMeans class.
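Assuming the intent was the elbow method, i.e. collecting the sum of squared distances for each k, one possible fix is to return the fitted estimator's inertia_ (a float) instead of the estimator itself; a sketch:

    def calc_custom_params(self, data_frame, clusters):
        kmeans = KMeans(n_clusters=clusters, random_state=0).fit(data_frame)
        return kmeans.inertia_   # sum of squared distances to the closest cluster center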

what is the use of .data in pytorch

I just got the code from https://github.com/heykeetae/Self-Attention-GAN (the file is spectral.py); partial code is below. I don't really understand what the use of .data is. Is it a method of some class? If it is, which class does it belong to?
import torch
from torch.optim.optimizer import Optimizer, required
from torch.autograd import Variable
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from torch.nn import Parameter

def l2normalize(v, eps=1e-12):
    return v / (v.norm() + eps)

class SpectralNorm(nn.Module):

    def _make_params(self):
        w = getattr(self.module, self.name)

        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]

        u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = Parameter(w.data)
OK, so SpectralNorm.__init__ sets self.module = module and self.name = name (default: 'weight'), where module is a constructor argument. It seems to be called like SpectralNorm(nn.Conv2d(3, conv_dim, 4, 2, 1)), so module is an nn.Conv2d instance, which subclasses nn.Module. Following the trail, getattr(self.module, self.name) therefore returns the convolution's weight, which is a torch.nn.Parameter, itself a subclass of torch.Tensor; .data is simply the Tensor attribute that exposes the underlying values without any autograd tracking.
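A short, generic illustration of what .data gives you (plain PyTorch, nothing specific to this repository):

import torch
from torch.nn import Parameter

w = Parameter(torch.randn(3, 2))   # a Parameter is a subclass of torch.Tensor
print(type(w))                     # <class 'torch.nn.parameter.Parameter'>
print(type(w.data))                # <class 'torch.Tensor'>: same values, no autograd history
print(w.data.shape)                # torch.Size([3, 2])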

'numpy.ndarray' object has no attribute 'fitness'

I have this code for NSGA-III (an evolutionary algorithm), but I get the error 'numpy.ndarray' object has no attribute 'fitness'. The code generates reference points for NSGA-III selection and is based on the jMetal NSGA-III implementation (https://github.com/jMetal/jMetal). Please help me remove this error.
import copy
import random
import numpy as np
from deap import tools

class ReferencePoint(list):  # A reference point exists in objective space and has a set of individuals associated with it

    def __init__(self, *args):
        list.__init__(self, *args)
        self.associations_count = 0
        self.associations = []

def generate_reference_points(num_objs, num_divisions_per_obj):
    def gen_refs_recursive(work_point, num_objs, left, total, depth):
        if depth == num_objs - 1:
            work_point[depth] = left/total
            ref = ReferencePoint(copy.deepcopy(work_point))
            return [ref]
        else:
            res = []
            for i in range(left):
                work_point[depth] = i/total
                res = res + gen_refs_recursive(work_point, num_objs, left-i, total, depth+1)
            return res
    print(gen_refs_recursive([0]*num_objs, num_objs, num_objs*num_divisions_per_obj,
                             num_objs*num_divisions_per_obj, 0))

def find_ideal_point(individuals):
    'Finds the ideal point from a set individuals.'
    current_ideal = [np.infty] * len(individuals[0].fitness.values)  # here the error is raised
    for ind in individuals:
        # Use wvalues to accommodate both maximization and minimization problems.
        current_ideal = np.minimum(current_ideal,
                                   np.multiply(ind.fitness.wvalues, -1))
    print("Ideal Point is\n", current_ideal)

global individulas
individulas = np.random.rand(10, 4)
generate_reference_points(2, 4)
find_ideal_point(individulas)
You can check how to prepare an input to find_ideal_point in this Jupyter notebook. The implementation deals with records from deap.tools.Logbook, which are "evolution records as a chronological list of dictionaries", not NumPy arrays. In other words, find_ideal_point expects DEAP individuals that carry a fitness attribute; a plain array from np.random.rand has no such attribute, hence the error.
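A minimal sketch of building inputs that do carry a .fitness, using DEAP's creator and toolbox (the four-objective setup here is only illustrative):

import random
from deap import base, creator, tools

# individuals created through DEAP's creator carry a .fitness attribute
creator.create("FitnessMin", base.Fitness, weights=(-1.0,) * 4)
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("attr", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr, n=4)

individuals = [toolbox.individual() for _ in range(10)]
for ind in individuals:
    ind.fitness.values = tuple(ind)   # evaluate somehow; here the genes double as objective values

find_ideal_point(individuals)         # individuals[0].fitness.values now exists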

AlwaysError when running a testbench on a synchronizer

I encountered this error when running a testbench, together with a synchronizer built on two existing D-FFs.
File "/home/runner/design.py", line 28, in Sync
#always_seq(clk.posedge, reset=reset)
File "/usr/share/myhdl-0.8/lib/python/myhdl/_always_seq.py", line 76, in _always_seq_decorator
raise AlwaysSeqError(_error.ArgType)
myhdl.AlwaysError: decorated object should be a classic (non-generator) function
My testbench is outlined as follows.
from myhdl import *
from random import randrange

HALF_PERIOD = delay(10)  ### This makes a 20-ns clock signal
ACTIVE_HIGH = 1
G_DELAY = delay(15)

def Main():
    ### Signal declaration
    clk, d, dout = [Signal(intbv(0)) for i in range(3)]
    reset = ResetSignal(1, active=ACTIVE_HIGH, async=True)

    ### Module instantiation
    S1 = Sync(dout, d, clk, reset)

    ### Clk generator
    @always(HALF_PERIOD)
    def ClkGen():
        clk.next = not clk

    ### TB def
    @instance
    def Driver():
        yield(HALF_PERIOD)
        reset.next = 0
        for i in range(4):
            yield(G_DELAY)
            d.next = not d
        raise StopSimulation

    return ClkGen, Driver, S1

m1 = traceSignals(Main)
sim = Simulation(m1)
sim.run()
And my synchronizer is coded as follows.
from myhdl import *
from DFF import *

def Sync(dout, din, clk, reset):
    """ The module consists of two FFs with one internal signal
        External signals
            dout : output
            din  : input
            clk  : input
        Internal signal:
            F2F  : output-to-input signal that connects the two FFs together
    """
    ### Connectivity
    F2F = Signal(intbv(0))
    F1 = DFF(F2F, din, clk, reset)
    F2 = DFF(dout, F2F, clk, reset)

    ### Function
    @always_seq(clk.posedge, reset=reset)
    def SyncLogic():
        if reset:
            F2F.next = 0
            dout.next = 0
        else:
            F2F.next = din
            yield(WIRE_DELAY)
            dout.next = F2F

    return SyncLogic
and the FF prototype is coded as follows.
from myhdl import *

def DFF(dout, din, clk, reset):

    @always_seq(clk.posedge, reset=reset)
    def Flogic():
        if reset:
            dout.next = 0
        else:
            dout.next = din

    return Flogic
A similar testbench I coded earlier did work (with slight modifications), but it stopped working when the two modules were combined. Please clarify. Thank you.
To model a wire delay, use the "delay" argument in the Signal.
change
@always_seq(clk.posedge, reset=reset)
def SyncLogic():
    if reset:
        F2F.next = 0
        dout.next = 0
    else:
        F2F.next = din
        yield(WIRE_DELAY)
        dout.next = F2F

return SyncLogic
to:
dout = Signal(<type>, delay=WIRE_DELAY)
# ...

@always_seq(clk.posedge, reset=reset)
def synclogic():
    dout.next = din
With the "always_seq" don't define the reset (it is automatically added). If you want to explicitly define the reset use "#always(clock.posedge, reset.negedge)".
