Extract strain at nodes specified in a node set [Abaqus Python odb access]

Given an Abaqus odb file that contains a node set (e.g. 'ALL_SECS'), NODAL quantities such as coordinates ('COORD') or displacements ('U') can be extracted at the nodes of the node set with the following pattern:
select the step, frame and field output (e.g. 'COORD', 'U')
getSubset(region=...) of the field output
read the attributes of the resulting values
How can INTEGRATION_POINT quantities be extracted or interpolated at the nodes of the node set?
How can field output at the NODAL position be requested using Abaqus Python?
from odbAccess import *
import numpy as np

# Helper function: collect one attribute of each field value into an array
def values_to_array(values, dim=2, item='data'):
    length = len(values)
    array = np.zeros((length, dim), dtype='float64')
    for index in range(length):
        array[index, :] = getattr(values[index], item)
    return array
# Prepare and open
odb = openOdb(path='job.odb')  # solution of a 2D plane-stress model
instances = odb.rootAssembly.instances
instance = instances['PART']
sett = instance.nodeSets['ALL_SECS']
step = odb.steps.keys()[-1]

# Get coordinates and number of nodes in the node set
frame = odb.steps[step].frames[-1]
values_xy = frame.fieldOutputs['COORD'].getSubset(region=sett).values
xy = values_to_array(values=values_xy, dim=2, item='dataDouble')
nbr_xy = len(values_xy)
print('len(values_xy)')
print(len(values_xy))
# Get a nodal quantity and the number of nodes in the node set
uvw = np.zeros((nbr_xy, 2), dtype=float)
outp = odb.steps[step].frames[-1].fieldOutputs['U']
values_u = outp.getSubset(region=sett).values
uvw = values_to_array(values=values_u, dim=2, item='dataDouble')
print('len(values_u)')
print(len(values_u))

eps = np.zeros((nbr_xy, 4), dtype=float)
outp = odb.steps[step].frames[-1].fieldOutputs['E']
values_eps = outp.getSubset(position=ELEMENT_NODAL, region=sett).values
# values_eps = outp.getSubset(position=ELEMENT_NODAL).getSubset(region=sett).values
print('len(values_eps)')
print(len(values_eps))

values_eps_nodal = outp.getSubset(position=NODAL, region=sett).values
print('len(values_eps_nodal)')
print(len(values_eps_nodal))
Output:
len(values_xy)
147
len(values_u)
147
len(values_eps)
408
len(values_eps_nodal)
0

The following solution is a workaround to get the total strain (field output 'E') at the nodes of the node set 'ALL_SECS'. As the order of the extracted nodes is not known, location information, i.e. the coordinates of the nodes, is extracted as well: the i-th strain in eps belongs to the i-th coordinate in xy.
A direct request for strain at the NODAL position does not seem to exist in the Abaqus API, whereas node-based data like displacements can easily be extracted, see uv.
Key steps to extract strain data and locations at the element nodes:
Identify the coordinates.
Identify the mapping nodeLabel -> index.
Combine the values extrapolated to each node from the different elements using a moving average. (See the link for explanations.)
Note: the odb is from a 2D model.
from odbAccess import *
import numpy as np
from operator import attrgetter

def values_to_array(values, dim=2, item='data', dtype=np.float64):
    '''Thanks to https://stackoverflow.com/a/46925902/8935243'''
    # list() keeps this working in Python 2 (Abaqus) and Python 3 alike
    array = np.array(
        list(map(attrgetter(item), values)),
        dtype=dtype,
    )
    return array

def values_to_index_mapping(values, item='nodeLabel', check=True):
    node_labels = values_to_array(values, dim=1, item=item, dtype=np.int64)
    if check:
        # Every node label must occur exactly once
        assert len(set(node_labels)) == len(node_labels)
    mapping = {}
    for index, label in enumerate(node_labels):
        mapping[label] = index
    return mapping
odb = openOdb(path='job.odb')
instances = odb.rootAssembly.instances
instance = instances['PART']
sett = instance.nodeSets['ALL_SECS']
step = odb.steps.keys()[-1]

# Coordinates
frame = odb.steps[step].frames[-1]
values = frame.fieldOutputs['COORD'].getSubset(region=sett).values
xy = values_to_array(values=values, dim=2, item='data')

# Dimensions
nbr_xy = len(values)

# Mapping: nodeLabel -> index
index_map = values_to_index_mapping(values=values, check=True)

# Displacements
uv = np.zeros((nbr_xy, 2), dtype=float)
outp = odb.steps[step].frames[-1].fieldOutputs['U']
values = outp.getSubset(region=sett).values
uv[:, :] = values_to_array(values=values, dim=2, item='data')

# Strains
eps = np.zeros((nbr_xy, 4), dtype=float)
tmp = np.zeros((nbr_xy, 1), dtype=float)
values_eps = odb.steps[step].frames[-1].fieldOutputs['E'].getSubset(
    position=ELEMENT_NODAL,
    region=sett,
).values

# Moving average, as ELEMENT_NODAL does no averaging and returns
# one value per adjacent element for each node in sett
for ee in values_eps:
    index = index_map[ee.nodeLabel]
    tmp[index] += 1
    eps[index] = (eps[index] * (tmp[index] - 1) + ee.data) / tmp[index]

odb.close()
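The combined arrays can then be written out for post-processing. A minimal sketch; the file name and the assumption that 'E' carries the four plane-stress components E11, E22, E33, E12 are illustrative, not part of the workaround itself:

# Sketch: store node coordinates and averaged nodal strains side by side.
# 'strain_at_nodes.csv' and the component order are assumptions.
np.savetxt(
    'strain_at_nodes.csv',
    np.hstack([xy, eps]),
    delimiter=',',
    header='x,y,E11,E22,E33,E12',
)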


ValueError: Expected 2D array, got 1D array instead (signal processing)

Can someone help me fix this error? I am a beginner and am finding it difficult to figure out how to fix it.
This is the error I am getting:
ValueError: Expected 2D array, got 1D array instead:
array=[ 282 561 837 ... 649442 649701 649957].
Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.
class MyDataset(Dataset):
    def __init__(self, patient_ids, bih2aami=True):
        self.patient_ids = patient_ids  # list of patient IDs
        #self.directory=""
        self.nb_qrs = 99  # number of beats
        self.idx_tuples = flatten([[(patient_idx, rpeak_idx) for rpeak_idx in range(self.nb_qrs)]
                                   for patient_idx in range(len(patient_ids))])
        self.bih2aami = bih2aami

    def __len__(self):  # returns the size of the dataset
        return len(self.idx_tuples)

    def __getitem__(self, idx):  # get one sample from the dataset
        patient_idx, rpeak_idx = self.idx_tuples[idx]
        patient_id = self.patient_ids[patient_idx]
        file = self.directory + patient_id
        signal, normal_qrs_pos = get_signal(file)

        # Create a range of window positions
        if (idx // 2 == idx / 2):  # even idx
            qrs_pos = normal_qrs_pos[rpeak_idx]
        else:
            qrs_pos = normal_qrs_pos[rpeak_idx] + randint(-round(.25 * fs), round(.25 * fs))

        #win_pos = normal_qrs_pos # FIND CORRECT WIN_POS FOR THIS patient
        beat, label = extract_beat(signal, qrs_pos, normal_qrs_pos)

        if (label == 1):
            print("==== FOUND ONE MATCHING QRS === pos = ", qrs_pos)
        else:
            print("==== NO MATCH === pos = ", qrs_pos)

        X, y = torch.tensor(beat).float(), torch.tensor(label).float()
        print(y.size())
        return X, y
The code for beat extraction:
def extract_beat(signal, win_pos, qrs_positions, win_msec=40, fs=360, start_beat=36, end_beat=108):
    """
    win_pos        position at which you place the window of your beat
    qrs_positions  (list) the qrs indices from the annotations (read them
                   from the atr file) -> obtained from annotation.sample
    win_msec       window length in milliseconds
    """
    # extract the signal
    signal = np.array(signal)
    start = int(max(win_pos - start_beat, 0))
    stop = start + start_beat + end_beat + 1
    beat = signal[start:stop]

    # compute the nearest neighbour of win_pos among qrs_positions
    tolerance = (fs * win_msec) // 1000  # samples at a distance < tolerance are matched
    nbr = NearestNeighbors(n_neighbors=1).fit(qrs_positions)
    distances, indices = nbr.kneighbors(np.array([[win_pos]]).reshape(-1, 1))

    # label
    if distances[0][0] <= tolerance:
        label = 1
    else:
        label = 0
    print(distances[0], tolerance, label)
    return beat, label
As the sklearn docs say (https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html#sklearn.neighbors.NearestNeighbors.fit), you should pass a 2D array of shape (n_samples, n_features) to the fit method.
And, as the error message suggests, you can simply reshape the array:
# compute the nearest neighbour of win_pos among qrs_positions
tolerance = (fs * win_msec) // 1000  # samples at a distance < tolerance are matched
nbr = NearestNeighbors(n_neighbors=1).fit(np.array(qrs_positions).reshape(-1, 1))  # np.array() in case qrs_positions is a plain list
distances, indices = nbr.kneighbors(np.array([[win_pos]]).reshape(-1, 1))
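For illustration, a minimal self-contained example of the fix (the first values are taken from the error message above; the query point 560 is made up):

import numpy as np
from sklearn.neighbors import NearestNeighbors

qrs_positions = np.array([282, 561, 837])   # 1D, shape (3,)
X = qrs_positions.reshape(-1, 1)            # 2D, shape (3, 1): n_samples x 1 feature
nbr = NearestNeighbors(n_neighbors=1).fit(X)
distances, indices = nbr.kneighbors(np.array([[560]]))
print(distances[0][0], indices[0][0])       # 1.0 1 -> the nearest sample is 561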

Regridding from one irregular lat lon grid to another irregular lat lon grid

I have two sets of satellite data. For both sets, I have the pixel geometry (latitude and longitude of each corner of the pixel). I would like to regrid one set to the other. Thus, my goal is area-weighted regridding from an irregular grid to another irregular grid. I am aware of xESMF, but am unsure if that is the best tool for the job. Perhaps iris area weighting regrid would be appropriate?
I've run into similar things in the past. I'm on Windows, and xESMF wasn't really an option for me.
I've written this package and added some methods for computing grid-to-grid weights:
https://github.com/Deltares/numba_celltree
(You can pip install it.)
The data structure can deal with fully unstructured 2D meshes and expects the data in such a format; see the code below.
You will need to make some changes: most likely your coordinates aren't named x and y. You will also need to update the ugrid2d_topology function somewhat, since I'm assuming regular quadrilateral grids here (they are only irregular when seen in each other's coordinate system).
It's still pretty straightforward: just make sure you have a 2D array of vertices and a face_node_connectivity array of shape (n_cell, 4) which maps, for every face, its four vertices. See this documentation for a little more background:
https://ugrid-conventions.github.io/ugrid-conventions/
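Before the full code, here is a tiny hand-made sketch of that layout (purely illustrative: two unit quads sharing an edge):

import numpy as np

# Six vertices (x, y) and two quadrilateral faces, counterclockwise
vertices = np.array([
    [0.0, 0.0], [1.0, 0.0], [2.0, 0.0],
    [0.0, 1.0], [1.0, 1.0], [2.0, 1.0],
])
# face_node_connectivity has shape (n_cell, 4): one row of vertex indices per face
face_node_connectivity = np.array([
    [0, 1, 4, 3],  # left quad
    [1, 2, 5, 4],  # right quad
])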
from typing import Union

import numpy as np
import pandas as pd
import pyproj
import xarray as xr
from numba_celltree import CellTree2d

FloatArray = np.ndarray
IntArray = np.ndarray

def _coord(da, dim):
    """
    Transform N xarray midpoints into N + 1 vertex edges.
    """
    delta_dim = "d" + dim  # e.g. dx, dy, dz, etc.
    # If empty array, return empty
    if da[dim].size == 0:
        return np.array(())
    if delta_dim in da.coords:  # equidistant or non-equidistant
        dx = da[delta_dim].values
        if dx.shape == () or dx.shape == (1,):  # scalar -> equidistant
            dxs = np.full(da[dim].size, dx)
        else:  # array -> non-equidistant
            dxs = dx
        _check_monotonic(dxs, dim)  # small validation helper, not shown here
    else:  # undefined -> equidistant
        if da[dim].size == 1:
            raise ValueError(
                f"DataArray has size 1 along {dim}, so cellsize must be provided"
                " as a coordinate."
            )
        dxs = np.diff(da[dim].values)
        dx = dxs[0]
        atolx = abs(1.0e-4 * dx)
        if not np.allclose(dxs, dx, atolx):
            raise ValueError(
                f"DataArray has to be equidistant along {dim}, or cellsizes"
                " must be provided as a coordinate."
            )
        dxs = np.full(da[dim].size, dx)
    dxs = np.abs(dxs)
    x = da[dim].values
    if not da.indexes[dim].is_monotonic_increasing:
        x = x[::-1]
        dxs = dxs[::-1]
    # This assumes the coordinate to be monotonic increasing
    x0 = x[0] - 0.5 * dxs[0]
    x = np.full(dxs.size + 1, x0)
    x[1:] += np.cumsum(dxs)
    return x
def _ugrid2d_dataset(
    node_x: FloatArray,
    node_y: FloatArray,
    face_x: FloatArray,
    face_y: FloatArray,
    face_nodes: IntArray,
) -> xr.Dataset:
    ds = xr.Dataset()
    ds["mesh2d"] = xr.DataArray(
        data=0,
        attrs={
            "cf_role": "mesh_topology",
            "long_name": "Topology data of 2D mesh",
            "topology_dimension": 2,
            "node_coordinates": "node_x node_y",
            "face_node_connectivity": "face_nodes",
            "edge_node_connectivity": "edge_nodes",
        },
    )
    ds = ds.assign_coords(
        node_x=xr.DataArray(
            data=node_x,
            dims=["node"],
        )
    )
    ds = ds.assign_coords(
        node_y=xr.DataArray(
            data=node_y,
            dims=["node"],
        )
    )
    ds["face_nodes"] = xr.DataArray(
        data=face_nodes,
        coords={
            "face_x": ("face", face_x),
            "face_y": ("face", face_y),
        },
        dims=["face", "nmax_face"],
        attrs={
            "cf_role": "face_node_connectivity",
            "long_name": "Vertex nodes of mesh faces (counterclockwise)",
            "start_index": 0,
            "_FillValue": -1,
        },
    )
    ds.attrs = {"Conventions": "CF-1.8 UGRID-1.0"}
    return ds
def ugrid2d_topology(data: Union[xr.DataArray, xr.Dataset]) -> xr.Dataset:
    """
    Derive the 2D-UGRID quadrilateral mesh topology from a structured
    DataArray or Dataset, with (2D) dimensions "y" and "x".

    Parameters
    ----------
    data: Union[xr.DataArray, xr.Dataset]
        Structured data from which the "x" and "y" coordinates will be used
        to define the UGRID-2D topology.

    Returns
    -------
    ugrid_topology: xr.Dataset
        Dataset with the required arrays describing the 2D unstructured
        topology: node_x, node_y, face_x, face_y, face_nodes (connectivity).
    """
    # Transform midpoints into vertices
    # These are always returned monotonically increasing
    x = data["x"].values
    xcoord = _coord(data, "x")
    if not data.indexes["x"].is_monotonic_increasing:
        xcoord = xcoord[::-1]
    y = data["y"].values
    ycoord = _coord(data, "y")
    if not data.indexes["y"].is_monotonic_increasing:
        ycoord = ycoord[::-1]
    # Compute all vertices, these are the ugrid nodes
    node_y, node_x = (a.ravel() for a in np.meshgrid(ycoord, xcoord, indexing="ij"))
    face_y, face_x = (a.ravel() for a in np.meshgrid(y, x, indexing="ij"))
    linear_index = np.arange(node_x.size, dtype=np.int32).reshape(
        ycoord.size, xcoord.size
    )
    # Allocate face_node_connectivity (integer dtype, as it holds indices)
    nfaces = (ycoord.size - 1) * (xcoord.size - 1)
    face_nodes = np.empty((nfaces, 4), dtype=np.int64)
    # Set connectivity in counterclockwise manner
    face_nodes[:, 0] = linear_index[:-1, 1:].ravel()   # upper right
    face_nodes[:, 1] = linear_index[:-1, :-1].ravel()  # upper left
    face_nodes[:, 2] = linear_index[1:, :-1].ravel()   # lower left
    face_nodes[:, 3] = linear_index[1:, 1:].ravel()    # lower right
    # Tie it together
    ds = _ugrid2d_dataset(node_x, node_y, face_x, face_y, face_nodes)
    return ds
def area_weighted_mean(
    da: xr.DataArray,
    destination_index: np.ndarray,
    source_index: np.ndarray,
    weights: np.ndarray,
):
    """
    Area weighted mean.

    Parameters
    ----------
    da: xr.DataArray
        Contains the source data.
    destination_index: np.ndarray
        In which destination cell the overlap is located.
    source_index: np.ndarray
        In which source cell the overlap is located.
    weights: np.ndarray
        Area of each overlap.

    Returns
    -------
    destination_index: np.ndarray
    values: np.ndarray
    """
    values = da.data.ravel()[source_index]
    df = pd.DataFrame(
        {"dst": destination_index, "area": weights, "av": weights * values}
    )
    aggregated = df.groupby("dst").sum(min_count=1)
    out = aggregated["av"] / aggregated["area"]
    return out.index.values, out.values
class Regridder:
    """
    Regridder to reproject and/or regrid rasters. When no ``crs_source`` and
    ``crs_destination`` are provided, it is assumed that ``source`` and
    ``destination`` share the same coordinate system.

    Note that an area weighted regridding method only makes sense for
    projected (Cartesian!) coordinate systems.

    Parameters
    ----------
    source: xr.DataArray
        Source example. Must have dimensions ("y", "x").
    destination: xr.DataArray
        Destination example. Must have dimensions ("y", "x").
    crs_source: optional, default: None
    crs_destination: optional, default: None
    """

    def __init__(
        self,
        source: xr.DataArray,
        destination: xr.DataArray,
        crs_source=None,
        crs_destination=None,
    ):
        src = ugrid2d_topology(source)
        dst = ugrid2d_topology(destination)
        src_yy = src["node_y"].values
        src_xx = src["node_x"].values
        if crs_source and crs_destination:
            transformer = pyproj.Transformer.from_crs(
                crs_from=crs_source, crs_to=crs_destination, always_xy=True
            )
            src_xx, src_yy = transformer.transform(xx=src_xx, yy=src_yy)
        elif bool(crs_source) != bool(crs_destination):  # only one was given
            raise ValueError("Received only one of (crs_source, crs_destination)")
        src_vertices = np.column_stack([src_xx, src_yy])
        src_faces = src["face_nodes"].values.astype(int)
        dst_vertices = np.column_stack((dst["node_x"].values, dst["node_y"].values))
        dst_faces = dst["face_nodes"].values.astype(int)
        celltree = CellTree2d(src_vertices, src_faces, fill_value=-1)
        self.source = source.copy()
        self.destination = destination.copy()
        (
            self.destination_index,
            self.source_index,
            self.weights,
        ) = celltree.intersect_faces(
            dst_vertices,
            dst_faces,
            fill_value=-1,
        )
    def regrid(self, da: xr.DataArray, fill_value=np.nan):
        """
        Parameters
        ----------
        da: xr.DataArray
            Data to regrid.
        fill_value: optional, default: np.nan
            Default value of the output grid, e.g. where no overlap occurs.

        Returns
        -------
        regridded: xr.DataArray
            Data of da, regridded using an area weighted mean.
        """
        src = self.source
        if not (np.allclose(da["y"], src["y"]) and np.allclose(da["x"], src["x"])):
            raise ValueError("da does not match source")
        index, values = area_weighted_mean(
            da,
            self.destination_index,
            self.source_index,
            self.weights,
        )
        data = np.full(self.destination.shape, fill_value)
        data.ravel()[index] = values
        out = self.destination.copy(data=data)
        out.name = da.name
        return out
# Example use
da = xr.open_dataarray("gw_abstraction_sum.nc")
like = xr.open_dataarray("example.nc")
regridder = Regridder(
    source=da, destination=like, crs_source=4326, crs_destination=3035
)
result = regridder.regrid(da)
result.to_netcdf("area-weighted_sum.nc")

Inverse stationary wavelet transform with pywavelets

I am trying to reconstruct the approximations and details at all levels using the inverse stationary wavelet transform from the PyWavelets package in Python. My code is the following:
import numpy as np
import pywt
import matplotlib.pyplot as plt

def UDWT(Btotal, wname, Lps, Hps, edge_eff):
    Br = Btotal[0]; Bt = Btotal[1]; Bn = Btotal[2]
    ## Set parameters needed for UDWT
    samplelength = len(Br)
    # If the length of the data is odd, turn it into an even-numbered
    # sample by getting rid of one point
    if np.mod(samplelength, 2) > 0:
        Br = Br[0:-1]
        Bt = Bt[0:-1]
        Bn = Bn[0:-1]
        samplelength = len(Br)
    # The edge extension mode is set to periodic extension by default with
    # this routine in the Rice toolbox.
    pads = 2**(np.ceil(np.log2(abs(samplelength)))) - samplelength
    # pads is what is needed to extend the sample to the next power of 2
    ## Do the UDWT decomposition and reconstruction
    keep_all = {}
    for m in range(3):
        # Get the data size up to the next power of 2 due to UDWT restrictions.
        # Although periodic extension is used for the wavelet edge handling,
        # we are getting the data up to the next power of 2 here by extending
        # the data sample with a constant value
        if (m == 0):
            y = np.pad(Br, pad_width=int(pads / 2), constant_values=np.nan)
        elif (m == 1):
            y = np.pad(Bt, pad_width=int(pads / 2), constant_values=np.nan)
        else:
            y = np.pad(Bn, pad_width=int(pads / 2), constant_values=np.nan)
        # Decompose the signal using the UDWT
        nlevel = min(pywt.swt_max_level(y.shape[-1]), 8)  # level of decomposition, upper limit 8
        Coeff = pywt.swt(y, wname, nlevel)
        # List of approximation and detail coefficient pairs, ordered like
        # the wavedec function: [(cAn, cDn), ..., (cA2, cD2), (cA1, cD1)]
        # Assign approximations to swa and details to swd
        swa = np.zeros((len(y), nlevel))
        swd = np.zeros((len(y), nlevel))
        for o in range(nlevel):
            swa[:, o] = Coeff[o][0]
            swd[:, o] = Coeff[o][1]
        # Reconstruct all the approximations and details at all levels.
        # Note the .copy() calls: A, D and the scratch array must not alias
        # one another.
        mzero = np.zeros(np.shape(swd))
        A = mzero.copy()
        coeffs_inverse = list(zip(swa.T, mzero.T))
        invers_res = pywt.iswt(coeffs_inverse, wname)
        D = mzero.copy()
        for pp in range(nlevel):
            swcfs = mzero.copy()
            swcfs[:, pp] = swd[:, pp]
            # Zero approximations of the same shape as the details, so that
            # zip() yields one (cA, cD) pair per level
            coeffs_inverse2 = list(zip(np.zeros_like(swcfs).T, swcfs.T))
            D[:, pp] = pywt.iswt(coeffs_inverse2, wname)
        for jjj in range(nlevel - 1, -1, -1):
            if (jjj == nlevel - 1):
                A[:, jjj] = invers_res
            else:
                A[:, jjj] = A[:, jjj + 1] + D[:, jjj + 1]
        # *********************************************************************
        # VERY IMPORTANT: LINEAR PHASE SHIFT CORRECTION
        # *********************************************************************
        # Correct for the linear phase shift in the wavelet coefficients at
        # each level. There is no need to do this for the low-pass filter
        # approximations, as they will be reconstructed and the shift will
        # automatically be reversed. The formula for the shift has been taken
        # from Walden's paper, or has been made up by me (can't exactly
        # remember) -- but it is verified and correct.
        # *********************************************************************
        for j in range(1, nlevel + 1):
            shiftfac = Hps * (2**(j - 1))
            for l in range(1, j):
                shiftfac = int(shiftfac + Lps * (2**(l - 2)) * ((l - 2) >= 0))
            swd[:, j - 1] = np.roll(swd[:, j - 1], shiftfac)
        flds = {
            "A": A.T,
            "D": D.T,
            "swd": swd.T,
        }
        Btot = ['Br', 'Bt', 'Bn']  # used just to name the entries
        keep_all[str(Btot[m])] = flds
    # 1) Put all the components together into one structure
    Apr = {}
    Swd = {}
    pads = int(pads)
    names = ['Br', 'Bt', 'Bn']
    for kk in range(3):
        A = keep_all[names[kk]]['A']
        Apr[names[kk]] = A[:, int(pads / 2):len(A) - int(pads / 2)]
        swd = keep_all[names[kk]]['swd']
        Swd[names[kk]] = swd[:, int(pads / 2):len(A) - int(pads / 2)]
    # Filter list for the current wavelet
    wavelet = pywt.Wavelet(wname)
    [h_0, h_1, _, _] = wavelet.inverse_filter_bank
    filterlength = len(h_0)
    if edge_eff:
        # 2) Get rid of the edge effects; to keep the edges skip this section
        for j in range(1, nlevel + 1):
            extra = int((2**(j - 2)) * filterlength)  # give some reasoning for this eq
            for m in range(3):
                # for approximations
                Apr[names[m]][j - 1][0:extra] = np.nan
                Apr[names[m]][j - 1][-extra:-1] = np.nan
                # for details
                Swd[names[m]][j - 1][0:extra] = np.nan
                Swd[names[m]][j - 1][-extra:-1] = np.nan
    return Apr, Swd, pads, nlevel
aa = np.sin(np.linspace(0, 2*np.pi, 100000)) + 0.05*np.random.rand(100000)
bb = np.cos(np.linspace(0, 2*np.pi, 100000)) + 0.05*np.random.rand(100000)
cc = np.cos(np.linspace(0, 4*np.pi, 100000)) + 0.05*np.random.rand(100000)
Btotal = [aa, bb, cc]
wname = 'coif2'
Lps = 7  # low-pass filter phase shift for level 1 Coiflet2
Hps = 4  # high-pass filter phase shift for level 1 Coiflet2
edge_eff = False  # edge_eff was left undefined in the original; False keeps the edges
Apr, Swd, pads, nlevel = UDWT(Btotal, wname, Lps, Hps, edge_eff)

### Add the details at all levels to the highest-level approximation
### to compare with the original time series (the equation shown on the website)
new = Swd['Br'][0]
for i in range(1, nlevel):
    new = Swd['Br'][i] + new
sig = Apr['Br'][-1] + new

### Now plot to compare ###
## Reconstructed signal 1
plt.plot(sig)
### Second way to get the reconstructed signal:
### first-level details plus approximations
plt.plot(Apr['Br'][-1] + Swd['Br'][-1])
### Original signal
plt.plot(aa)
I am trying to follow the procedure described on this website:
http://matlab.izmiran.ru/help/toolbox/wavelet/ch01_i24.html
However, the reconstructed time series does not seem to match the original exactly, as a comparison plot of the two signals shows (figure not reproduced here). Any help?
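For reference, a stripped-down round trip with pywt alone reconstructs exactly (a sanity check under simplifying assumptions: power-of-two length, no padding, no phase-shift correction):

import numpy as np
import pywt

x = np.sin(np.linspace(0, 2*np.pi, 1024))  # length is a power of two
coeffs = pywt.swt(x, 'coif2', level=3)     # [(cA3, cD3), (cA2, cD2), (cA1, cD1)]
x_rec = pywt.iswt(coeffs, 'coif2')
print(np.allclose(x, x_rec))               # True

So any mismatch in the routine above likely enters through the NaN padding, the coefficient bookkeeping, or the phase-shift correction rather than through swt/iswt themselves.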

How to delete elements from a numpy array using indices returned by the scipy.spatial.KDTree.query_ball_point method

I am trying to use the KDTree data structure to remove the closest points from an array, preferably without for loops.
import time

import numpy as np
import scipy.spatial
import matplotlib.pyplot as plt
import matplotlib.animation as animation

class KDTree:
    """
    Nearest neighbor search class with KDTree
    """
    def __init__(self, data):
        # store kd-tree
        self.tree = scipy.spatial.cKDTree(data)

    def search(self, inp, k=1):
        """
        Search NN
        inp: input data, single frame or multi frame
        """
        if len(inp.shape) >= 2:  # multi input
            index = []
            dist = []
            for i in inp.T:
                idist, iindex = self.tree.query(i, k=k)
                index.append(iindex)
                dist.append(idist)
            return index, dist
        dist, index = self.tree.query(inp, k=k)
        return index, dist

    def search_in_distance(self, inp, r):
        """
        Find points within a distance r
        """
        index = self.tree.query_ball_point(inp, r)
        return np.asarray(index)
start = time.time()
fig, ar = plt.subplots()
t = 0
R = 50.0
u = R * np.cos(t)
v = R * np.sin(t)
x = np.linspace(-100, 100, 51)
y = np.linspace(-100, 100, 51)
xx, yy = np.meshgrid(x, y)
points = np.vstack((xx.ravel(), yy.ravel())).T
Tree = KDTree(points)
ind = Tree.search_in_distance([u, v], 10.0)
ar.scatter(points[:, 0], points[:, 1], c='k', s=1)
infected = points[ind]
ar.scatter(infected[:, 0], infected[:, 1], c='r', s=5)

def animate(i):
    global R, t, start, points
    ar.clear()
    u = R * np.cos(t)
    v = R * np.sin(t)
    ind = Tree.search_in_distance([u, v], 10.0)
    ar.scatter(points[:, 0], points[:, 1], c='k', s=1)
    infected = points[ind]
    ar.scatter(infected[:, 0], infected[:, 1], c='r', s=5)
    #points = np.delete(points,ind)
    t += 0.01
    end = time.time()
    if end - start != 0:
        print((end - start), end="\r")
    start = end

ani = animation.FuncAnimation(fig, animate, interval=20)
plt.show()
But no matter what I do, I can't get np.delete to work with the indices returned by the query_ball_point method. What am I missing?
I would like the red-colored points to vanish from the points array in each iteration.
Your points array is an Nx2 matrix, and your ind indices are a list of row indices. What you need is to specify the axis along which to delete, ultimately this:
points = np.delete(points, ind, axis=0)
Also, once you delete indices, watch out for stale indices in your next iteration/calculations. You may want one copy from which you delete points for plotting, and another copy for calculations that you never delete from.
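A minimal standalone illustration (the points and radius here are made up):

import numpy as np
from scipy.spatial import cKDTree

points = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 5.0], [0.5, 0.5]])
tree = cKDTree(points)
ind = tree.query_ball_point([0.0, 0.0], r=1.0)  # e.g. [0, 1, 3] (order may vary)
points = np.delete(points, ind, axis=0)         # axis=0 removes whole rows
print(points)                                   # [[5. 5.]]

Note that the tree still indexes the original array after the deletion, so it has to be rebuilt from the reduced points before the next query.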

In a loop - match 'i' with an index value from a csv (Python/Networkx)

I'm currently trying to draw some edges in NetworkX. My nodes have two patch properties, position and status, which are used in a colonisation simulation algorithm. I've been trying to scale up my simulation, which has meant turning away from working out Euclidean distances between my nodes (and also away from code that works!).
I have a csv of the row-number indices of the nearest neighbours of each node, each index corresponding to a row of another csv that holds the 3D coordinates of the nodes. For example, row 0 of the nearest-neighbour csv may hold three nearest neighbours in separate columns, so it would read 0, 56, 76 if node 0 had nearest neighbours at nodes 56 and 76, which correspond to rows 0, 56 and 76 of the coordinate csv.
I then need to draw edges between these nearest-neighbour nodes so my algorithm can play with the nodes. So I have some pseudo-code:
import networkx as nx
import numpy as np
from sklearn.neighbors import BallTree
import csv
from itertools import izip_longest
import pandas as pd

density = 0.14  # stellar density per cubic parsec
L = 100
Patches = int(0.056*density*L**3 + 15)
P_init = 0.0001  # probability that a patch is occupied at the beginning
Distance = 10

dat = np.random.uniform(low=-1, high=1, size=(Patches, 3)) * L
np.savetxt('nearand1.csv', dat, delimiter=',')
nearand = np.genfromtxt('nearand1.csv', delimiter=',', usecols=np.arange(0, 3))
tree = BallTree(nearand, leaf_size=2)
ind = tree.query_radius(nearand, r=10)
df = pd.DataFrame(ind)
df.to_csv('bobbington4.csv', sep='e', index=False, header=False)

xcoord = nearand[:, 0]
ycoord = nearand[:, 1]
zcoord = nearand[:, 2]

bobbington = np.genfromtxt('bobbington4.csv', delimiter=',', dtype='int')
bobbington0 = bobbington[:, 0]
bobbington1 = bobbington[:, 1]
bobbington2 = bobbington[:, 2]
bobbington3 = bobbington[:, 3]
bobbington4 = bobbington[:, 4]
bobbington5 = bobbington[:, 5]
bobbington6 = bobbington[:, 6]
bobbington7 = bobbington[:, 7]
bobbington8 = bobbington[:, 8]
bobbington9 = bobbington[:, 9]
bobbington10 = bobbington[:, 10]
bobbington11 = bobbington[:, 11]
bobbington12 = bobbington[:, 12]
bobbington13 = bobbington[:, 13]

class patch:
    def __init__(self, status=0, pos=(0, 0, 0)):
        self.status = status
        self.pos = pos

    def __str__(self):
        return str(self.status)

G = nx.Graph()
for i in xrange(Patches):
    Stat = 1 if np.random.uniform() < P_init else 0
    Pos = (xcoord[i], ycoord[i], zcoord[i])
    G.add_node(patch(Stat, Pos))

# Pseudo-code for the part I am stuck on:
for i in G.nodes():
    for j in G.nodes():
        if i.pos where i == bobbington0:
            if j.pos where j == bobbington1:
                G.add_edge(i, j)

pos = {}
for n in G.nodes():
    pos[n] = n.pos

occup = [n.status for n in G]
Time = [0]
Occupancy = [np.sum([n.status for n in G]) / float(Patches)]
Here bobbington0 is just a column of node indices going from 0 -> 7854 and bobbington1 is the first nearest neighbour for each of those nodes. What is the best way to go about this? I'm struggling to find anything on this type of problem, but I'm probably wording things poorly.
Thanks in advance for any help you can give me.
I've got it. Not particularly elegant but it works.
for i in G.nodes():
    for j in G.nodes():
        diff1 = j.boba[0] - i.bubu
        if diff1 == 0:
            G.add_edge(i, j)
