My issue is with working with arrays in which each element is a tuple of two values.
Specifically, the problem is to generate a random two-dimensional walk of 200 steps (but for testing, say 2) with a maximum step size, run 100 of those walks per stage (again, try just 2), and start each stage at the end point of the previous stage that lies farthest from the origin.
I can successfully generate the array of random steps, return the final (x, y) position of each walk, and calculate its distance from the origin.
That is handled by these functions:
#............................................................getPositionInteger
def getPosInt(description) :
    """Asks the user to enter a positive integer"""
    askAgain = False
    while askAgain == False:
        try:
            posInt = eval(raw_input("\n %s : " %description))
            assert 0 < posInt , "Not a positive integer"
            assert type(posInt) == int , "Not a positive integer"
            askAgain = True
        except :
            print "Input failed, not a positive integer"
    return posInt
#...............................................................initialPosition
def initialPosition() :
    """Initial position of walker at the start of a random walk"""
    return (0.0, 0.0)
#......................................................................distance
def distance(posA, posB) :
    """Distance between two positions"""
    xi = posA[0] ; yi = posA[1]
    xf = posB[0] ; yf = posB[1]
    return np.sqrt((xf-xi)**2+(yf-yi)**2)
#..................................................................getPositions
def getPositions(start, nSteps, maxStep):
    xArray = maxStep * np.random.random(nSteps+1) * np.cos(2.0 * np.pi * random.random())
    yArray = maxStep * np.random.random(nSteps+1) * np.sin(2.0 * np.pi * random.random())
    xArray[0] = start[0]
    yArray[0] = start[-1]
    xArray = np.cumsum(xArray)
    yArray = np.cumsum(yArray)
    return (xArray[-1], yArray[-1])
But I can't build, for each stage, an array of the final (x, y) positions of the walks.
Here's the main script and where I'm having trouble:
import numpy as np
import matplotlib.pylab as plt
import random
import time

MAX_STEP_SIZE = 0.90  # maximum size of a single step [m]
random.seed(12345)

#..........................................................................main
def main ():
    ''''''
    print "RANDOM WALK IN STAGES IN TWO DIMENSIONS"
    userdata = getDetails()
    print "\nPlease wait while random walks are generated and analyzed..."
    NUM_STAGES = userdata[0]
    NUM_WALKS = userdata[1]
    NUM_STEPS = userdata[2]
    stageStart = initialPosition()
    for stage in np.arange(NUM_STAGES):
        walks = np.zeros((NUM_WALKS, NUM_WALKS), dtype=np.ndarray)
        for walk in walks:
            walk = getPositions(stageStart, NUM_STEPS, MAX_STEP_SIZE)
            print walk
        print walks
You will see I'm having trouble building an (x, y) style array: the [0 0] entries should be [0.0, 0.0], the array is printed twice, and it is not being updated to the final positions.
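For clarity, here is a rough sketch of the structure I am aiming for per stage; the (NUM_WALKS, 2) array and the argmax selection are just my guess at how it could look, not working code from the assignment:

import numpy as np

NUM_WALKS = 2
stageStart = (0.0, 0.0)

# one row of (x, y) per walk: shape (NUM_WALKS, 2)
endPoints = np.zeros((NUM_WALKS, 2))
for w in range(NUM_WALKS):
    # placeholder end point; in my script this would be
    # getPositions(stageStart, NUM_STEPS, MAX_STEP_SIZE)
    endPoints[w] = (np.random.random(), np.random.random())

# distance of each end point from the stage origin
dists = np.sqrt((endPoints[:, 0] - stageStart[0])**2 +
                (endPoints[:, 1] - stageStart[1])**2)

# next stage would start at the end point farthest from the origin
stageStart = tuple(endPoints[np.argmax(dists)])
print(endPoints)
print(stageStart)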
I really appreciate any help, advice, or references you can provide.
Thanks
-Sid
I've rewritten part of what was done here in an attempt to avoid recursion when producing the images. While I can get what appears to be the correct sequence of random functions, I am unable to get the correct output arrays to build the image.
You'll notice I've put the xVar function first in the list of random functions because it can be called with an empty argument list and gives me back values. This is similar to what the original code does, except that it (by recursion) uses a depth of 0 to pick out one of three functions that take no arguments. I am thinking that the results are passed back in so that functions such as np.sin will work.
I think the issue might lie in my use of argument unpacking, func(*testlist); perhaps I'm using it incorrectly.
import numpy as np, random
from PIL import Image

width, height = 256, 256

xArray = np.linspace(0.0, 1.0, width).reshape((1, width, 1))
yArray = np.linspace(0.0, 1.0, height).reshape((height, 1, 1))

def xVar(): return xArray
def yVar(): return yArray
def safeDivide(a, b): return np.divide(a, np.maximum(b, 0.001))

def add(x, y):
    added = np.add(x, y)
    return added

def Color():
    randColorarray = np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3))
    return randColorarray

# def circle(x,y):
#     circles = (x - 100) ** 2 + (y - 100) ** 2
#     return circles

functions = (Color, xVar, yVar, np.sin, np.multiply, safeDivide)
depth = 5

def functionArray(depth = 0):
    FunctList = []
    FunctList.append(xVar)
    for x in range(depth):
        func = random.choice(functions)
        FunctList.append(func)
    return FunctList

def ImageBuilder():
    FunctionList = functionArray(depth)
    testlist = []
    for func in FunctionList:
        values = func(*testlist)
    return values

vals = ImageBuilder()

repetitions = (int(xArray / vals.shape[0]), int(yArray / vals.shape[1]), int(3 / vals.shape[2]))
img = np.tile(vals, repetitions)

# Convert to 8-bit, send to PIL and save
img8Bit = np.uint8(np.rint(img.clip(0.0, 1.0) * 255.0))
Image.fromarray(img8Bit).save('Images/' + '.png', "PNG")
Depending on which random function is chosen, I'll either get
values = func(*testlist)
ValueError: invalid number of arguments
or
TypeError: safeDivide() missing 2 required positional arguments: 'a' and 'b'
Note, however, that the linked program does not get a safeDivide error, even though neither a nor b is explicitly passed in (the same goes for np.multiply).
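The only workaround I can think of is to dispatch on how many inputs each chosen function takes; here is a self-contained toy sketch of that idea (the grouping by arity, and reusing the running value twice for the binary functions, are my own assumptions, not how the linked program does it):

import numpy as np, random

# toy-sized versions of the arrays so the sketch runs on its own
xArr = np.linspace(0.0, 1.0, 8).reshape((1, 8, 1))
yArr = np.linspace(0.0, 1.0, 8).reshape((8, 1, 1))

def xVar(): return xArr
def yVar(): return yArr
def safeDivide(a, b): return np.divide(a, np.maximum(b, 0.001))

zero_arg = (xVar, yVar)              # take no inputs
one_arg = (np.sin,)                  # take one input
two_arg = (np.multiply, safeDivide)  # take two inputs
functions = zero_arg + one_arg + two_arg

random.seed(0)
value = xVar()                       # start from something concrete
for _ in range(5):
    func = random.choice(functions)
    if func in zero_arg:
        value = func()
    elif func in one_arg:
        value = func(value)
    else:
        # binary: feed the running value in twice (a simplification)
        value = func(value, value)

print(value.shape)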
Thanks for any help.
I'm trying to plot the eigenvalues of randomly generated adjacency matrices to obtain something that looks like a Gaussian distribution. I want to fix the probability with which the graphs are generated and plot the largest eigenvalue against its frequency, but I'm not sure how to do either of these. Here is my code:
import numpy as np
import random
import matplotlib.pyplot as plt
import scipy.linalg as la

print("Please input the amount of times you want to repeat this: ")
userInput = int(input())
print("This will repeat {} times".format(userInput))
print("--------------------------------------------")

largestEig = []

for x in range(userInput):
    n = 3
    print("Random number is: {}".format(n))
    adjMatrix = np.random.randn(0,2,(n,n))
    np.fill_diagonal(adjMatrix, 0)
    i_lower = np.tril_indices(n, -1)
    adjMatrix[i_lower] = adjMatrix.T[i_lower]
    eigvals, eigvecs = la.eig(adjMatrix)
    m = max(eigvals)
    largestEig.append(m)
    print("For {}, M = {}".format(n, m))
    print(adjMatrix)
    print("---------------------------------------------")

print("The List:")
print(largestEig)

plt.plot(largestEig)
plt.show()
When trying to run your code, I get an error in the line that generates the matrix.
I assume you want to create a square matrix with random values.
In this case you might want to look at np.random.uniform:
n=3
adjMatrix = np.random.uniform(0,10,(n,n))
For the plotting of the distribution, you might want to look at plt.hist():
plt.hist(largestEig, bins=50)
Adding these changes, and removing the input and print statements gives this:
largestEig = []
userInput = 10000

for x in range(userInput):
    n = 3
    adjMatrix = np.random.uniform(0,10,(n,n))
    np.fill_diagonal(adjMatrix, 0)
    i_lower = np.tril_indices(n, -1)
    adjMatrix[i_lower] = adjMatrix.T[i_lower]
    eigvals, eigvecs = la.eig(adjMatrix)
    m = max(eigvals)
    largestEig.append(m)

plt.hist(largestEig, bins=50)
plt.show()
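If by "fix the probability that the graphs are generated" you mean a 0/1 adjacency matrix in which each edge exists with a fixed probability p, a sketch along these lines might be closer to that (the value of p here is just an assumption):

import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt

p = 0.5   # assumed edge probability
n = 3
largestEig = []
for _ in range(10000):
    # Bernoulli(p) entries in the strict upper triangle, then mirror
    upper = np.triu(np.random.binomial(1, p, (n, n)), k=1)
    adjMatrix = upper + upper.T          # symmetric, zero diagonal
    eigvals = la.eigvalsh(adjMatrix)     # real eigenvalues for a symmetric matrix
    largestEig.append(eigvals.max())

plt.hist(largestEig, bins=50)
plt.show()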
I need to find where a smaller 2D array, array1, most closely matches inside another 2D array, array2.
array1 will have a size of grid_size, from 46x46 up to 96x96.
array2 will be larger (184x184).
I only have access to numpy.
I am currently trying to use the Tversky formula but am not tied to it.
Efficiency is the most important part, as this will run many times. My current solution, shown below, is very slow.
for i in range(array2.shape[0] - grid_size):
    for j in range(array2.shape[1] - grid_size):
        r[i, j] = np.sum(array2[i:i+grid_size, j:j+grid_size] == array1) / (np.sum(array2[i:i+grid_size, j:j+grid_size] != array1) + np.sum(Si[i:i+grid_size, j:j+grid_size] == array1))
Edit:
The goal is to find the location where a smaller image matches another image.
Here is an FFT/convolution based approach that minimizes Euclidean distance:
import numpy as np
from numpy import fft

N = 184
n = 46
pad = 192

def best_offs(A, a):
    A, a = A.astype(float), a.astype(float)
    Ap, ap = (np.zeros((pad, pad)) for _ in "Aa")
    Ap[:N, :N] = A
    ap[:n, :n] = a
    sim = fft.irfft2(fft.rfft2(ap).conj() * fft.rfft2(Ap))[:N-n+1, :N-n+1]
    Ap[:N, :N] = A*A
    ap[:n, :n] = 1
    ref = fft.irfft2(fft.rfft2(ap).conj() * fft.rfft2(Ap))[:N-n+1, :N-n+1]
    return np.unravel_index((ref - 2*sim).argmin(), sim.shape)

# example
# random picture
A = np.random.randint(0, 256, (N, N), dtype=np.uint8)
# random offset
offy, offx = np.random.randint(0, N-n+1, 2)
# sub pic at random offset
# randomly flip half of the least significant 75% of all bits
a = A[offy:offy+n, offx:offx+n] ^ np.random.randint(0, 64, (n, n))
# reconstruct offset
oyrec, oxrec = best_offs(A, a)
assert offy == oyrec and offx == oxrec

# speed?
from timeit import timeit
print(timeit(lambda: best_offs(A, a), number=100)*10, "ms")

# example with zero a
a[...] = 0
# make A smaller in a matching subsquare
A[offy:offy+n, offx:offx+n] >>= 1
# reconstruct offset
oyrec, oxrec = best_offs(A, a)
assert offy == oyrec and offx == oxrec
Sample run:
3.458537160186097 ms
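For reference, the quantity being minimized is the squared Euclidean distance, which for each offset expands as sum((A_win - a)**2) = sum(A_win**2) - 2*sum(A_win*a) + sum(a**2); the last term is the same for every offset, so argmin(ref - 2*sim) is enough, and both sliding-window sums are exactly what the two FFT cross-correlations compute. A small brute-force sketch of the same criterion, on toy sizes rather than the fixed N=184/n=46 above:

import numpy as np

N, n = 16, 4
A = np.random.randint(0, 256, (N, N)).astype(float)
a = A[5:5+n, 3:3+n].copy()   # known sub-picture at offset (5, 3)

# full squared-distance map, computed directly
d = np.array([[np.sum((A[i:i+n, j:j+n] - a)**2)
               for j in range(N-n+1)]
              for i in range(N-n+1)])
print(np.unravel_index(d.argmin(), d.shape))   # -> (5, 3)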
I have been working through the book "A Student's Guide to Python for Physical Modeling" by Jesse M. Kinder & Philip Nelson, and there is an exercise where I'm instructed to build a Brownian motion / random walk simulator and plot it. I don't know why my code is not working, and I was hoping I could get some help from you:
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random as rng

def Brownian_motion(steps):
    """
    this is a random walk function
    define the number of steps to be taken as a integer value
    """
    #these are the random numbers
    steps_x = rng(steps)
    steps_y = rng(steps)

    #Here I change the random numbers to 1 or -1
    pace_x = (steps_x < 0.5)
    for i in pace_x:
        if i == False:
            pace_x[i] = -1
        else:
            pace_x[i] = 1
    return pace_x

    pace_y = (steps_y < 0.5)
    for i in pace_y:
        if i == False:
            pace_y[i] = -1
        else:
            pace_x[i] = 1
    return pace_y

    plt.plot(np.cumsum(pace_x), np.cumsum(pace_y))
    plt.show()

Brownian_motion(500)
It does not throw an error, but I can't get it to plot.
EDIT:
This is similar to what I'm expecting to see:
http://people.sc.fsu.edu/~jburkardt/m_src/random_walk_2d_simulation/walks_1_steps_1000_plot.png
With NumPy you can use boolean masks for indexing, which is more efficient. Note that this does not work with Python lists/tuples.
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random as rng

def Brownian_motion(steps):
    """
    this is a random walk function
    define the number of steps to be taken as a integer value
    """
    #these are the random numbers
    steps_x = rng(steps)
    steps_y = rng(steps)

    pace_x = np.ones_like(steps_x)
    idx = steps_x < 0.5
    pace_x[idx] = -1

    idy = steps_y < 0.5
    pace_y = np.ones_like(steps_y)
    pace_y[idy] = -1

    plt.plot(np.cumsum(pace_x), np.cumsum(pace_y))
    # plt.axis("equal")
    # I would also add this. This way your plot won't be
    # distorted.
    plt.show()

a = Brownian_motion(500)
You have unnecessary return statements at the end of your loops, so your code never gets to the plot. Remove those and the Brownian_motion function should have a chance to complete execution.
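For example, with the early returns removed, and the boolean-to-±1 conversion done with np.where (a sketch of one way to write it, not necessarily the book's intended approach), the function runs through to the plot:

import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random as rng

def Brownian_motion(steps):
    """Random walk of `steps` unit steps of +/-1 in x and y."""
    steps_x = rng(steps)
    steps_y = rng(steps)
    # map values < 0.5 to -1 and the rest to +1
    pace_x = np.where(steps_x < 0.5, -1, 1)
    pace_y = np.where(steps_y < 0.5, -1, 1)
    plt.plot(np.cumsum(pace_x), np.cumsum(pace_y))
    plt.show()

Brownian_motion(500)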
Try to remove the return from your function, and cast your booleans to integers
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random as rng

def Brownian_motion(steps):
    """
    this is a random walk function
    define the number of steps to be taken as a integer value
    """
    #these are the random numbers
    steps_x = rng(steps)
    steps_y = rng(steps)

    #Here I change the random numbers to 1 or -1
    pace_x = (steps_x < 0.5)*1
    for i in pace_x:
        if i == False:
            pace_x[i] = -1
        else:
            pace_x[i] = 1
    #return pace_x

    pace_y = (steps_y < 0.5)*1
    for i in pace_y:
        if i == False:
            pace_y[i] = -1
        else:
            pace_x[i] = 1
    #return pace_y

    plt.plot(np.cumsum(pace_x), np.cumsum(pace_y))
    plt.show()

Brownian_motion(500)
I do not know what a Brownian motion / random walk simulator is, but the problem in your code is that your function has a return statement (actually two) that makes it stop without ever executing the plot.
Commenting them out seems to work, and it plots something (I do not know if it is what you are expecting).
The code:
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import random as rng

def Brownian_motion(steps):
    """
    this is a random walk function
    define the number of steps to be taken as a integer value
    """
    #these are the random numbers
    steps_x = rng(steps)
    steps_y = rng(steps)

    #Here I change the random numbers to 1 or -1
    pace_x = (steps_x < 0.5)
    #print(pace_x)
    x_list = list()
    y_list = list()
    for i in pace_x:
        if i == False:
            #pace_x[i] = -1
            x_list.append(-1)
        else:
            #pace_x[i] = 1
            x_list.append(1)
    #return pace_x
    print("Hello there")

    pace_y = (steps_y < 0.5)
    for i in pace_y:
        if i == False:
            #pace_y[i] = -1
            y_list.append(-1)
        else:
            #pace_x[i] = 1
            y_list.append(1)
    #return pace_y

    plt.plot(x_list, y_list)
    plt.show()

Brownian_motion(500)
A piece of advice: when something is wrong in Python, try putting print() calls in your code to check that what you expect is actually happening. For example, I put the line print("Hello there") after the return and saw that it was never executed (now the return is commented out).
How can I generate random walk data between given start and end values,
while never going over a maximum value or under a minimum value?
Here is my attempt, but for some reason the series sometimes goes over the max or under the min value. The start and end values seem to be respected, but not the minimum and maximum. How can this be fixed? Also, I would like to specify the standard deviation of the fluctuations but don't know how; I currently use randomPerc for the fluctuation, which is wrong, since I'd like to give the std instead.
import numpy as np
import matplotlib.pyplot as plt

def generateRandomData(length, randomPerc, min, max, start, end):
    data_np = (np.random.random(length) - randomPerc).cumsum()
    data_np *= (max - min) / (data_np.max() - data_np.min())
    data_np += np.linspace(start - data_np[0], end - data_np[-1], len(data_np))
    return data_np

randomData = generateRandomData(length=1000, randomPerc=0.5, min=50, max=100, start=66, end=80)

## print values
print("Max Value", randomData.max())
print("Min Value", randomData.min())
print("Start Value", randomData[0])
print("End Value", randomData[-1])
print("Standard deviation", np.std(randomData))

## plot values
plt.figure()
plt.plot(range(randomData.shape[0]), randomData)
plt.show()
plt.close()
Here is a simple loop which checks for series that go under the minimum or over the maximum value. This is exactly what I am trying to avoid; the series should stay within the given min and max limits.
## generate 1000 series and check if there are any values over the maximum limit or under the minimum limit
for i in range(1000):
    randomData = generateRandomData(length=1000, randomPerc=0.5, min=50, max=100, start=66, end=80)
    if randomData.min() < 50:
        print(i, "Value Lower than Min limit")
    if randomData.max() > 100:
        print(i, "Value Higher than Max limit")
Since you impose conditions on your walk, it cannot be considered purely random. Anyway, one way is to generate the walk iteratively and check the boundaries on each iteration (a rough sketch of that is given at the end of this answer). But if you want a vectorized solution, here it is:
def bounded_random_walk(length, lower_bound, upper_bound, start, end, std):
    assert (lower_bound <= start and lower_bound <= end)
    assert (start <= upper_bound and end <= upper_bound)

    bounds = upper_bound - lower_bound

    rand = (std * (np.random.random(length) - 0.5)).cumsum()
    rand_trend = np.linspace(rand[0], rand[-1], length)
    rand_deltas = (rand - rand_trend)
    rand_deltas /= np.max([1, (rand_deltas.max()-rand_deltas.min())/bounds])

    trend_line = np.linspace(start, end, length)
    upper_bound_delta = upper_bound - trend_line
    lower_bound_delta = lower_bound - trend_line

    upper_slips_mask = (rand_deltas-upper_bound_delta) >= 0
    upper_deltas = rand_deltas - upper_bound_delta
    rand_deltas[upper_slips_mask] = (upper_bound_delta - upper_deltas)[upper_slips_mask]

    lower_slips_mask = (lower_bound_delta-rand_deltas) >= 0
    lower_deltas = lower_bound_delta - rand_deltas
    rand_deltas[lower_slips_mask] = (lower_bound_delta + lower_deltas)[lower_slips_mask]

    return trend_line + rand_deltas

randomData = bounded_random_walk(1000, lower_bound=50, upper_bound=100, start=50, end=100, std=10)
You can see it as the solution of a geometric problem. The trend_line connects your start and end points and has margins defined by lower_bound and upper_bound. rand is your random walk, rand_trend is its trend line, and rand_deltas is its deviation from that trend line. We collocate the trend lines and want to make sure the deltas don't exceed the margins. When rand_deltas exceeds the allowed margin, we "fold" the excess back toward the bounds.
At the end you add the resulting random deltas to the start-to-end trend line, obtaining the desired bounded random walk.
The std parameter corresponds to the amount of variance of the random walk.
Update: fixed the assertions. In this version "std" is not promised to be the "interval".
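For completeness, the iterative alternative mentioned at the top could look roughly like this; the clipping of each step into the band and the linear pull toward end are my own choices, just a sketch:

import numpy as np

def bounded_walk_iterative(length, lower_bound, upper_bound, start, end, std):
    """Step-by-step sketch: draw a Gaussian step, drift toward `end`,
    and clip each new value into [lower_bound, upper_bound]."""
    values = np.empty(length)
    values[0] = start
    for i in range(1, length):
        remaining = length - i
        drift = (end - values[i - 1]) / remaining   # pull toward the end value
        step = np.random.normal(drift, std)
        values[i] = np.clip(values[i - 1] + step, lower_bound, upper_bound)
    values[-1] = end
    return values

randomData = bounded_walk_iterative(1000, 50, 100, start=66, end=80, std=1.0)
print(randomData.min(), randomData.max(), randomData[0], randomData[-1])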
I noticed you used built-in function names as arguments (min and max), which is not recommended (I changed these to min_1 and max_1). Other than this, your code should work as expected:
def generateRandomData(length, randomPerc, min_1, max_1, start, end):
    data_np = (np.random.random(length) - randomPerc).cumsum()
    data_np *= (max_1 - min_1) / (data_np.max() - data_np.min())
    data_np += np.linspace(start - data_np[0], end - data_np[-1], len(data_np))
    return data_np

randomData = generateRandomData(1000, 0.5, 50, 100, 66, 80)
If you are willing to modify your code this will work:
import random

for_fill = []

# generate 1000 samples within the specified range and save them in for_fill
for x in range(1000):
    generate_rnd_df = random.uniform(50, 100)
    for_fill.append(generate_rnd_df)

# set starting and end point manually
for_fill[0] = 60
for_fill[999] = 80
Here is one way, very crudely expressed in code.
>>> import random
>>> steps = 1000
>>> start = 66
>>> end = 80
>>> step_size = (50,100)
Generate 1,000 steps assured to be within the required range.
>>> crude_walk_steps = [random.uniform(*step_size) for _ in range(steps)]
>>> import numpy as np
Turn these steps into a walk but notice that they fail to meet the requirements.
>>> crude_walk = np.cumsum(crude_walk_steps)
>>> min(crude_walk)
57.099056617839288
>>> max(crude_walk)
75048.948693623403
Calculate a simple linear transformation to scale the steps.
>>> from sympy import *
>>> var('a b')
(a, b)
>>> solve([57.099056617839288*a+b-66,75048.948693623403*a+b-80])
{b: 65.9893403510312, a: 0.000186686954219243}
Scale the steps.
>>> walk = [0.000186686954219243*_+65.9893403510312 for _ in crude_walk]
Verify that the walk values now span the intended range.
>>> min(walk)
65.999999999999986
>>> max(walk)
79.999999999999986
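As a side note, the same two-point linear map can be computed directly with NumPy instead of sympy (a sketch continuing the session above; it reproduces the a and b solved for earlier from the observed min and max):

>>> a2 = (80.0 - 66.0) / (crude_walk.max() - crude_walk.min())
>>> b2 = 66.0 - a2 * crude_walk.min()
>>> walk2 = a2 * crude_walk + b2   # matches the sympy a and b above, up to floating point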
You can also generate a stream of random walks and filter out those that do not meet your constraints. Just be aware that by filtering they are not really 'random' anymore.
The code below creates an infinite stream of 'valid' random walks. Be careful with very tight constraints; the 'next' call might take a while ;).
import itertools
import numpy as np

def make_random_walk(first, last, min_val, max_val, size):
    # Generate a sequence of random steps of length `size-2`
    # that will be taken between the start and stop values.
    steps = np.random.normal(size=size-2)
    # The walk is the cumsum of those steps
    walk = steps.cumsum()
    # Performing the walk from the start value gives you your series.
    series = walk + first
    # Compare the target min and max values with the observed ones.
    target_min_max = np.array([min_val, max_val])
    observed_min_max = np.array([series.min(), series.max()])
    # Calculate the absolute 'overshoot' for min and max values
    f = np.array([-1, 1])
    overshoot = (observed_min_max*f - target_min_max*f)
    # Calculate the scale factor to constrain the walk within the
    # target min/max values.
    # Don't upscale.
    correction_base = [walk.min(), walk.max()][np.argmax(overshoot)]
    scale = min(1, (correction_base - overshoot.max()) / correction_base)
    # Generate the scaled series
    new_steps = steps * scale
    new_walk = new_steps.cumsum()
    new_series = new_walk + first
    # Check the size of the final step necessary to reach the target endpoint.
    last_step_size = abs(last - new_series[-1])  # step needed to reach desired end
    # Is it larger than the largest previously observed step?
    if last_step_size > np.abs(new_steps).max():
        # If so, consider this series invalid.
        return None
    else:
        # Else, we found a valid series that meets the constraints.
        return np.concatenate((np.array([first]), new_series, np.array([last])))

start = 66
stop = 80
max_val = 100
min_val = 50
size = 1000

# Create an infinite stream of candidate series
candidate_walks = (
    (i, make_random_walk(first=start, last=stop, min_val=min_val, max_val=max_val, size=size))
    for i in itertools.count()
)
# Filter out the invalid ones.
valid_walks = ((i, w) for i, w in candidate_walks if w is not None)

idx, walk = next(valid_walks)  # Get the next valid series
print(
    "Walk #{}: min/max({:.2f}/{:.2f})"
    .format(idx, walk.min(), walk.max())
)