Excuse the length of the title, please, but this is a pretty specific question. I'm currently simulating a launch of a rocket to Mars in the 2022 launch window and I noticed that my rocket ends up a long way from Mars, even though it's traveling in the right direction. After simplifying my code to narrow down the problem, I simply plotted the orbits of the Earth and Mars (using data from NASA's SPICE library) and propagated the position and velocity given to me by the Lambert solver I implemented (universal variables) to plot the final orbit of the rocket.
I'm only letting the Sun's gravity affect the rocket, not the Earth or Mars, to minimize my problem space. Yet even though I've simplified my problem this far, the intersection between Mars' orbit and my rocket's orbit happens well before the full time of flight has been simulated, and the minimum distance between the two bodies is more than a million kilometers at all times.
That being said, something must be wrong, but I cannot find the problem. I've made sure the Lambert solver code I copied is correct by comparing it to Dario Izzo's method, and both gave the same results. Furthermore, I've also checked that my orbit propagator works by propagating Mars' and the Earth's orbits and comparing those ellipses to the data from SPICE.
In conclusion, I assume this must be a stupid little mistake I made somewhere, but I cannot find it because I lack experience in this field. Thank you for any help! :)
This is the JupyterLab notebook I used:
import numpy as np
import matplotlib.pyplot as plt
import json
import math
import spiceypy as spice
# Physics
G = 6.6741e-11
class Entity:
    def __init__(self, x, v, m, do_gravity):
        self.x = x
        self.v = v
        self.a = np.array([0,0,0])
        self.m = m
        self.do_gravity = do_gravity

    def apply_force(self, f):
        self.a = np.add(self.a, f / self.m)

    def step(self, dt):
        self.v = np.add(self.v, self.a * dt)
        self.x = np.add(self.x, self.v * dt)
        self.a = np.array([0,0,0])

class StaticEntity(Entity):
    def __init__(self, body, t, do_gravity):
        super().__init__(self.get_state(body, t)[:3], self.get_state(body, t)[3:], self.get_mass(body), do_gravity)
        self.body = body
        self.t = t

    def step(self, dt):
        self.t += dt
        state = self.get_state(self.body, self.t)
        self.x = state[:3]
        self.v = state[3:]

    #classmethod
    def get_state(self, body, t):
        [state, lt] = spice.spkezr(body, t, "J2000", "NONE", "SSB")
        return state * 1000

    #classmethod
    def get_mass(self, body):
        [dim, gm] = spice.bodvrd(body, "GM", 1)
        return gm * 1e9 / G

    def get_position(self, t):
        return self.get_state(self.body, t)[:3]

    def get_velocity(self, t):
        return self.get_state(self.body, t)[3:]

class Propagator:
    def __init__(self, entities):
        self.entities = entities

    def step(self, dt):
        for e1 in self.entities:
            for e2 in self.entities:
                if (e1 is e2) or (not e1.do_gravity) or isinstance(e2, StaticEntity):
                    continue
                diff = np.subtract(e1.x, e2.x)
                fg = G * e1.m * e2.m / np.dot(diff, diff)
                force = fg * diff / np.linalg.norm(diff)
                e2.apply_force(force)
        for entity in self.entities:
            entity.step(dt)
# Lambert solver
def C2(psi):
    if psi >= 0.0:
        sp = math.sqrt(psi)
        return (1 - math.cos(sp)) / psi
    else:
        sp = math.sqrt(-psi)
        return (1 - math.cosh(sp)) / psi

def C3(psi):
    if psi >= 0.0:
        sp = math.sqrt(psi)
        return (sp - math.sin(sp)) / (psi * sp)
    else:
        sp = math.sqrt(-psi)
        return (sp - math.sinh(sp)) / (psi * sp)

def lambert_solve(r1, r2, tof, mu, iterations, tolerance):
    R1 = np.linalg.norm(r1)
    R2 = np.linalg.norm(r2)
    cos_a = np.dot(r1, r2) / (R1 * R2)
    A = math.sqrt(R1 * R2 * (1.0 + cos_a))
    sqrt_mu = math.sqrt(mu)

    if A == 0.0:
        return None

    psi = 0.0
    psi_lower = -4.0 * math.pi * math.pi
    psi_upper = 4.0 * math.pi * math.pi
    c2 = 1.0 / 2.0
    c3 = 1.0 / 6.0

    for i in range(iterations):
        B = R1 + R2 + A * (psi * c3 - 1.0) / math.sqrt(c2)
        if A > 0.0 and B < 0.0:
            psi_lower += math.pi
            B = -B

        chi = math.sqrt(B / c2)
        chi3 = chi * chi * chi
        tof_new = (chi3 * c3 + A * math.sqrt(B)) / sqrt_mu

        if math.fabs(tof_new - tof) < tolerance:
            f = 1.0 - B / R1
            g = A * math.sqrt(B / mu)
            g_dot = 1.0 - B / R2
            v1 = (r2 - f * r1) / g
            v2 = (g_dot * r2 - r1) / g
            return (v1, v2)

        if tof_new <= tof:
            psi_lower = psi
        else:
            psi_upper = psi

        psi = (psi_lower + psi_upper) * 0.5
        c2 = C2(psi)
        c3 = C3(psi)

    return None
# Set up solar system
spice.furnsh('solar_system.tm')
inject_time = spice.str2et('2022 Sep 28 00:00:00')
exit_time = spice.str2et('2023 Jun 1 00:00:00')
sun = StaticEntity("Sun", inject_time, True)
earth = StaticEntity("Earth", inject_time, False)
mars = StaticEntity("Mars Barycenter", inject_time, False)
(v1, v2) = lambert_solve(earth.get_position(inject_time), mars.get_position(exit_time), exit_time - inject_time, G * sun.m, 1000, 1e-4)
rocket = Entity(earth.x, v1, 100000, False)
propagator = Propagator([sun, earth, mars, rocket])
# Generate data
earth_pos = [[], [], []]
mars_pos = [[], [], []]
rocket_pos = [[], [], []]
t = inject_time
dt = 3600 # seconds
while t < exit_time:
    propagator.step(dt)

    earth_pos[0].append(earth.x[0])
    earth_pos[1].append(earth.x[1])
    earth_pos[2].append(earth.x[2])
    mars_pos[0].append(mars.x[0])
    mars_pos[1].append(mars.x[1])
    mars_pos[2].append(mars.x[2])
    rocket_pos[0].append(rocket.x[0])
    rocket_pos[1].append(rocket.x[1])
    rocket_pos[2].append(rocket.x[2])

    t += dt
# Plot data
plt.figure()
plt.title('Transfer orbit')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.plot(earth_pos[0], earth_pos[1], color='blue')
plt.plot(mars_pos[0], mars_pos[1], color='orange')
plt.plot(rocket_pos[0], rocket_pos[1], color='green')
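For what it's worth, a quick sanity check that can be appended to the notebook above; it is a minimal sketch that only reuses objects already defined there (rocket, mars, exit_time), nothing new assumed:

# Sanity check, run right after the propagation loop above:
# how far is the propagated rocket from Mars at the end of the transfer?
mars_arrival = mars.get_position(exit_time)               # meters, SSB-centered
miss = np.linalg.norm(np.subtract(rocket.x, mars_arrival))
print("Miss distance at exit_time: %.3e km" % (miss / 1000.0))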
EDIT:
I recently remodeled my code so that it uses an orbit class to represent the entities. This actually gave me acceptable results, even though the code is, in theory, not doing anything differently (as far as I can tell; obviously something must be different).
# imports implied by the snippet below (np, sp, ap and go are otherwise undefined here)
import numpy as np
import spiceypy as sp
import astropy as ap
import astropy.units        # loading the submodule makes ap.units available
import plotly.graph_objects as go

def norm(a):
    return np.dot(a, a)**0.5

def fabs(a):
    return -a if a < 0 else a

def newton_raphson(f, f_dot, x0, n):
    res = x0
    for i in range(n):
        res = res - f(res) / f_dot(res)
    return res

def get_ephemeris(body, time):
    state, _ = sp.spkezr(body, time, "J2000", "NONE", "SSB")
    return np.array(state[:3]) * ap.units.km, np.array(state[3:]) * ap.units.km / ap.units.s

def get_mu(body):
    _, mu = sp.bodvrd(body, "GM", 1)
    return mu * ap.units.km**3 / ap.units.s**2
class orbit:
    def __init__(self, position, velocity, mu):
        self.position = position
        self.velocity = velocity
        self.mu = mu

    #staticmethod
    def from_body(name, center, time):
        return static_orbit(name, center, time)

    def get_ephemerides(self, t, dt):
        time = 0
        positions = []
        velocities = []
        #M = self.M

        position = self.position
        velocity = self.velocity
        delta_t = dt * ap.units.s
        t1 = t * ap.units.s

        while time < t1:
            g = self.mu / np.dot(position, position)
            g_vec = g * -position / norm(position)
            velocity = np.add(velocity, g_vec * delta_t)
            position = np.add(position, velocity * delta_t)

            positions.append(position)
            velocities.append(velocity)

            time = time + delta_t

        return positions, velocities
class static_orbit(orbit):
    def __init__(self, name, center, time):
        p, v = get_ephemeris(name, time)
        pc, vc = get_ephemeris(center, time)

        super().__init__(p - pc, v - vc, get_mu(center))

        self.name = name
        self.center = center
        self.time = time

    def get_ephemerides(self, t, dt):
        time = 0
        positions = []
        velocities = []

        while time < t:
            p, v = get_ephemeris(self.name, time + self.time)
            pc, vc = get_ephemeris(self.center, time + self.time)
            positions.append(p - pc)
            velocities.append(v - vc)

            time += dt

        return positions, velocities
sp.furnsh('solar_system.tm')
t1 = sp.str2et('2022 Sep 28 00:00:00')
t2 = sp.str2et('2023 Jun 10 00:00:00')
eo = orbit.from_body("Earth", "Sun", t1)
mo = orbit.from_body("Mars Barycenter", "Sun", t1)
earth_x, earth_v = eo.get_ephemerides(t2 - t1, 3600)
mars_x, mars_v = mo.get_ephemerides(t2 - t1, 3600)
# `lambert` is the author's Lambert solver (not repeated in this snippet); it returns an object with the two velocities (v1 is used below)
l = lambert(earth_x[0], mars_x[-1], t2 - t1, get_mu("Sun"), 1000, 1e-6)
ro = orbit(earth_x[0], l.v1, get_mu("Sun"))
rocket_x, rocket_v = ro.get_ephemerides(t2 - t1, 60)
earth_x = np.array(earth_x)
mars_x = np.array(mars_x)
rocket_x = np.array(rocket_x)
fig = go.Figure()
fig.add_trace(go.Scatter3d(x=earth_x[:,0], y=earth_x[:,1], z=earth_x[:,2], marker_size=1, marker_color='blue'))
fig.add_trace(go.Scatter3d(x=mars_x[:,0], y=mars_x[:,1], z=mars_x[:,2], marker_size=1, marker_color='orange'))
fig.add_trace(go.Scatter3d(x=rocket_x[:,0], y=rocket_x[:,1], z=rocket_x[:,2], marker_size=1, marker_color='green'))
fig.show()
This method generated the following plot:
Also, before this is mentioned again: I have varied my integration step size and Lambert solver tolerance to no avail; the result did not change qualitatively.
So, I managed to figure out what the problem was after much head-scratching. I was simply not taking into account that the Sun is not located at (0,0,0) in my coordinate system. I thought this was negligible, but it is what made the difference. In the end, I simply subtracted the Sun's position vector from the Earth's and Mars' position vectors and passed those differences into the Lambert solver. This finally gave me the desired results.
The reason the error ended up looking so "small" (it didn't seem like an obvious bug at first) is that my coordinates are centered on the solar system barycenter, which is a few million kilometers away from the Sun, as one would expect.
Thanks for the comments!
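For anyone hitting the same issue, here is a minimal sketch of the fix described above, reusing the StaticEntity objects and lambert_solve from the first notebook. The velocity offset in the last line is my own addition, not part of the original fix: the Sun's barycentric velocity is a small correction compared to the roughly 30 km/s heliocentric speed.

# Sun-centered (heliocentric) position vectors, as described above:
r1 = earth.get_position(inject_time) - sun.get_position(inject_time)
r2 = mars.get_position(exit_time) - sun.get_position(exit_time)

v1, v2 = lambert_solve(r1, r2, exit_time - inject_time, G * sun.m, 1000, 1e-4)

# The propagation itself runs in the SSB frame, so the Sun's barycentric
# velocity can be added back to the heliocentric injection velocity:
rocket = Entity(earth.x, v1 + sun.get_velocity(inject_time), 100000, False)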
Here is my code. In the calculateOptimalLambda() function, I am attempting to declare a copy of n and store it as m, remove one point from m, and make some calculations and a graph. Then the loop should restart, make a fresh copy, remove the next point, and so on.
However, in the next iteration of the loop a point has already been removed. Eventually I run out of points to remove, and I get an error. How do I declare a fresh copy of m so I can remove the next point?
import numpy as np
from matplotlib import pyplot as plt
class Data:
    def __init__(self, points, sigma, lamda):
        self.points = points
        self.sigma = sigma
        self.sample = np.random.uniform(-1, 1, (points, 2))
        self.transformedData = np.ones((points, 5))
        self.weight = np.zeros((5, 1))
        self.lamda = lamda

    def changeLamda(self, x):
        self.lamda = x

    def removePoint(self, x):
        self.points = self.points - 1
        self.sample = np.delete(self.sample, x, 0)
        self.transformedData = np.delete(self.transformedData, x, 0)

    # simple accessors used below (implied by the calls in calculateOptimalLambda and twoD_plot)
    def getPoints(self):
        return self.points

    def getWeight(self):
        return self.weight

    def getSample(self):
        return self.sample

    def transformedFunction(self, x):
        transformedData = np.ones((1, 5))
        transformedData[0,1] = x
        transformedData[0,2] = 0.5 * (3*x**2 - 1)
        transformedData[0,3] = 0.5 * (5*x**3 - 3*x)
        transformedData[0,4] = 0.125 * (35*x**4 - 30*x**2 + 3)
        return np.dot(transformedData, self.weight)

    def setY(self):
        for i in range(len(self.sample[0:,0])):
            self.sample[i,1] = np.random.normal(0, self.sigma) + self.sample[i,0]**2

    def transform(self):
        for i in range(len(self.sample[0:,0])):
            self.transformedData[i,1] = self.sample[i,0]
            self.transformedData[i,2] = 0.5 * (3*self.sample[i,0]**2 - 1)
            self.transformedData[i,3] = 0.5 * (5*self.sample[i,0]**3 - 3*self.sample[i,0])
            self.transformedData[i,4] = 0.125 * (35*self.sample[i,0]**4 - 30*self.sample[i,0]**2 + 3)

    def calculateWeight(self):
        z = self.transformedData  # was `n.transformedData`; the instance should be used, not the global
        zProd = np.linalg.inv(np.matmul(np.transpose(z), z) + np.identity(5)*self.lamda)
        next1 = np.matmul(zProd, np.transpose(z))
        a = self.sample[0:,1]
        a = a.reshape((-1, 1))
        print(a)
        self.weight = np.matmul(next1, a)

    def calculateError(self):
        error = (np.matmul(self.transformedData, self.weight) - self.sample[1,0:])
        return error/self.points
def calculateOptimalLambda(n, L):
    a = 0
    for i in range(len(L)):
        n.changeLamda(L[i])
        for x in range(n.getPoints()):
            a += 1
            plt.subplot(4, 5, a)
            m = n
            m.removePoint(x)
            m.calculateWeight()
            weight = m.getWeight()
            error = m.calculateError()
            twoD_plot(m)
            print(error)
def twoD_plot(n):
    t = np.linspace(-1, 1, 400)
    x = np.square(t)
    plt.plot(t, x, 'b')
    error = 0
    y = x
    for i in range(len(t)):
        y[i] = n.transformedFunction(t[i])
        error += (y[i] - t[i]**2)**2
    """print(error/len(t))"""
    plt.plot(t, y, 'r')
    plt.scatter(n.getSample()[0:,0], n.getSample()[0:,1], c='g', marker='o')
n = Data(5,0.1,0)
n.setY()
n.transform()
n.calculateWeight()
L = [1, 0.01, 0.00001, 0]
calculateOptimalLambda(n, L)
plt.show()
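For reference, one way to get the fresh copy asked about above (a sketch of my own, not the original code): m = n only binds a second name to the same object, so removing a point from m also removes it from n. copy.deepcopy gives each iteration its own independent Data instance:

import copy

def calculateOptimalLambda(n, L):
    a = 0
    for i in range(len(L)):
        n.changeLamda(L[i])
        for x in range(n.getPoints()):
            a += 1
            plt.subplot(4, 5, a)
            m = copy.deepcopy(n)   # independent copy; n keeps all of its points
            m.removePoint(x)
            m.calculateWeight()
            error = m.calculateError()
            twoD_plot(m)
            print(error)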
I am trying to build a logistic regression model for a dataset consisting of two parameters, x₁ and x₂, but instead of analyzing just the two of them I have also added their squares, x₁² and x₂², and the cross term x₁·x₂.
At first glance everything looks fine and the error function is decreasing, but while drawing the plot of the decision boundary I noticed that after roughly 500 iterations something strange happens to it.
Here is an animation of the error function as a function of iterations and a respective plot of the decision boundary:
Now, I interpret the decision boundary as a quadratic function x₂ = f(x₁), where the relation between the two parameters is given by:
0.5 = θ₀ + θ₁x₁ + θ₂x₂ + θ₃x₁² + θ₄x₁x₂ + θ₅x₂²
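In other words, for a fixed x₁ the boundary values of x₂ are the roots of a quadratic equation in x₂. A small sketch of that computation with NumPy (my own illustration of the formula above, not taken from the code below):

import numpy as np

def boundary_x2(theta, x1):
    # Solve 0.5 = theta0 + theta1*x1 + theta2*x2 + theta3*x1**2 + theta4*x1*x2 + theta5*x2**2 for x2.
    a = theta[5]                                            # coefficient of x2**2
    b = theta[4] * x1 + theta[2]                            # coefficient of x2
    c = theta[3] * x1**2 + theta[1] * x1 + theta[0] - 0.5   # constant term
    roots = np.roots([a, b, c])           # np.roots trims a leading zero coefficient
    return roots[np.isreal(roots)].real   # keep only the real intersections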
Here is the python code I use to do everything:
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import log
from matplotlib.animation import FuncAnimation
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def loadData(filepath):
    source = ""
    try:
        f = open(filepath, "r")
        source = f.read()
        f.close()
    except IOError:
        print("Error while reading file (" + filepath + ")")
        return ""

    raw_data = source.split("\n")
    raw_data = [x.split(",") for x in raw_data if x != ""]
    raw_data = np.matrix(raw_data).astype(float)

    return (raw_data[:,:np.size(raw_data,1)-1], raw_data[:,np.size(raw_data, 1)-1:])
def standardize(dataset, skipfirst=True):
    means = np.amin(dataset, 0)     # note: despite the name, this is the column minimum (np.amin), not the mean
    deviation = np.std(dataset, 0)
    if skipfirst:
        dataset[:,1:] -= means[:,1:]
        dataset[:,1:] /= deviation[:,1:]
        return dataset
    else:
        dataset -= means
        dataset /= deviation
        return dataset
def error(X, Y, Theta):
    "Calculates error values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    sigmo = v_sigm(h_x)
    partial_vect = (Y - 1).T @ np.log(1 - sigmo) - Y.T @ np.log(sigmo)
    return 1/(2*np.size(Y, axis=0))*np.sum(partial_vect)

def gradientStep(X, Y, Theta, LR):
    "Returns new theta Values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    modif = -1*LR/np.size(Y, 0)*(h_x-Y)
    sums = np.sum(modif.T @ X, axis=0)
    return Theta + sums.T
X, Y = loadData("ex2data1.txt")
#add bias to X
X = np.append(np.ones((np.size(X, 0), 1)), X, axis=1)
added_params = [[x[1]**2, x[1]*x[2], x[2]**2] for x in np.array(X)]
X = np.append(X, np.matrix(added_params), axis=1)
#standardize X
X = standardize(X)
#create vector of parameters
Theta=np.zeros((np.size(X, 1), 1))
iterations = 3000
Theta_vals = []
Error_vals = []
for i in range(0, iterations):
    Theta_vals.append(np.asarray(Theta).flatten())
    Error_vals.append(error(X, Y, Theta))
    Theta = gradientStep(X, Y, Theta, 0.07)
#CALCULATING FINISHES HERE
#plot data:
fig = plt.figure()
def_ax = fig.add_subplot(211)
def_ax.set_xlim(np.amin(X[:,1:2]), np.amax(X[:,1:2]))
def_ax.set_ylim(np.amin(X[:,2:3]), np.amax(X[:,2:3]))
err_ax = fig.add_subplot(212)
err_ax.set_ylim(0, error(X, Y, Theta))
err_ax.set_xlim(0, iterations)
positive_X1 = []
positive_X2 = []
negative_X1 = []
negative_X2 = []
for i in range(0, np.size(Y, 0)):
    if Y[i, 0] == 1:
        positive_X1.append(X[i, 1])
        positive_X2.append(X[i, 2])
    else:
        negative_X1.append(X[i, 1])
        negative_X2.append(X[i, 2])
err_ax.set_ylim(np.amin(Error_vals), np.amax(Error_vals))
def animation(frame):
    global Theta_vals, Error_vals, def_ax, err_ax, positive_X1, positive_X2, negative_X1, negative_X2
    def_limX = def_ax.get_xlim()
    def_limY = def_ax.get_ylim()
    err_limX = err_ax.get_xlim()
    err_limY = err_ax.get_ylim()
    def_ax.clear()
    err_ax.clear()
    def_ax.set_xlim(def_limX)
    def_ax.set_ylim(def_limY)
    err_ax.set_xlim(err_limX)
    err_ax.set_ylim(err_limY)
    def_ax.scatter(positive_X1, positive_X2, marker="^")
    def_ax.scatter(negative_X1, negative_X2, marker="o")
    Theta = Theta_vals[frame]
    res_x = np.linspace(*def_ax.get_xlim(), num=5)
    delta_x = [(Theta[4]*x+Theta[2])**2-4*Theta[5]*(Theta[3]*x**2+Theta[1]*x+Theta[0]-0.5) for x in res_x]
    delta_x = [np.sqrt(x) if x >= 0 else 0 for x in delta_x]
    minb = [-(Theta[4]*x+Theta[2]) for x in res_x]
    res_1 = []
    res_2 = []
    for i in range(0, len(res_x)):
        if Theta[5] == 0:
            res_1.append(0)
            res_2.append(0)
        else:
            res_1.append((minb[i]+delta_x[i])/(2*Theta[5]))
            res_2.append((minb[i]-delta_x[i])/(2*Theta[5]))
    def_ax.plot(res_x, res_1)
    def_ax.plot(res_x, res_2)
    err_x = np.linspace(0, frame, frame)
    err_y = Error_vals[0:frame]
    err_ax.plot(err_x, err_y)
anim = FuncAnimation(fig, animation, frames=iterations, interval=3, repeat_delay=2000)
print(error(X, Y, Theta))
anim.save("anim.mp4")
What could be the reason for such strange behaviour?
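For reference, a minimal sketch of the textbook cross-entropy cost and batch gradient step for logistic regression (my own baseline for comparison, not the code above); note that the gradient uses sigmoid(X·θ) − y rather than the raw linear term.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost(X, y, theta):
    # Mean cross-entropy: -(1/m) * sum( y*log(h) + (1-y)*log(1-h) )
    m = y.shape[0]
    h = sigmoid(X @ theta)
    return -(y.T @ np.log(h) + (1 - y).T @ np.log(1 - h)).item() / m

def gradient_step(X, y, theta, lr):
    # theta <- theta - lr/m * X.T @ (sigmoid(X @ theta) - y)
    m = y.shape[0]
    h = sigmoid(X @ theta)
    return theta - (lr / m) * (X.T @ (h - y))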
I am trying to accelerate and/or decelerate a movie clip with the help of Python's moviepy module, but I can't seem to work it out properly. The script runs quite smoothly and without any errors, but I do not see any effect. Maybe my script is wrong and I can't detect the problem. I'm looking for help/tips from you; I do not need a complete solution, any hints will be of great help. I have been working on this for some time and thought I should post my problem here. Any help, tips, or guidance will be greatly appreciated. Thank you.
from moviepy.editor import *
from moviepy.video.tools.drawing import color_split
import os
dir = os.path.split(os.path.realpath(__file__))[0]
img = os.path.join('tmp', 'stock.jpg')
folder = 'tmp'
def f_accel_decel(t, old_d, new_d, abruptness=1, soonness=1.0):
    """
    abruptness
      negative abruptness (>-1): speed up down up
      zero abruptness : no effect
      positive abruptness: speed down up down
    soonness
      for positive abruptness, determines how soon the
      speedup occurs (0 < soonness < inf)
    """
    a = 1.0 + abruptness

    def _f(t):
        f1 = lambda t: (0.5)**(1 - a) * (t**a)
        f2 = lambda t: (1 - f1(1 - t))
        return (t < .5)*f1(t) + (t >= .5)*f2(t)

    return old_d * _f((t/new_d)**soonness)
def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0):
    """
    new_duration
      If None, will be that of the current clip.
    abruptness
      negative abruptness (>-1): speed up down up
      zero abruptness : no effect
      positive abruptness: speed down up down
    soonness
      for positive abruptness, determines how soon the
      speedup occurs (0 < soonness < inf)
    """
    if new_duration is None:
        new_duration = clip.duration

    fl = lambda t: f_accel_decel(t, clip.duration, new_duration,
                                 abruptness, soonness)

    return clip.fl_time(fl).set_duration(new_duration)
duration = 30

main_clip = ImageClip(img, duration=30)
W, H = main_clip.size
print(W, H)

clip1 = (main_clip
         .subclip(0, duration)
         .set_pos(lambda t: (max((0), (int(W - 0.5*W*t))), "center"))
         )

modifiedClip1 = accel_decel(clip1, abruptness=5, soonness=1.3)

cc = CompositeVideoClip([modifiedClip1], size=(1920,1080), bg_color=(232,54,18)).resize(0.5)
cc.preview(fps=24)
#cc.write_videofile("mask.avi", fps=25, codec="libx264", bitrate="1000K", threads=3)
I think the best way of accelerating and decelerating clip objects is using easing functions.
Some reference sites:
http://easings.net
http://www.gizma.com/easing/
http://gsgd.co.uk/sandbox/jquery/easing/
Here's part of a script I made when trying to understand these functions.
Maybe you can use some of these concepts to solve your issue.
from __future__ import division
from moviepy.editor import TextClip, CompositeVideoClip
import math
def efunc(x0, x1, dur, func='linear', **kwargs):
    # Return an easing function.
    # It will control a single dimension of the clip movement.
    # http://www.gizma.com/easing/
    def linear(t):
        return c*t/d + b

    def out_quad(t):
        t = t/d
        return -c * t*(t-2) + b

    def in_out_sine(t):
        return -c/2 * (math.cos(math.pi*t/d) - 1) + b

    def in_quint(t):
        t = t/d
        return c*t*t*t*t*t + b

    def in_out_circ(t):
        t /= d/2
        if t < 1:
            return -c/2 * (math.sqrt(1 - t*t) - 1) + b
        t -= 2
        return c/2 * (math.sqrt(1 - t*t) + 1) + b

    def out_bounce(t):
        # http://gsgd.co.uk/sandbox/jquery/easing/jquery.easing.1.3.js
        t = t/d
        if t < 1/2.75:
            return c*(7.5625*t*t) + b
        elif t < 2/2.75:
            t -= 1.5/2.75
            return c*(7.5625*t*t + .75) + b
        elif t < 2.5/2.75:
            t -= 2.25/2.75
            return c*(7.5625*t*t + .9375) + b
        else:
            t -= 2.625/2.75
            return c*(7.5625*t*t + .984375) + b

    # Kept the (t, b, c, d) notation found everywhere.
    b = x0
    c = x1 - x0
    d = dur

    return locals()[func]
def particle(x0, x1, y0, y1, d, func='linear', color='black', **kwargs):
    # Dummy clip for testing.
    def pos(t):
        return efunc(x0, x1, d, func=func)(t), efunc(y0, y1, d, func=func)(t)

    return (
        TextClip('*', fontsize=80, color=color)
        .set_position(pos)
        .set_duration(d)
    )
# Make a gif to visualize the behaviour of the functions:
easing_functions = [
    ('linear', 'red'),
    ('in_out_sine', 'green'),
    ('in_out_circ', 'violet'),
    ('out_quad', 'blue'),
    ('out_bounce', 'brown'),
    ('in_quint', 'black'),
]

d = 4
x0, x1 = 0, 370
clips = []
for i, (func, c) in enumerate(easing_functions):
    y = 40*i
    clips.append(particle(x0, x1, y, y, d=d, func=func, color=c))
    clips.append(particle(x1, x0, y, y, d=d, func=func, color=c).set_start(d))

clip = CompositeVideoClip(clips, size=(400,250), bg_color=(255,255,255))
clip.write_gif('easing.gif', fps=12)
The output of the script:
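Tying this back to the original question about speeding a clip up and down: the same easing curves can also be used as a time warp through fl_time, which is what the accel_decel in the question does with its own curve. A rough sketch, assuming the efunc defined above is in scope (untested against a specific moviepy version):

def eased_speed(clip, func='in_out_sine'):
    # Remap the clip's time axis with an easing curve: playback starts slow,
    # speeds up in the middle and slows down again, while keeping the duration.
    d = clip.duration
    ease = efunc(0, d, d, func=func)   # maps [0, d] onto [0, d] non-linearly
    return clip.fl_time(lambda t: ease(t)).set_duration(d)

# Note: fl_time only remaps the frames returned by get_frame; a static ImageClip
# looks the same at every t, so a pure time warp produces no visible change there.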
I need a Python module/package that provides a mesh on which I can do computational science. I am not doing graphics, so I don't think the Blender package is what I want.
Does anyone know of a good package?
The most useful packages out there are perhaps
mshr,
pygalmesh,
dmsh,
pygmsh,
MeshPy, and
meshzoo.
In addition, there is optimesh for improving the quality of any mesh.
(Disclaimer: I'm the author of pygmsh, pygalmesh, dmsh, meshzoo, and optimesh.)
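For instance, a minimal sketch with dmsh, based on its documented Circle/generate example (treat the exact signatures as assumptions to check against the installed version):

import dmsh

# Unit disk meshed with a target edge size of roughly 0.1.
geo = dmsh.Circle([0.0, 0.0], 1.0)
points, cells = dmsh.generate(geo, 0.1)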
If you're trying to solve FE- or CFD-style equations on a mesh, you can use MeshPy in 2 and 3 dimensions. MeshPy is a nice wrapper around the existing tools Tetgen and Triangle.
If you're looking for more typical graphics-style meshes, there was an interesting talk at PyCon 2011, "Algorithmic Generation of OpenGL Geometry", which described a pragmatic approach to procedural mesh generation. The code from the presentation is available online.
If you're interested in reconstruction of surfaces from data, you can't go past the Stanford 3D Scanning Repository, home of the Stanford Bunny.
Edit:
A dependency-free alternative may be to use something like gmsh, which is platform independent and uses similar tools to MeshPy in its back end.
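As a concrete starting point, a minimal 2-D MeshPy/Triangle sketch (based on MeshPy's documented triangle interface; the keyword argument is an assumption to verify against your version):

from meshpy.triangle import MeshInfo, build

# Triangulate the unit square given its boundary points and facets (edges).
mesh_info = MeshInfo()
mesh_info.set_points([(0, 0), (1, 0), (1, 1), (0, 1)])
mesh_info.set_facets([(0, 1), (1, 2), (2, 3), (3, 0)])

mesh = build(mesh_info, max_volume=0.01)   # max_volume caps the triangle area

points = list(mesh.points)
triangles = list(mesh.elements)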
I recommend using NumPy (especially if you've used MATLAB before). Many computational scientists and mechanical engineers working in Python would agree, but I'm biased, as it found its way into much of the last year of my research. It's part of SciPy: http://numpy.scipy.org/
I was fond of numpy.linspace(a,b,N), which makes an N-length vector of equally spaced values from a to b. You can use numpy.ndarray to make an N x M matrix, or, if you want 2D arrays, use numpy.meshgrid.
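A tiny example of the calls mentioned above (plain NumPy, nothing else assumed):

import numpy as np

x = np.linspace(0.0, 1.0, 5)      # 5 equally spaced values from 0 to 1
y = np.linspace(0.0, 2.0, 3)

X, Y = np.meshgrid(x, y)          # 2D coordinate arrays, both of shape (3, 5)
Z = np.sin(X) * np.cos(Y)         # evaluate a field on the grid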
Here is code adapted from Kardontchik's port:
import numpy as np
from numpy import pi as pi
from scipy.spatial import Delaunay
from scipy.optimize import fmin
import matplotlib.pylab as plt
def ktrimesh(p, bars, pflag=0):
    # create the (x,y) data for the plot
    xx1 = p[bars[:,0],0]; yy1 = p[bars[:,0],1]
    xx2 = p[bars[:,1],0]; yy2 = p[bars[:,1],1]
    xmin = np.min(p[:,0])
    xmax = np.max(p[:,0])
    ymin = np.min(p[:,1])
    ymax = np.max(p[:,1])
    xmin = xmin - 0.05*(xmax - xmin)
    xmax = xmax + 0.05*(xmax - xmin)
    ymin = ymin - 0.05*(ymax - ymin)
    ymax = ymax + 0.05*(ymax - ymin)

    plt.figure()
    for i in range(len(xx1)):
        xp = np.array([xx1[i],xx2[i]])
        yp = np.array([yy1[i],yy2[i]])
        plt.plot(xmin,ymin,'.',xmax,ymax,'.',markersize=0.1)
        plt.plot(xp,yp,'k')
    plt.axis('equal')
    if pflag == 0:
        stitle = 'Triangular Mesh'
    if pflag == 1:
        stitle = 'Visual Boundary Integrity Check'
    #plt.title('Triangular Mesh')
    plt.title(stitle)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
    return 1
def ccw_tri(p, t):
    """
    orients all the triangles counterclockwise
    """
    # vector A from vertex 0 to vertex 1
    # vector B from vertex 0 to vertex 2
    A01x = p[t[:,1],0] - p[t[:,0],0]
    A01y = p[t[:,1],1] - p[t[:,0],1]
    B02x = p[t[:,2],0] - p[t[:,0],0]
    B02y = p[t[:,2],1] - p[t[:,0],1]
    # if vertex 2 lies to the left of vector A the component z of
    # their vectorial product A^B is positive
    Cz = A01x*B02y - A01y*B02x
    a = t[np.where(Cz < 0)]
    b = t[np.where(Cz >= 0)]
    a[:,[1,2]] = a[:,[2,1]]
    t = np.concatenate((a, b))
    return t
def triqual_flag(p, t):
    # a(1,0), b(2,0), c(2,1)
    a = np.sqrt((p[t[:,1],0] - p[t[:,0],0])**2 + (p[t[:,1],1] - p[t[:,0],1])**2)
    b = np.sqrt((p[t[:,2],0] - p[t[:,0],0])**2 + (p[t[:,2],1] - p[t[:,0],1])**2)
    c = np.sqrt((p[t[:,2],0] - p[t[:,1],0])**2 + (p[t[:,2],1] - p[t[:,1],1])**2)
    A = 0.25*np.sqrt((a+b+c)*(b+c-a)*(a+c-b)*(a+b-c))
    R = 0.25*(a*b*c)/A
    r = 0.5*np.sqrt( (a+b-c)*(b+c-a)*(a+c-b)/(a+b+c) )
    q = 2.0*(r/R)

    min_edge = np.minimum(np.minimum(a,b),c)
    min_angle_deg = (180.0/np.pi)*np.arcsin(0.5*min_edge/R)

    min_q = np.min(q)
    min_ang = np.min(min_angle_deg)
    return min_q, min_ang
def triqual(p, t, fh, qlim=0.2):
    # a(1,0), b(2,0), c(2,1)
    a = np.sqrt((p[t[:,1],0] - p[t[:,0],0])**2 + (p[t[:,1],1] - p[t[:,0],1])**2)
    b = np.sqrt((p[t[:,2],0] - p[t[:,0],0])**2 + (p[t[:,2],1] - p[t[:,0],1])**2)
    c = np.sqrt((p[t[:,2],0] - p[t[:,1],0])**2 + (p[t[:,2],1] - p[t[:,1],1])**2)
    A = 0.25*np.sqrt((a+b+c)*(b+c-a)*(a+c-b)*(a+b-c))
    R = 0.25*(a*b*c)/A
    r = 0.5*np.sqrt( (a+b-c)*(b+c-a)*(a+c-b)/(a+b+c) )
    q = 2.0*(r/R)
    pmid = (p[t[:,0]] + p[t[:,1]] + p[t[:,2]])/3.0
    hmid = fh(pmid)
    Ah = A/hmid
    Anorm = Ah/np.mean(Ah)
    min_edge = np.minimum(np.minimum(a,b),c)
    min_angle_deg = (180.0/np.pi)*np.arcsin(0.5*min_edge/R)

    plt.figure()
    plt.subplot(3,1,1)
    plt.hist(q)
    plt.title('Histogram;Triangle Statistics:q-factor,Minimum Angle and Area')
    plt.subplot(3,1,2)
    plt.hist(min_angle_deg)
    plt.ylabel('Number of Triangles')
    plt.subplot(3,1,3)
    plt.hist(Anorm)
    plt.xlabel('Note: for equilateral triangles q = 1 and angle = 60 deg')
    plt.show()

    indq = np.where(q < qlim)  # indq is a tuple: len(indq) = 1

    if list(indq[0]) != []:
        print ('List of triangles with q < %5.3f and the (x,y) location of their nodes' % qlim)
        print ('')
        print ('q     t[i]     t[nodes]          [x,y][0]        [x,y][1]        [x,y][2]')
        for i in indq[0]:
            print ('%.2f  %4d  [%4d,%4d,%4d]  [%+.2f,%+.2f]  [%+.2f,%+.2f]  [%+.2f,%+.2f]' % \
                (q[i],i,t[i,0],t[i,1],t[i,2],p[t[i,0],0],p[t[i,0],1],p[t[i,1],0],p[t[i,1],1],p[t[i,2],0],p[t[i,2],1]))
        print ('')
        # end of detailed data on worst offenders
    return q, min_angle_deg, Anorm
class Circle:
    def __init__(self, xc, yc, r):
        self.xc, self.yc, self.r = xc, yc, r

    def __call__(self, p):
        xc, yc, r = self.xc, self.yc, self.r
        d = np.sqrt((p[:,0] - xc)**2 + (p[:,1] - yc)**2) - r
        return d
class Rectangle:
    def __init__(self, x1, x2, y1, y2):
        self.x1, self.x2, self.y1, self.y2 = x1, x2, y1, y2

    def __call__(self, p):
        x1, x2, y1, y2 = self.x1, self.x2, self.y1, self.y2
        d1 = p[:,1] - y1   # if p inside d1 > 0
        d2 = y2 - p[:,1]   # if p inside d2 > 0
        d3 = p[:,0] - x1   # if p inside d3 > 0
        d4 = x2 - p[:,0]   # if p inside d4 > 0
        d = -np.minimum(np.minimum(np.minimum(d1,d2),d3),d4)
        return d
class Polygon:
    def __init__(self, verts):
        self.verts = verts

    def __call__(self, p):
        verts = self.verts

        # close the polygon
        cverts = np.zeros((len(verts)+1,2))
        cverts[0:-1] = verts
        cverts[-1] = verts[0]

        # initialize
        inside = np.zeros(len(p))
        dist = np.zeros(len(p))
        Cz = np.zeros(len(verts))            # z-components of the vectorial products
        dist_to_edge = np.zeros(len(verts))
        in_ref = np.ones(len(verts))

        # if np.sign(Cz) == in_ref then point is inside
        for j in range(len(p)):
            Cz = (cverts[1:,0] - cverts[0:-1,0])*(p[j,1] - cverts[0:-1,1]) - \
                 (cverts[1:,1] - cverts[0:-1,1])*(p[j,0] - cverts[0:-1,0])
            dist_to_edge = Cz/np.sqrt( \
                (cverts[1:,0] - cverts[0:-1,0])**2 + \
                (cverts[1:,1] - cverts[0:-1,1])**2)
            inside[j] = int(np.array_equal(np.sign(Cz),in_ref))
            dist[j] = (1 - 2*inside[j])*np.min(np.abs(dist_to_edge))
        return dist
class Union:
    def __init__(self, fd1, fd2):
        self.fd1, self.fd2 = fd1, fd2

    def __call__(self, p):
        fd1, fd2 = self.fd1, self.fd2
        d = np.minimum(fd1(p),fd2(p))
        return d

class Diff:
    def __init__(self, fd1, fd2):
        self.fd1, self.fd2 = fd1, fd2

    def __call__(self, p):
        fd1, fd2 = self.fd1, self.fd2
        d = np.maximum(fd1(p),-fd2(p))
        return d

class Intersect:
    def __init__(self, fd1, fd2):
        self.fd1, self.fd2 = fd1, fd2

    def __call__(self, p):
        fd1, fd2 = self.fd1, self.fd2
        d = np.maximum(fd1(p),fd2(p))
        return d
class Protate:
    def __init__(self, phi):
        self.phi = phi

    def __call__(self, p):
        phi = self.phi
        c = np.cos(phi)
        s = np.sin(phi)
        temp = np.copy(p[:,0])
        rp = np.copy(p)
        rp[:,0] = c*p[:,0] - s*p[:,1]
        rp[:,1] = s*temp + c*p[:,1]
        return rp

class Pshift:
    def __init__(self, x0, y0):
        self.x0, self.y0 = x0, y0

    def __call__(self, p):
        x0, y0 = self.x0, self.y0
        p[:,0] = p[:,0] + x0
        p[:,1] = p[:,1] + y0
        return p
def Ellipse_dist_to_minimize(t, p, xc, yc, a, b):
    x = xc + a*np.cos(t)   # coord x of the point on the ellipse
    y = yc + b*np.sin(t)   # coord y of the point on the ellipse
    dist = (p[0] - x)**2 + (p[1] - y)**2
    return dist
class Ellipse:
    def __init__(self, xc, yc, a, b):
        self.xc, self.yc, self.a, self.b = xc, yc, a, b
        self.t, self.verts = self.pick_points_on_shape()

    def pick_points_on_shape(self):
        xc, yc, a, b = self.xc, self.yc, self.a, self.b
        c = np.array([xc,yc])
        t = np.linspace(0,(7.0/4.0)*pi,8)
        verts = np.zeros((8,2))
        verts[:,0] = c[0] + a*np.cos(t)
        verts[:,1] = c[1] + b*np.sin(t)
        return t, verts

    def inside_ellipse(self, p):
        xc, yc, a, b = self.xc, self.yc, self.a, self.b
        c = np.array([xc,yc])
        r, phase = self.rect_to_polar(p - c)
        r_ellipse = self.rellipse(phase)
        in_ref = np.ones(len(p))
        inside = 0.5 + 0.5*np.sign(r_ellipse - r)
        return inside

    def rect_to_polar(self, p):
        r = np.sqrt(p[:,0]**2 + p[:,1]**2)
        phase = np.arctan2(p[:,1],p[:,0])
        # note: np.arctan2(y,x) order; phase in +/- pi (+/- 180deg)
        return r, phase

    def rellipse(self, phi):
        a, b = self.a, self.b
        r = a*b/np.sqrt((b*np.cos(phi))**2 + (a*np.sin(phi))**2)
        return r

    def find_closest_vertex(self, point):
        t, verts = self.t, self.verts

        dist = np.zeros(len(t))
        for i in range(len(t)):
            dist[i] = (point[0] - verts[i,0])**2 + (point[1] - verts[i,1])**2
        ind = np.argmin(dist)
        t0 = t[ind]
        return t0

    def __call__(self, p):
        xc, yc, a, b = self.xc, self.yc, self.a, self.b
        t, verts = self.t, self.verts

        dist = np.zeros(len(p))
        inside = self.inside_ellipse(p)
        for j in range(len(p)):
            t0 = self.find_closest_vertex(p[j])  # initial guess to minimizer
            opt = fmin(Ellipse_dist_to_minimize,t0, \
                args=(p[j],xc,yc,a,b),full_output=1,disp=0)
            # add full_output=1 so we can retrieve the min dist(squared)
            # (2nd argument of opt array, 1st argument is the optimum t)
            min_dist = np.sqrt(opt[1])
            dist[j] = min_dist*(1 - 2*inside[j])
        return dist
def distmesh(fd, fh, h0, xmin, ymin, xmax, ymax, pfix, ttol=0.1, dptol=0.001, Iflag=1, qmin=1.0):
    geps = 0.001*h0; deltat = 0.2; Fscale = 1.2
    deps = h0 * np.sqrt(np.spacing(1))
    random_seed = 17

    h0x = h0; h0y = h0*np.sqrt(3)/2  # to obtain equilateral triangles
    Nx = int(np.floor((xmax - xmin)/h0x))
    Ny = int(np.floor((ymax - ymin)/h0y))
    x = np.linspace(xmin,xmax,Nx)
    y = np.linspace(ymin,ymax,Ny)
    # create the grid in the (x,y) plane
    xx,yy = np.meshgrid(x,y)
    xx[1::2] = xx[1::2] + h0x/2.0  # shifts even rows by h0x/2
    p = np.zeros((np.size(xx),2))
    p[:,0] = np.reshape(xx,np.size(xx))
    p[:,1] = np.reshape(yy,np.size(yy))

    p = np.delete(p,np.where(fd(p) > geps),axis=0)

    np.random.seed(random_seed)
    r0 = 1.0/fh(p)**2
    p = np.concatenate((pfix,p[np.random.rand(len(p))<r0/max(r0),:]))
    pold = np.inf

    Num_of_Delaunay_triangulations = 0
    Num_of_Node_movements = 0  # dp = F*dt

    while (1):
        Num_of_Node_movements += 1
        if Iflag == 1 or Iflag == 3:  # Newton flag
            print ('Num_of_Node_movements = %3d' % (Num_of_Node_movements))
        if np.max(np.sqrt(np.sum((p - pold)**2,axis = 1))) > ttol:
            Num_of_Delaunay_triangulations += 1
            if Iflag == 1 or Iflag == 3:  # Delaunay flag
                print ('Num_of_Delaunay_triangulations = %3d' % \
                    (Num_of_Delaunay_triangulations))
            pold = p
            tri = Delaunay(p)  # instantiate a class
            t = tri.vertices
            pmid = (p[t[:,0]] + p[t[:,1]] + p[t[:,2]])/3.0
            t = t[np.where(fd(pmid) < -geps)]
            bars = np.concatenate((t[:,[0,1]],t[:,[0,2]], t[:,[1,2]]))
            bars = np.unique(np.sort(bars),axis=0)
            if Iflag == 4:
                min_q, min_angle_deg = triqual_flag(p,t)
                print ('Del iter: %3d, min q = %5.2f, min angle = %3.0f deg' \
                    % (Num_of_Delaunay_triangulations, min_q, min_angle_deg))
                if min_q > qmin:
                    break
            if Iflag == 2 or Iflag == 3:
                ktrimesh(p,bars)

        # move mesh points based on bar lengths L and forces F
        barvec = p[bars[:,0],:] - p[bars[:,1],:]
        L = np.sqrt(np.sum(barvec**2,axis=1))
        hbars = 0.5*(fh(p[bars[:,0],:]) + fh(p[bars[:,1],:]))
        L0 = hbars*Fscale*np.sqrt(np.sum(L**2)/np.sum(hbars**2))
        F = np.maximum(L0-L,0)
        Fvec = np.column_stack((F,F))*(barvec/np.column_stack((L,L)))
        Ftot = np.zeros((len(p),2))
        n = len(bars)
        for j in range(n):
            Ftot[bars[j,0],:] += Fvec[j,:]  # the : for the (x,y) components
            Ftot[bars[j,1],:] -= Fvec[j,:]
        # force = 0 at fixed points, so they do not move:
        Ftot[0: len(pfix),:] = 0

        # update the node positions
        p = p + deltat*Ftot

        # bring outside points back to the boundary
        d = fd(p); ix = d > 0  # find points outside (d > 0)
        dpx = np.column_stack((p[ix,0] + deps,p[ix,1]))
        dgradx = (fd(dpx) - d[ix])/deps
        dpy = np.column_stack((p[ix,0], p[ix,1] + deps))
        dgrady = (fd(dpy) - d[ix])/deps
        p[ix,:] = p[ix,:] - np.column_stack((dgradx*d[ix], dgrady*d[ix]))

        # termination criterium: all interior nodes move less than dptol:
        if max(np.sqrt(np.sum(deltat*Ftot[d<-geps,:]**2,axis=1))/h0) < dptol:
            break

    final_tri = Delaunay(p)  # another instantiation of the class
    t = final_tri.vertices
    pmid = (p[t[:,0]] + p[t[:,1]] + p[t[:,2]])/3.0
    # keep the triangles whose geometrical center is inside the shape
    t = t[np.where(fd(pmid) < -geps)]
    bars = np.concatenate((t[:,[0,1]],t[:,[0,2]], t[:,[1,2]]))
    # delete repeated bars
    #bars = unique_rows(np.sort(bars))
    bars = np.unique(np.sort(bars),axis=0)
    # orient all the triangles counterclockwise (ccw)
    t = ccw_tri(p,t)
    # graphical output of the current mesh
    ktrimesh(p,bars)
    triqual(p,t,fh)
    return p,t,bars
def boundary_bars(t):
    # create the bars (edges) of every triangle
    bars = np.concatenate((t[:,[0,1]],t[:,[0,2]], t[:,[1,2]]))
    # sort all the bars
    data = np.sort(bars)
    # find the bars that are not repeated
    Delaunay_bars = dict()
    for row in data:
        row = tuple(row)
        if row in Delaunay_bars:
            Delaunay_bars[row] += 1
        else:
            Delaunay_bars[row] = 1
    # return the keys of Delaunay_bars whose value is 1 (non-repeated bars)
    bbars = []
    for key in Delaunay_bars:
        if Delaunay_bars[key] == 1:
            bbars.append(key)
    bbars = np.asarray(bbars)
    return bbars
def plot_shapes(xc, yc, r):
    # circle for plotting
    t_cir = np.linspace(0,2*pi)
    x_cir = xc + r*np.cos(t_cir)
    y_cir = yc + r*np.sin(t_cir)

    plt.figure()
    plt.plot(x_cir,y_cir)
    plt.grid()
    plt.title('Shapes')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.axis('equal')
    #plt.show()
    return
plt.close('all')
xc = 0; yc = 0; r = 1.0
x1,y1 = -1.0,-2.0
x2,y2 = 2.0,3.0
plot_shapes(xc,yc,r)
xmin = -1.5; ymin = -1.5
xmax = 1.5; ymax = 1.5
h0 = 0.4
pfix = np.zeros((0,2)) # null 2D array, no fixed points provided
fd = Circle(xc,yc,r)
fh = lambda p: np.ones(len(p))
p,t,bars = distmesh(fd,fh,h0,xmin,ymin,xmax,ymax,pfix,Iflag=4)
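As a further usage example of the distance-function classes defined above (my own driver, written in the same pattern as the circle run): mesh a rectangle with a circular hole by combining Rectangle and Diff.

# Rectangle [-2,2] x [-1,1] with a circular hole of radius 0.5 at the origin
fd_rect = Rectangle(-2.0, 2.0, -1.0, 1.0)
fd_hole = Circle(0.0, 0.0, 0.5)
fd = Diff(fd_rect, fd_hole)        # signed distance: inside the rectangle, outside the circle

fh = lambda p: np.ones(len(p))     # uniform target edge length
h0 = 0.2
pfix = np.zeros((0,2))             # no fixed points

p, t, bars = distmesh(fd, fh, h0, -2.5, -1.5, 2.5, 1.5, pfix, Iflag=4)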