Can anyone explain why this contour graph gets distorted when the x-values are large? In my mind the figure should stay exactly the same, but perhaps I'm misunderstanding something.
The figure when using shift = 0 (or up to shift = 100000):
The figure when using shift = 1000000:
The code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as ml
shift = 0  # works fine
# shift = 1000000  # distorts the graph (why?)
x = np.array([7, 3, 3, 2, 6, 7, 2, 7, 2, 7]) + shift
y = np.array([1, 8, 1, 5, 6, 5, 8, 4, 1, 8])
z = np.array([0.5485118, 0.53838622, 0.93582844, 0.13831005, 0.45559201,
              0.4032324, 0.73419538, 0.03208198, 0.55323105, 0.95654892])
xi = np.linspace(min(x), max(x), 200)
yi = np.linspace(min(y), max(y), 200)
zi = ml.griddata(x, y, z, xi, yi, interp='linear')
plt.contour(xi, yi, zi, 15, linewidths=0.5, colors='k')
plt.pcolormesh(xi, yi, zi, cmap=plt.get_cmap('rainbow'))
plt.colorbar()
plt.scatter(x, y, marker='o', c='b', s=5, zorder=10)
plt.show()
I'm using matplotlib==1.4.3, numpy==1.9.2, python==2.7.11
In case you're wondering, this code is just a demonstration of the problem. I noticed this odd behaviour when trying to plot a contour whose x-axis consists of time offsets (in seconds) from the Unix UTC epoch, and I'm now trying to figure out the right way to deal with it.
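In the meantime, a workaround sketch, assuming the distortion comes from limited floating-point precision inside the triangulation that griddata performs: do the interpolation in a coordinate frame shifted towards the origin, and shift back only for display.
x0 = x.min()
xs = x - x0  # small coordinates, safe for the triangulation
xi = np.linspace(xs.min(), xs.max(), 200)
yi = np.linspace(y.min(), y.max(), 200)
zi = ml.griddata(xs, y, z, xi, yi, interp='linear')
# plotting at large coordinates is fine; only the interpolation was shifted
plt.contour(xi + x0, yi, zi, 15, linewidths=0.5, colors='k')
plt.pcolormesh(xi + x0, yi, zi, cmap=plt.get_cmap('rainbow'))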
Thanks!
Related
I have some unstructured 2D data that I would like to interpolate on a unit-offset grid (i.e. grid indices start at 1, not 0) using scipy, and plot using matplotlib. The code is below:
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate
#         X  Y    Z
data = [[ 9, 2, 2.0],
        [ 3, 3, 5.0],
        [ 6, 4, 1.0],
        [ 2, 6, 3.0],
        [10, 7, 4.5],
        [ 5, 8, 2.0]]
data = np.array(data)
coords = data[:, 0:2]
zvals = data[:, 2]
# Create the grid on which to interpolate (unit offset)
nx = 10
ny = 10
x = np.arange(nx)
x += 1
y = np.arange(ny)
y += 1
grid_x, grid_y = np.meshgrid(x, y, indexing='xy')
# Interpolate
grid_z1 = scipy.interpolate.griddata(coords, zvals, (grid_x, grid_y), method='linear')
# Plot the results
fig, axs = plt.subplots()
plt.imshow(grid_z1)
plt.plot(coords[:,0], coords[:,1], 'k.', ms=10)
plt.show()
The point data seem to be in the right place but matplotlib seems to be plotting the gridded data as zero-offset not unit-offset. I am obviously missing something - just not sure what. Thanks in advance!
Update
Passing origin='lower' together with an explicit extent to imshow fixes the offset. Note that extent specifies the outer edges of the image, so with pixel centers at 1..10 the edges sit half a cell further out:
fig, axs = plt.subplots()
# extent gives the image *edges*: centers at 1..10 means edges at 0.5..10.5
plt.imshow(grid_z1, origin='lower', extent=[0.5, 10.5, 0.5, 10.5])
plt.plot(coords[:,0], coords[:,1], 'k.', ms=10)
plt.show()
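An alternative sketch that sidesteps the extent bookkeeping entirely: pcolormesh accepts the coordinate arrays themselves, and with shading='nearest' (Matplotlib 3.3+) it treats grid_x and grid_y as cell centers.
fig, ax = plt.subplots()
# grid_x/grid_y are taken as cell centers with shading='nearest'
ax.pcolormesh(grid_x, grid_y, grid_z1, shading='nearest')
ax.plot(coords[:, 0], coords[:, 1], 'k.', ms=10)
plt.show()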
If I understand correctly, you want to take xlim and ylim from the scatter plot rather than from the heatmap:
plt.plot(coords[:,0], coords[:,1], 'k.', ms=10)
ax = plt.gca()
xlim, ylim = ax.get_xlim(), ax.get_ylim()
plt.imshow(grid_z1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
Output:
I want to write a program that generates an array of coordinates to follow for drawing a shape like the white one here, given the blue points. Does anyone know how to do something like that, or can you at least give me a tip?
You could use e.g. InterpolatedUnivariateSpline to interpolate the points. As these spline functions are 1D, you can calculate the x and y positions separately, each as a function of a new parameter t going from 0 to 1.
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
# positions of the given points
px = [1, 4, 3, 2, 5]
py = [1, 3, 4, 3, 1]
# 5 t-values, at t=0 in point 1, at t=1 reaching point 5
pt = np.linspace(0, 1, len(px))
# sx and sy are functions that interpolate the points at the given t-values
sx = interpolate.InterpolatedUnivariateSpline(pt, px)
sy = interpolate.InterpolatedUnivariateSpline(pt, py)
# calculate many intermediate values
t = np.linspace(0, 1, 500)
x = sx(t)
y = sy(t)
# show the original points together with the spline
fig, ax = plt.subplots(facecolor='black')
ax.axis('off')
plt.scatter(px, py, s=80, color='skyblue')
plt.plot(x, y, color='white')
for i, (xi, yi) in enumerate(zip(px, py), start=1):
    ax.text(xi, yi, f'\n {i}', ha='left', va='center', size=30, color='yellow')
plt.show()
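If the shape should close on itself, a periodic parametric spline is a possible variant; a sketch using scipy.interpolate.splprep, which fits x and y together as functions of a single parameter:
# sketch of a closed variant: per=True fits a periodic spline; the first
# point must be repeated at the end of the input sequences
px_c = px + [px[0]]
py_c = py + [py[0]]
tck, u = interpolate.splprep([px_c, py_c], s=0, per=True)
xc, yc = interpolate.splev(np.linspace(0, 1, 500), tck)
plt.plot(xc, yc, color='white')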
In Python matplotlib, how can you get the line in a line or step plot to display a gradient based on the y-value?
Example plot (made in Tableau):
Code for step plot with a line that changes gradient according to x-value, adapted from this answer:
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots(figsize=(10, 4))
x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
y = [2, 3, 9, 10, 2, 9, 0, 1, 9, 1, -8]
T = np.linspace(0, 1, np.size(x))**2  # color ramp from 0 to 1, squared
s = 1  # step size between colored chunks
for i in range(0, len(x) - s, s):
    ax.step(x[i:i + s + 1], y[i:i + s + 1], marker='.', color=(0.0, 0.5, T[i]))
ax.tick_params(axis='both', colors='lightgray', labelsize=8)
The following code is inspired by the multicolored-line example from the matplotlib docs. First the horizontal line segments are drawn and colored using their y-value. The vertical segments are subdivided into small chunks so they can be colored individually.
vmin of the norm is set a bit lower to avoid the too-light range of the colormap.
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import numpy as np
x = np.arange(50)
y = np.random.randint(-3, 4, x.size).cumsum()
fig, ax = plt.subplots()
norm = plt.Normalize(y.min() - y.ptp() * .2, y.max())
cmap = 'inferno_r' # 'Reds'
horizontal_lines = np.array([x[:-1], y[:-1], x[1:], y[:-1]]).T.reshape(-1, 2, 2)
hor_lc = LineCollection(horizontal_lines, cmap=cmap, norm=norm)
hor_lc.set_array(y[:-1])
ax.add_collection(hor_lc)
factor = 10
long_y0 = np.linspace(y[:-1], y[1:], factor)[:-1, :].T.ravel()
long_y1 = np.linspace(y[:-1], y[1:], factor)[1:, :].T.ravel()
long_x = np.repeat(x[1:], factor - 1)
vertical_lines = np.array([long_x, long_y0, long_x, long_y1]).T.reshape(-1, 2, 2)
ver_lc = LineCollection(vertical_lines, cmap=cmap, norm=norm)
ver_lc.set_array((long_y0 + long_y1) / 2)
ax.add_collection(ver_lc)
ax.scatter(x, y, c=y, cmap=cmap, norm=norm)
plt.autoscale()  # needed in case the scatter plot is omitted
plt.show()
Here is another example, with a black background. In this case the darkest part of the colormap is avoided. The changed code parts are:
y = np.random.randint(-9, 10, x.size)
ax.patch.set_color('black')
norm = plt.Normalize(y.min(), y.max() + y.ptp() * .2)
cmap = 'plasma_r'
Here is an example with a TwoSlopeNorm and the blue-white-red colormap:
from matplotlib.colors import TwoSlopeNorm
y = np.random.uniform(-1, 1, x.size * 10).cumsum()[::10]
y = (y - y.min()) / y.ptp() * 15 - 5
norm = TwoSlopeNorm(vmin=-5, vcenter=0, vmax=10)
cmap = 'bwr'
How can I interpolate a hysteresis loop at specific x points? Multiple related questions/answers are available on Stack Overflow regarding B-spline interpolation using scipy.interpolate.splprep (other questions here or here). However, I have hundreds of hysteresis loops at very similar (but not exactly the same) x positions, and I would like to perform B-spline interpolation on all of them at specific x coordinates.
Taking a previous example:
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
x = np.array([23, 24, 24, 25, 25])
y = np.array([13, 12, 13, 12, 13])
# append the starting x,y coordinates
x = np.r_[x, x[0]]
y = np.r_[y, y[0]]
# fit splines to x=f(u) and y=g(u), treating both as periodic. also note that s=0
# is needed in order to force the spline fit to pass through all the input points.
tck, u = interpolate.splprep([x, y], s=0, per=True)
# evaluate the spline fits for 1000 evenly spaced distance values
xi, yi = interpolate.splev(np.linspace(0, 1, 1000), tck)
# plot the result
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, 'or')
ax.plot(xi, yi, '-b')
plt.show()
Is it possible to provide specific x values to interpolate.splev? I get unexpected results:
x2, y2 = interpolate.splev(np.linspace(start=23, stop=25, num=30), tck)
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, 'or')
ax.plot(x2, y2, '-b')
plt.show()
The b-spline gives x and y positions for a given u (between 0 and 1).
Getting y positions for a given x involves solving for the inverse, which is not unique: there can be many y's corresponding to one x (in the given example there are places with 4 y's, for example at x=24).
A simple way to get a list of (x, y) pairs for x between two limits is to create a boolean mask:
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
x = np.array([23, 24, 24, 25, 25])
y = np.array([13, 12, 13, 12, 13])
# append the starting x,y coordinates
x = np.r_[x, x[0]]
y = np.r_[y, y[0]]
tck, u = interpolate.splprep([x, y], s=0, per=True)
# evaluate the spline fits for 1000 evenly spaced distance values
xi, yi = interpolate.splev(np.linspace(0, 1, 1000), tck)
# plot the result
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, 'or')
ax.plot(xi, yi, '-b')
mask = (xi >= 24) & (xi <= 25)  # boolean mask; avoids shadowing the built-in filter()
x2 = xi[mask]
y2 = yi[mask]
ax.scatter(x2, y2, color='c')
plt.show()
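To evaluate the loop at one exact x position instead of a filtered range, one can solve x(u) = x0 for the parameter u numerically, one root per branch of the loop. A sketch of that approach; y_at_x is a hypothetical helper name, not a scipy function:
from scipy import optimize

def y_at_x(tck, x0, n=1000):
    # hypothetical helper: sample the parameter densely, bracket every sign
    # change of x(u) - x0, refine each root with brentq, then evaluate y there
    u = np.linspace(0, 1, n)
    xu = interpolate.splev(u, tck)[0]
    d = xu - x0
    ys = []
    for i in np.nonzero(np.sign(d[:-1]) * np.sign(d[1:]) < 0)[0]:
        r = optimize.brentq(lambda t: interpolate.splev(t, tck)[0] - x0, u[i], u[i + 1])
        ys.append(float(interpolate.splev(r, tck)[1]))
    return ys

print(y_at_x(tck, 24.0))  # all y values of the loop at x = 24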
I have a pandas DataFrame with non-uniformly spaced data points given by an x, y and z column, where x and y are pairs of variables and z is the dependent variable. For example:
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
import numpy as np
import pandas as pd
df = pd.DataFrame({'x': [0, 0, 1, 1, 3, 3, 3, 4, 4, 4],
                   'y': [0, 1, 0, 1, 0.2, 0.7, 1.4, 0.2, 1.4, 2],
                   'z': [50, 40, 40, 30, 30, 30, 20, 20, 20, 10]})
x = df['x']
y = df['y']
z = df['z']
I want to do a contour plot of the dependent variable z over x and y. For this, I create a new grid to interpolate the data on using matplotlib.mlab's griddata function.
xi = np.linspace(x.min(), x.max(), 100)
yi = np.linspace(y.min(), y.max(), 100)
z_grid = griddata(x, y, z, xi, yi, interp='linear')
plt.contourf(xi, yi, z_grid, 15)
plt.scatter(x, y, color='k') # The original data points
plt.show()
While this works, the output is not what I want. I do not want griddata to interpolate outside of the boundaries given by the min and max values of the x and y data. The plots below show what comes up after calling plt.show(), with the area of the data I want interpolated and contoured highlighted in purple. The contour outside the purple line is supposed to be blank. How can I go about masking the outlying data?
The linked question unfortunately does not answer mine, as I don't have a clear mathematical way to define the conditions for the triangulation. Is it possible to define a condition to mask the data based on the data alone, taking the above DataFrame as an example?
As seen in the answer to this question, one may introduce a condition to mask the values.
The sentence from the question, "I do not want griddata to interpolate outside of the boundaries given by the min and max values of the x and y data.", implies that there is some min/max condition present which can be used.
Should that not be the case, one may clip the contour using a path. The points of this path need to be specified as there is no generic way of knowing which points should be the edges. The code below does this for three different possible paths.
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.mlab import griddata
import numpy as np
import pandas as pd
df = pd.DataFrame({'x': [0, 0, 1, 1, 3, 3, 3, 4, 4, 4],
                   'y': [0, 1, 0, 1, 0.2, 0.7, 1.4, 0.2, 1.4, 2],
                   'z': [50, 40, 40, 30, 30, 30, 20, 20, 20, 10]})
x = df['x']
y = df['y']
z = df['z']
xi = np.linspace(x.min(), x.max(), 100)
yi = np.linspace(y.min(), y.max(), 100)
z_grid = griddata(x, y, z, xi, yi, interp='linear')
clipindex = [[0, 2, 4, 7, 8, 9, 6, 3, 1, 0],
             [0, 2, 4, 7, 5, 8, 9, 6, 3, 1, 0],
             [0, 2, 4, 7, 8, 9, 6, 5, 3, 1, 0]]
fig, axes = plt.subplots(ncols=3, sharey=True)
for i, ax in enumerate(axes):
    cont = ax.contourf(xi, yi, z_grid, 15)
    ax.scatter(x, y, color='k')  # The original data points
    ax.plot(x[clipindex[i]], y[clipindex[i]], color="crimson")
    clippath = Path(np.c_[x[clipindex[i]], y[clipindex[i]]])
    patch = PathPatch(clippath, facecolor='none')
    ax.add_patch(patch)
    for c in cont.collections:
        c.set_clip_path(patch)
plt.show()
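A hedged side note: on newer Matplotlib (3.8+), a ContourSet is a single artist and cont.collections is deprecated, so the clipping loop can, as far as I know, be replaced with one call:
cont.set_clip_path(patch)  # Matplotlib >= 3.8; replaces the collections loop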
Ernest's answer is a great solution, but very slow for lots of contours. Instead of clipping every one of them, I built a mask by constructing the complement polygon of the desired clipping mask.
Here is the code based on Ernest's accepted answer:
import numpy as np
import pandas as pd
import matplotlib.tri as tri
import matplotlib.pyplot as plt
from descartes import PolygonPatch
from shapely.geometry import Polygon
df = pd.DataFrame({'x': [0, 0, 1, 1, 3, 3, 3, 4, 4, 4],
                   'y': [0, 1, 0, 1, 0.2, 0.7, 1.4, 0.2, 1.4, 2],
                   'z': [50, 40, 40, 30, 30, 30, 20, 20, 20, 10]})
points = df[['x', 'y']]
values = df[['z']]
xi = np.linspace(points.x.min(), points.x.max(), 100)
yi = np.linspace(points.y.min(), points.y.max(), 100)
triang = tri.Triangulation(points.x, points.y)
interpolator = tri.LinearTriInterpolator(triang, values.z)
Xi, Yi = np.meshgrid(xi, yi)
zi = interpolator(Xi, Yi)
clipindex = [[0, 2, 4, 7, 8, 9, 6, 3, 1, 0],
             [0, 2, 4, 7, 5, 8, 9, 6, 3, 1, 0],
             [0, 2, 4, 7, 8, 9, 6, 5, 3, 1, 0]]
fig, axes = plt.subplots(ncols=3, sharey=True, figsize=(10,4))
for i, ax in enumerate(axes):
    ax.set_xlim(-0.5, 4.5)
    ax.set_ylim(-0.2, 2.2)
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    cont = ax.contourf(Xi, Yi, zi, 15)
    ax.scatter(points.x, points.y, color='k', zorder=2)  # The original data points
    ax.plot(points.x[clipindex[i]], points.y[clipindex[i]], color="crimson", zorder=1)
    #### 'Universe' polygon covering the whole axes:
    ext_bound = Polygon([(xlim[0], ylim[0]), (xlim[0], ylim[1]), (xlim[1], ylim[1]),
                         (xlim[1], ylim[0]), (xlim[0], ylim[0])])
    #### Clipping mask as polygon:
    inner_bound = Polygon([(row.x, row.y) for idx, row in points.iloc[clipindex[i]].iterrows()])
    #### Mask as the symmetric difference of both polygons:
    mask = ext_bound.symmetric_difference(inner_bound)
    ax.add_patch(PolygonPatch(mask, facecolor='white', zorder=1, edgecolor='white'))
plt.show()
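One caveat: descartes predates Shapely 2.x, and its PolygonPatch no longer works there, as far as I know. A sketch of a plain-matplotlib replacement (polygon_to_patch is a hypothetical helper name), assuming the mask stays a single polygon, possibly with holes:
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch

def polygon_to_patch(poly, **kwargs):
    # hypothetical helper: turn a shapely polygon into a PathPatch,
    # concatenating the exterior ring and any interior rings (holes)
    rings = [np.asarray(poly.exterior.coords)]
    rings += [np.asarray(r.coords) for r in poly.interiors]
    codes = np.concatenate([[Path.MOVETO] + [Path.LINETO] * (len(r) - 2) + [Path.CLOSEPOLY]
                            for r in rings])
    return PathPatch(Path(np.concatenate(rings), codes), **kwargs)

# drop-in for the PolygonPatch line above:
# ax.add_patch(polygon_to_patch(mask, facecolor='white', edgecolor='white', zorder=1))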