I am working on a Jupyter Notebook and I am using the following ipywidget to set a threshold value:
Thr = widgets.IntSlider(value=-17, min=-30, max=-13, step=1, description='Threshold: ', disabled=False, continuous_update=True, orientation='horizontal', readout=True, readout_format='d')
Thr
Next, I am masking a numpy array using that value with:
import numpy.ma as ma
test= ma.masked_less_equal(S_images[0], Thr.value)
And finally I plot the result with:
plt.figure(figsize = (15,15))
plt.imshow(test[0], cmap='gray')
The ipywidget is in a different Jupyter cell than the rest of the code, so when I change the value of Thr I have to manually re-run the cell where the masking and plotting take place.
My question: I have often seen interactive plots where changing a parameter (in my case the ipywidget Thr) automatically updates the plot.
I see that widgets.IntSlider has a continuous_update parameter, which seems close to what I want, but I still cannot get that behaviour.
Is this doable?
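Conceptually, I imagine something like the sketch below (a rough, untested guess on my part at how `observe` might be used; the `out` Output widget is not something I already have in my notebook):
import numpy.ma as ma
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display, clear_output

out = widgets.Output()  # hypothetical output area to redraw into

def on_threshold_change(change):
    with out:
        clear_output(wait=True)
        masked = ma.masked_less_equal(S_images[0], change['new'])
        plt.figure(figsize=(15, 15))
        plt.imshow(masked, cmap='gray')
        plt.show()

Thr.observe(on_threshold_change, names='value')
display(widgets.VBox([Thr, out]))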
EDIT
Starting from ac24's comment, I am adapting the example he proposes:
from IPython.display import display, clear_output
import ipywidgets as ipy
import matplotlib.pyplot as plt
import numpy as np
# setup figure
n = 10
out = ipy.Output()
# show random mesh
def update(idx):
    with out:
        clear_output()
        fig, ax = plt.subplots(figsize=(5, 5))
        h = ax.imshow(S_images[0])  # here I put my image
        h.set_data(np.ma.masked_less_equal(S_images[0], slider.value))  # here I mask according to `slider.value`
        fig.canvas.flush_events()
        fig.canvas.draw()
        plt.show()

slider = ipy.IntSlider(min=0, max=10, orientation='vertical')
widget = ipy.interactive(update, idx=slider)
layout = ipy.Layout(
    # display='flex',
    # flex_flow='row',
    # justify_content='space-between',
    # align_items='center',
)
widgets = ipy.HBox(children=(slider, out), layout=layout)
display(widgets)
The example works very nicely and is just what I was looking for. However, I have a small question regarding the layout. Originally I am working with three images, so I would like to have them displayed as follows, each one with its slider next to it: (the image below is not real, I just made it up to represent what I would like)
EDIT 2
On this occasion the question is: once I select a value in the slider, I would like to write that raster to a GeoTIFF. For this I am using the following code:
with rasterio.open('/Path/20190331_VV_Crop') as src:
    ras_meta = src.profile

with rasterio.open('/path/Threshold.tif', 'w', **ras_meta) as dst:
    dst.write(X)
However, I am not sure how to reference the numpy array in dst.write(X)
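For what it's worth, my current guess (untested) is to fill the masked pixels and cast to the raster's dtype before writing band 1, something like:
import numpy as np
import rasterio

# untested guess: `slider.value` is the threshold chosen in the widget above
masked = np.ma.masked_less_equal(S_images[0], slider.value)

with rasterio.open('/Path/20190331_VV_Crop') as src:
    ras_meta = src.profile

with rasterio.open('/path/Threshold.tif', 'w', **ras_meta) as dst:
    dst.write(masked.filled(0).astype(ras_meta['dtype']), 1)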
I've adapted the example I gave into a class, as you want to link a specific output and slider instance but create multiple groups of them. Setting the layout of the output widget prevents the widget from resizing every time you move the slider.
from IPython.display import display, clear_output
import ipywidgets as ipy
import matplotlib.pyplot as plt
import numpy as np
# setup figure
n = 10
class SliderAndImage():
    # show random mesh
    def update(self, idx):
        with self.out:
            clear_output()
            fig, ax = plt.subplots(figsize=(5, 5))
            h = ax.imshow(np.random.rand(n, n))
            h.set_data(np.random.rand(n, n))
            fig.canvas.flush_events()
            fig.canvas.draw()
            plt.show()

    def make_slider_and_image(self):
        self.out = ipy.Output(layout=ipy.Layout(width='200px', height='200px'))
        slider = ipy.IntSlider(min=0, max=10, orientation='vertical')
        widget = ipy.interactive(self.update, idx=slider)
        layout = ipy.Layout(
            # display='flex',
            # flex_flow='row',
            # justify_content='space-between',
            # align_items='center',
        )
        widgets = ipy.HBox(children=(slider, self.out), layout=layout)
        return widgets

children = []
for _ in range(3):
    widgets = SliderAndImage()
    children.append(widgets.make_slider_and_image())
display(ipy.HBox(children))
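If you want each group to show one of your own images instead of the random mesh, one possible adaptation is sketched below (untested; SliderAndMaskedImage is just an illustrative name, and the slider range would need to match your threshold range):
import numpy as np

class SliderAndMaskedImage(SliderAndImage):
    def __init__(self, image):
        self.image = image

    def update(self, idx):
        # mask the stored image with the current slider value instead of random data
        with self.out:
            clear_output()
            fig, ax = plt.subplots(figsize=(5, 5))
            ax.imshow(np.ma.masked_less_equal(self.image, idx), cmap='gray')
            plt.show()

children = [SliderAndMaskedImage(img).make_slider_and_image() for img in S_images[:3]]
display(ipy.HBox(children))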
Related
In my project, I have many polygons to draw for each time step.
At each step the number of polygons varies, so it is difficult to keep Axes.patches and translate them to make the animation.
I want to create an animation from the final figures (as shown after calling matplotlib.pyplot.show()). How can I do this?
Take the sine curve as an example:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure()
ims = []
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
z = np.cos(x)
for i in range(1, 100):
    tmpx = x[:i]
    tmpy = y[:i]
    tmpz = z[:i]
    plt.plot(tmpx, tmpz)
    im = plt.plot(tmpx, tmpy)
    ims.append(im)
ani = animation.ArtistAnimation(fig, ims, interval=200)
ani.save('/home/test.gif', writer='imagemagick')
plt.show()
There are two curves: an animated sine curve and a static cosine curve.
The sine curve is kept as Line2D objects for each step.
The cosine curve stays static for each step.
In this way, we show different Artist objects for each step.
But I want to keep the rasterized Line2D figure for each step.
I found the AxesImage/FigureImage classes, but I don't know how to save the rasterized figure and make them work.
I tried to convert figure.canvas to an AxesImage with the following code:
def fig2AxesImage(fig):
    import PIL.Image as Image
    fig.canvas.draw()
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf = buf.reshape((h, w, 4))
    # canvas.tostring_argb gives the pixmap in ARGB mode; roll the alpha channel to get RGBA
    buf = np.roll(buf, 3, axis=2)
    image = Image.frombytes("RGBA", (w, h), buf.tobytes())
    image = np.asarray(image)
    return plt.imshow(image, animated=True)
But this way I have to clear the canvas at the start of the next frame, which makes the final animation a blank video (although the .jpg figures I output for each step have the right content).
Has anyone saved rasterized canvas figures of matplotlib.pyplot.figure() as an animation video before?
celluloid for python 2.7
''' copy from celluloid '''
# from typing import Dict, List  # not supported by Python 2.7, so commented out
from collections import defaultdict
from matplotlib.figure import Figure
from matplotlib.artist import Artist
from matplotlib.animation import ArtistAnimation

__version__ = '0.2.0'

class Camera:
    def __init__(self, figure):
        self.figure_ = figure
        self.offsets_ = {
            k: defaultdict(int)
            for k in ['collections', 'patches', 'lines', 'texts', 'artists', 'images']
        }
        self.photos_ = []

    def snap(self):
        frame_artists = []
        for i, axis in enumerate(self.figure_.axes):
            if axis.legend_ is not None:
                axis.add_artist(axis.legend_)
            for name in self.offsets_:
                new_artists = getattr(axis, name)[self.offsets_[name][i]:]
                frame_artists += new_artists
                self.offsets_[name][i] += len(new_artists)
        self.photos_.append(frame_artists)

    def animate(self):
        return ArtistAnimation(self.figure_, self.photos_)
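A quick usage sketch (my own illustration following the upstream celluloid README, not part of the code above): draw each frame, snap it, then build and save the animation.
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
camera = Camera(fig)
x = np.linspace(0, 2 * np.pi, 100)
for i in range(1, 100):
    plt.plot(x[:i], np.cos(x[:i]), color='blue')    # cosine, redrawn each frame
    plt.plot(x[:i], np.sin(x[:i]), color='orange')  # growing sine
    camera.snap()  # record the artists added since the last snap
anim = camera.animate()
anim.save('sin_cos.gif', writer='imagemagick')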
I have a couple of images that show how something changes in time. I visualize them as many images on the same plot with the following code:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
img = [] # some array of images
fig = plt.figure()
for i in xrange(6):
    fig.add_subplot(2, 3, i + 1)
    plt.imshow(img[i], cmap=cm.Greys_r)
plt.show()
and get something like:
Which is OK, but I would rather animate them to get something like this video. How can I achieve this with Python, and preferably (though not necessarily) with matplotlib?
For future me, here is what I ended up with:
import os
import glob
import subprocess

def generate_video(img):
    # write one PNG per frame into `folder`
    for i in xrange(len(img)):
        plt.imshow(img[i], cmap=cm.Greys_r)
        plt.savefig(folder + "/file%02d.png" % i)

    os.chdir("your_folder")
    subprocess.call([
        'ffmpeg', '-framerate', '8', '-i', 'file%02d.png', '-r', '30', '-pix_fmt', 'yuv420p',
        'video_name.mp4'
    ])
    for file_name in glob.glob("*.png"):
        os.remove(file_name)
Another solution is to use ArtistAnimation from matplotlib.animation, as described in the animated image demo. Adapting it for your example would be:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
img = [] # some array of images
frames = [] # for storing the generated images
fig = plt.figure()
for i in xrange(6):
    frames.append([plt.imshow(img[i], cmap=cm.Greys_r, animated=True)])

ani = animation.ArtistAnimation(fig, frames, interval=50, blit=True,
                                repeat_delay=1000)
# ani.save('movie.mp4')
plt.show()
You could export images from matplotlib using the Agg interface.
See these examples:
Agg Buffer to Array
CanvasAgg demo
Here is your full code:
# imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2

# Use Agg backend for canvas
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

# OpenCV video writer; created inside the loop once the frame size is known,
# because VideoWriter expects the (width, height) of the rendered frames
video = None

# loop over your images
for i in xrange(len(img)):
    fig = plt.figure()
    plt.imshow(img[i], cmap=cm.Greys_r)

    # put pixel buffer in numpy array
    canvas = FigureCanvas(fig)
    canvas.draw()
    mat = np.array(canvas.renderer._renderer)
    mat = cv2.cvtColor(mat, cv2.COLOR_RGB2BGR)

    # create OpenCV video writer on the first frame
    if video is None:
        video = cv2.VideoWriter('video.mp4', cv2.VideoWriter_fourcc('A', 'V', 'C', '1'),
                                1, (mat.shape[1], mat.shape[0]))

    # write frame to video
    video.write(mat)

# close video writer
cv2.destroyAllWindows()
video.release()
You can try drawing the images (frames) sequentially with a delay. If you have many frames, it might make sense to reduce the wait time between frames in the plt.pause() function.
# need this line if you're using jupyter notebooks
%matplotlib notebook
import matplotlib.pyplot as plt

x = []  # Some array of images
fig = plt.figure()
viewer = fig.add_subplot(111)
plt.ion()   # Turns interactive mode on (probably unnecessary)
fig.show()  # Initially shows the figure

for i in range(len(x)):
    viewer.clear()       # Clears the previous image
    viewer.imshow(x[i])  # Loads the new image
    plt.pause(.1)        # Delay in seconds
    fig.canvas.draw()    # Draws the image to the screen
You could, for example, export the images to png using plt.savefig("file%d.png" % i), then use ffmpeg to generate the video.
Here you can find help on generating a video from images.
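A minimal sketch of that approach (assumptions: the frames are saved as file0.png, file1.png, ... in the current directory and ffmpeg is on the PATH):
import subprocess
import matplotlib.pyplot as plt
import matplotlib.cm as cm

for i in range(len(img)):
    plt.imshow(img[i], cmap=cm.Greys_r)
    plt.savefig("file%d.png" % i)

subprocess.call(['ffmpeg', '-y', '-framerate', '10', '-i', 'file%d.png',
                 '-pix_fmt', 'yuv420p', 'video.mp4'])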
I implemented a handy script that should suit you and other newcomers. Try it out here.
For your example:
imagelist = YOUR-IMAGE-LIST

def redraw_fn(f, axes):
    img = imagelist[f]
    if not redraw_fn.initialized:
        redraw_fn.im = axes.imshow(img, animated=True)
        redraw_fn.initialized = True
    else:
        redraw_fn.im.set_array(img)

redraw_fn.initialized = False

videofig(len(imagelist), redraw_fn, play_fps=30)
Here's a copy-pastable function, handy if you're dealing with long videos and are using a streaming iterator (from here):
from typing import Iterator, Optional, Tuple
from pathlib import Path
import matplotlib as mpl
import matplotlib.animation  # imported explicitly so that mpl.animation.writers is available
import matplotlib.pyplot as plt
import numpy as np

def write_animation(
    itr: Iterator[np.ndarray],
    out_file: Path,
    dpi: int = 50,
    fps: int = 30,
    title: str = "Animation",
    comment: Optional[str] = None,
    writer: str = "ffmpeg",
) -> None:
    """Function that writes an animation from a stream of input tensors.

    Args:
        itr: The image iterator, yielding images with shape (H, W, C).
        out_file: The path to the output file.
        dpi: Dots per inch for the output image.
        fps: Frames per second for the video.
        title: Title for the video metadata.
        comment: Comment for the video metadata.
        writer: The Matplotlib animation writer to use (if you use the
            default one, make sure you have `ffmpeg` installed on your
            system).
    """
    first_img = next(itr)
    height, width, _ = first_img.shape
    fig, ax = plt.subplots(figsize=(width / dpi, height / dpi))

    # Ensures that there's no extra space around the image.
    fig.subplots_adjust(
        left=0,
        bottom=0,
        right=1,
        top=1,
        wspace=None,
        hspace=None,
    )

    # Creates the writer with the given metadata.
    Writer = mpl.animation.writers[writer]
    metadata = {
        "title": title,
        "artist": __name__,
        "comment": comment,
    }
    mpl_writer = Writer(
        fps=fps,
        metadata={k: v for k, v in metadata.items() if v is not None},
    )

    with mpl_writer.saving(fig, out_file, dpi=dpi):
        im = ax.imshow(first_img, interpolation="nearest")
        mpl_writer.grab_frame()

        for img in itr:
            im.set_data(img)
            mpl_writer.grab_frame()
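For example, a hypothetical caller (the frame_stream generator and the output name are my own assumptions, not part of the function above) might stream frames lazily so the whole video never has to fit in memory:
import numpy as np
from pathlib import Path

def frame_stream(n_frames=60, size=64):
    # yields (H, W, C) images one at a time
    for _ in range(n_frames):
        yield np.random.rand(size, size, 3)

write_animation(frame_stream(), Path("stream_demo.mp4"), fps=15)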
Original question:
How do you set the text in a bqplot label using a slider?
I have solved that problem and show my answer below.
New question:
Why is this the behavior? Doesn't it seem crazy for bqplot to expose the text as a list and then not allow item assignment?
I want to replace the label text with something new.
In particular, I want the label to reflect a changing slider value.
After being confused for some time, this is what I have found:
To create a label, the text (and x and y position too) have to be given as a list:
label(text=['my label']).
You can't just pass the string:
label(text='my label').
To update this text, you must replace the whole list:
my_label.text = ['new label'].
You cannot just assign the element:
my_label.text[0] = 'new label'.
Here is code that demonstrates the behavior.
# Imports
import numpy as np
from bqplot import pyplot as plt
from ipywidgets import IntSlider, FloatSlider, Layout
from IPython.display import display
# Make plot and label
xs = np.linspace(0, 10, 100)
ys = np.sin(xs)
layout=Layout(width='40%', height='300px')
fig = plt.figure(layout=layout)
line = plt.plot(xs, ys)
value = 123.45678
lab = plt.label(text=['{:3.3f}'.format(value)], x=[5], y=[0.2], colors=['Red'], align='middle')
# Create and link sliders
good_slider = FloatSlider(description='good')#, min=0, max=100, step=0.001)
bad_slider = FloatSlider(description='bad')#, min=0, max=100, step=0.001)
def good_update_label(change):
    lab.text = ['{:3.3f}'.format(change['new'])]

def bad_update_label(change):
    lab.text[0] = '{:3.3f}'.format(change['new'])
good_slider.observe(good_update_label, 'value')
bad_slider.observe(bad_update_label, 'value')
# See what happens!
display(good_slider, bad_slider, fig)
I am trying to save an animation with a completely transparent background. Setting:
fig1 = (...,facecolor=(1,1,1,0))
Does not seem to work. As a side note, if you do that and view the plot, you get weird transparency effects and a lagging animation. I'm curious why that happens too, but mostly I just want the background to save as transparent.
If I then try:
line_ani.save('lines1.gif', writer='imagemagick',savefig_kwargs={"facecolor": (1,1,1,0)})
Then I get an output which does not have a transparent background and which makes the lines thick. Same curiosity as above as to why setting the figure alpha to 0 would give this effect.
Another attempt:
fig1 = (...,facecolor=(1,1,1,0))
line_ani.save(...,savefig_kwargs={"transparent": None})
Also doesn't produce a transparent background.
If I just include the facecolor in the dictionary, then it gives the undesired line thickening bug.
line_ani.save(...,savefig_kwargs={"transparent": None,"facecolor":(1,1,1,0)})
The code is below.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
from matplotlib.pyplot import figure
def update_line(num, data, line):
    line.set_data(data[..., :num])
    return line,

def plots():
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xticks([])
    plt.yticks([])
    plt.box()

# Since I'm calling things twice, it's convenient to define these
fs = (3, 3)
inter = 100
frames = 219
lw = 0.25
alph = 0

fig1 = plt.figure(figsize=fs)
l, = plt.plot([], [], 'r', linewidth=lw)

# Generate placeholder for data and set initial conditions
DAT = np.zeros((2, 300))
DAT[0][0] = 0
DAT[1][0] = 1
theta = 2 * np.pi * (1 / np.e + 0.01)

# 2D Rotation Matrix
def R(x):
    return [[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]

# Generate the data
for i in range(len(DAT[0])):
    if i < len(DAT[0]) - 1:
        DAT[0][i+1] = DAT[0][i]*R(theta)[0][0] + DAT[1][i]*R(theta)[0][1]
        DAT[1][i+1] = DAT[0][i]*R(theta)[1][0] + DAT[1][i]*R(theta)[1][1]

# Animate the data
plots()
line_ani = animation.FuncAnimation(fig1, update_line, frames, fargs=(DAT, l),
                                   interval=inter, blit=True, repeat_delay=2000)
plt.show()

# Save the animation
matplotlib.use("Agg")
fig1 = plt.figure(figsize=fs)
l, = plt.plot([], [], 'r', linewidth=lw)
plots()
line_ani = animation.FuncAnimation(fig1, update_line, frames, fargs=(DAT, l),
                                   interval=inter, blit=True, repeat_delay=2000)
print("Saving animation...")
now = time.time()
line_ani.save('lines1.gif', writer='imagemagick', savefig_kwargs={"transparent": None})
later = time.time()
print("Saved in time: ", int(later - now), "seconds")
If you run the code, it should show you the animation and then save it. It will also report the runtime.
Setting transparent = True does the trick...
line_ani.save('lines1.gif', writer='imagemagick',savefig_kwargs={"transparent": True})
Note: I will be answering this question myself to help other people who come across this problem in the future. Feel free to submit your own answers if you want, but know that it's already answered!
How can I overlay a masked image with one colormap onto another image with a different colormap in Chaco? Also, how can I add colorbars for each of these?
I'm not taking 100% credit for this; with a quick search online I've found that you can do a simple overlay with the code below:
Source where found:
http://docs.enthought.com/chaco/user_manual/containers.html#overlayplotcontainer
Ref Code:
class OverlayImageExample(HasTraits):
    plot = Instance(OverlayImage)

    traits_view = View(
        Item('plot', editor=ComponentEditor(), show_label=False),
        width=800, height=600, resizable=True
    )

    def _plot_default(self):
        # Create data
        x = linspace(-5, 15.0, 100)
        y = jn(3, x)
        pd = ArrayPlotData(index=x, value=y)

        zoomable_plot = Plot(pd)
        zoomable_plot.plot(('index', 'value'),
                           name='external', color='red', line_width=3)

        # Attach tools to the plot
        zoom = ZoomTool(component=zoomable_plot,
                        tool_mode="box", always_on=False)
        zoomable_plot.overlays.append(zoom)
        zoomable_plot.tools.append(PanTool(zoomable_plot))

        # Create a second inset plot, not resizable, not zoom-able
        inset_plot = Plot(pd)
        inset_plot.plot(('index', 'value'), color='blue')
        inset_plot.set(resizable='',
                       bounds=[250, 150],
                       position=[450, 350],
                       border_visible=True
                       )

        # Create a container and add our plots
        container = OverlayPlotContainer()
        container.add(zoomable_plot)
        container.add(inset_plot)
        return container
Overlaying images in this manner in Chaco is not well documented, but it is definitely possible. Firstly, how do you plot a masked image with Chaco? When plotting with Plot().img_plot(), Chaco treats np.nan values as transparent pixels. For example, plotting:
img = np.eye(100)
img[img==0] = np.nan
would plot a diagonal line with a transparent background.
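For instance, a minimal sketch of feeding such an array to img_plot (assuming the usual chaco.api imports; this is just an illustration, not part of the full example below):
import numpy as np
from chaco.api import ArrayPlotData, Plot

img = np.eye(100)
img[img == 0] = np.nan   # nan pixels render as transparent

pd = ArrayPlotData(masked=img)
plot = Plot(pd)
plot.img_plot("masked")  # draws only the diagonal; the background shows through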
But how do you actually overlay this image on another image?
There are two main methods to do this.
Make two separate plots and stack them using an OverlayPlotContainer
Make one plot and draw both images in that one plot
The advantage to the second method is that both images will use the same axes. Also if you plot a second image in the same plot as the first, it keeps the same pixel aspect ratio. This means that if you plot a 100x100 image and then overlay a 50x50 image on top of it, the overlaying image will only take up 25% of the whole plot starting at (0,0).
There are some problems with the second method, so I will explain how to correct them.
When you plot multiple images on the same Plot object (using img_plot()), they will both use the same color_mapper by default. This means that both will be scaled to the same range. This may not be the required result, so you must create new color_mappers for both images.
Here's some example code with TraitsUI, which was adapted from the Qt code.
from traits.api import HasTraits, Instance
from traitsui.api import Item, View
from enable.api import ComponentEditor
from chaco.api import ArrayPlotData, Plot, ColorBar, LinearMapper, HPlotContainer, DataRange1D, ImageData
import chaco.default_colormaps
#
import numpy as np
class ImagePlot(HasTraits):
    plot = Instance(HPlotContainer)

    traits_view = View(
        Item('plot', editor=ComponentEditor(), show_label=False),
        width=500, height=500, resizable=True, title="Chaco Plot")

    def _plot_default(self):
        bottomImage = np.reshape(np.repeat(np.linspace(0, 5, 100), 100), (100, 100))
        topImage = np.eye(50)
        topImage = topImage * np.reshape(np.repeat(np.linspace(-2, 2, 50), 50), (50, 50))
        topImage[topImage == 0] = np.nan

        bottomImageData = ImageData()
        bottomImageData.set_data(bottomImage)

        topImageData = ImageData()
        topImageData.set_data(topImage)

        plotData = ArrayPlotData(imgData=bottomImageData, imgData2=topImageData)
        plot = Plot(plotData, name='My Plot')
        plot.img_plot("imgData")
        plot.img_plot("imgData2")
        # Note: DO NOT specify a colormap in the img_plot!
        plot.aspect_ratio = 1.0

        bottomRange = DataRange1D()
        bottomRange.sources = [plotData.get_data("imgData")]
        topRange = DataRange1D()
        topRange.sources = [plotData.get_data("imgData2")]
        plot.plots['plot0'][0].color_mapper = chaco.default_colormaps.gray(bottomRange)
        plot.plots['plot1'][0].color_mapper = chaco.default_colormaps.jet(topRange)

        colormapperBottom = plot.plots['plot0'][0].color_mapper
        colormapperTop = plot.plots['plot1'][0].color_mapper

        colorbarBottom = ColorBar(index_mapper=LinearMapper(range=colormapperBottom.range),
                                  color_mapper=colormapperBottom, orientation='v',
                                  resizable='v', width=30, padding=20)
        colorbarBottom.padding_top = plot.padding_top
        colorbarBottom.padding_bottom = plot.padding_bottom

        colorbarTop = ColorBar(index_mapper=LinearMapper(range=colormapperTop.range),
                               color_mapper=colormapperTop, orientation='v',
                               resizable='v', width=30, padding=20)
        colorbarTop.padding_top = plot.padding_top
        colorbarTop.padding_bottom = plot.padding_bottom

        container = HPlotContainer(resizable="hv", bgcolor='transparent',
                                   fill_padding=True, padding=0)
        container.spacing = 0
        container.add(plot)
        container.add(colorbarBottom)
        container.add(colorbarTop)

        return container

if __name__ == "__main__":
    ImagePlot().configure_traits()