I am currently trying to save a mayavi animation generated by my simulation, so I don't have to rerun the code each time to see it.
from mayavi import mlab

plt = mlab.points3d(x_coord, y_coord, z_coord)
msplt = plt.mlab_source

@mlab.animate(delay=100)
def anim():
    f = mlab.gcf()
    while True:
        # animation updates here
        msplt.set(x=x_coord, y=y_coord, z=z_coord)
        yield

anim()
mlab.savefig(filename='ani.mp4')
mlab.show()
I have tried saving it through the pipeline editor and just get a still of the frame it is on, and mlab.savefig doesn't generate a file. Any help appreciated.
The following will work for viewing the animation, saving each frame as a PNG, and then converting the frames to a movie, BUT it is perhaps fastest in this case to forgo playing the animation, just cycle through the data saving figures, and then use this method to make a video.
from mayavi import mlab
import numpy as np
import os

# Output path for your animation images
out_path = './'
out_path = os.path.abspath(out_path)
fps = 20
prefix = 'ani'
ext = '.png'

# Produce some nice data.
n_mer, n_long = 6, 11
pi = np.pi
dphi = pi/1000.0
phi = np.arange(0.0, 2*pi + 0.5*dphi, dphi, 'd')
mu = phi*n_mer
x = np.cos(mu)*(1 + np.cos(n_long*mu/n_mer)*0.5)
y = np.sin(mu)*(1 + np.cos(n_long*mu/n_mer)*0.5)
z = np.sin(n_long*mu/n_mer)*0.5

# Init plot
plt = mlab.points3d(x[0], y[0], z[0])
padding = len(str(len(x)))

# Define data source and update routine
msplt = plt.mlab_source

@mlab.animate(delay=10)
def anim():
    f = mlab.gcf()
    for i in range(len(x)):
        # animation updates here
        msplt.set(x=x[i], y=y[i], z=z[i])
        # create zeros for padding index positions for organization
        zeros = '0'*(padding - len(str(i)))
        # concatenate filename with zero-padded index number as suffix
        filename = os.path.join(out_path, '{}_{}{}{}'.format(prefix, zeros, i, ext))
        mlab.savefig(filename=filename)
        yield

anim()
mlab.view(distance=15)
mlab.show()

import subprocess
ffmpeg_fname = os.path.join(out_path, '{}_%0{}d{}'.format(prefix, padding, ext))
cmd = 'ffmpeg -f image2 -r {} -i {} -vcodec mpeg4 -y {}.mp4'.format(fps, ffmpeg_fname, prefix)
print(cmd)
subprocess.check_output(['bash', '-c', cmd])

# Remove temp image files with extension
[os.remove(f) for f in os.listdir(out_path) if f.endswith(ext)]
Instead of saving the images to disk and then stitching them together, it's also possible to pipe them directly to ffmpeg using the ffmpeg-python package.
import ffmpeg
from mayavi import mlab
from PIL import Image

# Set up the figure
width = 200
height = 200
mlab.options.offscreen = True  # Stops the view window popping up and makes sure you get the correct size screenshots.
fig = mlab.figure(size=(width, height))

# ... set up the scene ...

# Define update function
def update_scene(idx):
    # -- update the scene
    return

# Initialise ffmpeg process
output_args = {
    'pix_fmt': 'yuv444p',
    'vcodec': 'libx264',
    'r': 25,
}
process = (
    ffmpeg
    .input('pipe:', format='rawvideo', pix_fmt='rgb24', s=f'{width}x{height}')
    .output('animation.mp4', **output_args)
    .overwrite_output()
    .run_async(pipe_stdin=True)
)

fig.scene._lift()  # Throws an error without this.
for i in range(100):
    update_scene(i)
    screenshot = mlab.screenshot(mode='rgb', antialiased=True)
    frame = Image.fromarray(screenshot, 'RGB')
    process.stdin.write(frame.tobytes())

# Flush video
process.stdin.close()
process.wait()
Why is my code slowing down as time goes on? When the code initially starts, the loop takes ~0.1 seconds to run. By the 90th pass I am at over 1 second. I am displaying and saving the image. I noticed that if I comment out fig.canvas.draw() and fig.canvas.flush_events(), the code runs as expected. The issue is with displaying and updating the interactive window. Is there a variable I need to clear?
from flirpy.camera.lepton import Lepton
import matplotlib.pyplot as plt
import numpy as np
import cv2
from PIL import Image as im
import os, sys
import time
%matplotlib notebook

savefold = 'Therm'
if not os.path.exists(savefold):
    os.makedirs(savefold)

camera = Lepton()

plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)

def getFrame():
    image = camera.grab()
    fimage = np.around((image/100 - 273.15) * 9 / 5 + 32, 2)
    cimage = np.around(image/100 - 273.15, 2)
    kimage = np.around(image/100, 2)
    cimage = np.where(cimage < 20, 20, cimage)
    cimage = np.where(cimage > 30, 30, cimage)
    getFrame.z = cimage
    return getFrame.z

getFrame.z = None

i = 0
while True:
    tic = time.perf_counter()
    npimage = getFrame()
    data = im.fromarray(npimage)
    plt.imshow(data)
    fig.canvas.draw()
    fig.canvas.flush_events()
    plt.imsave(os.path.join(savefold, f'test_{i:05}.png'), data)
    i += 1
    toc = time.perf_counter()
    print(f'Time to execute: {toc - tic:0.4f} seconds')
camera.close()
Because you are adding one image for each iteration: all the previous images are still in the matplotlib buffer, and they all get rendered!
You need to clear the buffer. Try using this line of code before the imshow command:
plt.gca().images.clear()
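For context, here is a minimal sketch of where that line sits in the loop; the random array is just a stand-in for camera.grab():

import matplotlib.pyplot as plt
import numpy as np

plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)

for i in range(100):
    frame = np.random.rand(60, 80)  # stand-in for getFrame()
    plt.gca().images.clear()        # drop previously drawn images so they don't accumulate
    plt.imshow(frame)
    fig.canvas.draw()
    fig.canvas.flush_events()

An equivalent approach is to call imshow once before the loop and update the existing image with set_data inside it.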
I have two csv files that I am uploading from an EC2 instance to an S3 bucket along with a few other files. All the other files are being uploaded just fine, but for my csv files, though they are uploaded, there seems to be no data inside them, even though the local copies on the instance show the data. I'm not sure why the bucket says 0 bytes.
The csv file is part of another larger program. Here is the code.
from boto3.session import Session
from botocore.exceptions import NoCredentialsError
import botocore
import boto3
import zipfile
import darknet
import os
import cv2
import glob
import csv
import numpy as np
import shutil

global lat_start, lon_start

#HELPER FUNCTION DEFINITIONS
ACCESS_KEY = '*********'
SECRET_KEY = '******D'
def image_detection(image_path, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images.
    # Create one with the image we reuse for each detect
    width = darknet.network_width(network)
    height = darknet.network_height(network)
    darknet_image = darknet.make_image(width, height, 3)
    image = cv2.imread(image_path)
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_resized = cv2.resize(image_rgb, (width, height), interpolation=cv2.INTER_LINEAR)
    darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
    detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)
    darknet.free_image(darknet_image)
    image = darknet.draw_boxes(detections, image_resized, class_colors)
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections
def discretize_line(lat_start, lon_start, d_element, d, bearing):
    # d_element -> how many elements we need in a line segment
    R = 6371.0*1000.0
    # -1 because in case of 10 elements/points we also want len(lat_array) to be the same
    dstep = d/(d_element-1)  # meters
    dist_list = np.ones(int(d/dstep))*dstep
    brg = np.radians(bearing)
    # This will append lat and lon into arrays which contain
    # small segments of distance
    lat_array = np.array([np.radians(lat_start)])  # rads
    lon_array = np.array([np.radians(lon_start)])  # rads
    for i, dist in enumerate(dist_list):
        ## the last element makes the waypoint shifted, so we break
        if i >= d_element:
            break
        lat1 = lat_array[i]
        lon1 = lon_array[i]
        Ad = dist/R
        lat2 = np.arcsin(np.sin(lat1)*np.cos(Ad) + np.cos(lat1)*np.sin(Ad)*np.cos(brg))
        lon2 = lon1 + np.arctan2(np.sin(brg)*np.sin(Ad)*np.cos(lat1), np.cos(Ad) - np.sin(lat1)*np.sin(lat2))
        lat_array = np.append(lat_array, lat2)
        lon_array = np.append(lon_array, lon2)
    return lat_array, lon_array
def get_distance_bearing(lat1, lon1, lat2, lon2):
    R = 6371.0*1000.0
    lat_start = np.radians(lat1)
    lon_start = np.radians(lon1)
    lat_end = np.radians(lat2)
    lon_end = np.radians(lon2)
    dLat = lat_end - lat_start
    dLon = lon_end - lon_start
    a = np.sin(dLat/2.0)*np.sin(dLat/2.0) + np.cos(lat_start)*np.cos(lat_end)*np.sin(dLon/2.0)*np.sin(dLon/2.0)
    c = 2.0*np.arctan2(np.sqrt(a), np.sqrt(1-a))
    d = c*R
    y = np.sin(dLon)*np.cos(lat_end)
    x = np.cos(lat_start)*np.sin(lat_end) - np.sin(lat_start)*np.cos(lat_end)*np.cos(dLon)
    bearing = np.degrees(np.arctan2(y, x))
    return d, bearing
def upload_to_aws(local_file, bucket, s3_file):
    s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,
                      aws_secret_access_key=SECRET_KEY)
    try:
        s3.upload_file(local_file, bucket, s3_file)
        print("Upload Successful")
        return True
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError:
        print("Credentials not available")
        return False
## END OF FUNCTION DEFINITIONS ##

#Unzip the zip file and its contents
print("unzipping")
path_to_zip_file = "/home/ubuntu/pano/Zip/Videos.zip"
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
    zip_ref.extractall("/home/ubuntu/pano/Video")
print("Finished Unzipping")
#End of Unzip
# CSV open and declaration
data_file_path = "/home/ubuntu/pano/stack/quantity.csv"
data_file = open(data_file_path, "w+")
dataCSVWriter = csv.writer(data_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
dataCSVWriter.writerow(['lat', 'lon', 'Quantity'])

#CSV for lane thumbnail
thumbnail_data_file_path = "/home/ubuntu/pano/stack/lane_thumbnail.csv"
thumbnail_data_file = open(thumbnail_data_file_path, "w+")
thumbnail_dataCSVWriter = csv.writer(thumbnail_data_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
thumbnail_dataCSVWriter.writerow(['lat', 'lon'])
#Define start and end point lists
#start_point_list = [(35.841454251754755, 139.52427014959153),(35.84147944801779, 139.52420150963678)]
start_point_list = [(36.12083710338884, 139.21630320454503),(36.12080527337101, 139.2164926108044)]
#end_point_list = [(35.84151350159559, 139.52424466860762),(35.84144222040454, 139.52422739581436)]
end_point_list = [(36.12083735438514, 139.2164757318577),(36.12081575161991, 139.21630345327617)]
wp_lat_array = np.array([])
wp_lon_array = np.array([])

## Split the line into points, stored in lat_array and lon_array
"""for i in range(len(start_point_list)):
    ## input two points and find a slicing waypoint between them
    distance, bearing_deg = get_distance_bearing(start_point_list[i][0], start_point_list[i][1], end_point_list[i][0], end_point_list[i][1])
    print(distance)
    lat_array, lon_array = discretize_line(start_point_list[i][0], start_point_list[i][1], float(d_element[i]), distance, bearing_deg)"""
#Initialize the detector variables and paths
quantity_bottles_frame = []
config_file = "/home/ubuntu/darknet_bottle_example/yolov4_bottle_can.cfg"
data_file = "/home/ubuntu/darknet_bottle_example/obj_bottle_can.data"
weights = "/home/ubuntu/darknet_bottle_example/yolov4_bottle_can_best.weights"
network, class_names, class_colors = darknet.load_network(
    config_file,
    data_file,
    weights,
    batch_size=1
)
image_dir = "/home/ubuntu/pano/Frames"
#1. Split into frames
path = "/home/ubuntu/pano/Video/Panorama/Videos"
j = 0
"""Order of events
1. Split into frames
2. Rotate images if needed
3. Run through the detector
4. Calculate count and draw bounding boxes
5. Store these images in respective directories
6. Take start point and end point of the lane and split into many coordinates in between, based on number of frames
7. Write to csv file
8. Stack the images per lane
9. Empty the Frames folder after every lane
10. Upload stacked images and csv to cloud """
# Parameter to change is fps in the ffmpeg command. Change according to need, based on reference
for filename in os.listdir(path):
    if filename.endswith(".mp4"):  # or .avi, .mpeg, whatever
        j += 1
        path1 = path + filename
        print(path1)
        os.system("ffmpeg -i /home/ubuntu/pano/Video/Panorama/Videos/{0} -vf fps=0.07 /home/ubuntu/pano/Frames/{1}-%3d.jpg".format(filename, j))

        #2. Rotate images if needed
        frames_path = "/home/ubuntu/pano/Frames/*.jpg"
        list_images = glob.glob(frames_path)
        list_sorted = sorted(list_images)
        # for image in list_sorted:
        #     temp = cv2.imread(image)  # read the image
        #     image1 = cv2.rotate(temp, cv2.ROTATE_90_COUNTERCLOCKWISE)
        #     cv2.imwrite("{0}".format(image), image1)

        ## according to how many partial panoramas we have in each lane
        d_element = [len(list_images)]
        print(f"Now detecting objects in lane {j}")

        #3. Run through the detector
        frame_number = 1
        for image in sorted(os.listdir(image_dir)):
            # Path to the input images for the detector, i.e. Frames
            quantity_frame = 0
            image_name = f"{image}"
            ext = '.jpg'
            input_image_name = image_name
            image_path = os.path.join(image_dir, input_image_name)
            print(image_path)
            # Path to output images to be stored after running through the detector
            output_dir = f"/home/ubuntu/pano/lane{j}"
            output_name = "yolo_" + image_name
            output_path = os.path.join(output_dir, output_name)
            dn_frame_width = 416
            dn_frame_height = 416
            frame = cv2.imread(image_path)
            frame_width = frame.shape[1]
            frame_height = frame.shape[0]

            #### Pass the image to darknet
            image, detections = image_detection(image_path, network, class_names, class_colors, thresh=0.05)

            ### Based on the detections, loop through them to draw bounding boxes and increment the object count for the frame
            #4. Calculate count and draw bounding boxes
            for i in range(len(detections)):
                xc_percent = detections[i][2][0]/dn_frame_width
                yc_percent = detections[i][2][1]/dn_frame_height
                w_percent = detections[i][2][2]/dn_frame_width
                h_percent = detections[i][2][3]/dn_frame_height
                xc = xc_percent*frame_width
                yc = yc_percent*frame_height
                w = w_percent*frame_width
                h = h_percent*frame_height
                xmin = xc - w/2.0
                ymin = yc - h/2.0
                xmax = xc + w/2.0
                ymax = yc + h/2.0
                # If an object is detected, increase the count of the object in the frame
                if detections[i][0] == "bottle":
                    cv2.rectangle(frame, (int(xmin),int(ymin)), (int(xmax),int(ymax)), (0,0,255), 2)
                    cv2.putText(frame, "bottle", (int(xmin), int(ymin-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,0,255), 2)
                    quantity_frame += 1
                elif detections[i][0] == "can":
                    cv2.rectangle(frame, (int(xmin),int(ymin)), (int(xmax),int(ymax)), (255,0,0), 2)
                    cv2.putText(frame, "can", (int(xmin), int(ymin-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)
                else:
                    print(f"{image} has no objects")
            print(f"Quantity in frame {frame_number} = {quantity_frame}")

            #5. Store these images in respective directories
            cv2.imwrite(output_path, frame)
            quantity_bottles_frame.append(quantity_frame)
            frame_number += 1
        ### Split the points into equidistant points between start point and end point
        ##6. Take start point and end point of the lane and split into many coordinates in between, based on number of frames
        distance, bearing_deg = get_distance_bearing(start_point_list[j-1][0], start_point_list[j-1][1], end_point_list[j-1][0], end_point_list[j-1][1])
        print(distance)
        lat_array, lon_array = discretize_line(start_point_list[j-1][0], start_point_list[j-1][1], float(d_element[0]), distance, bearing_deg)
        lat_csv = []
        lon_csv = []
        ## Convert those points into degrees
        for lat, lon in zip(lat_array, lon_array):
            lat_degrees = "{:}".format(np.degrees(lat))
            lon_degrees = "{:}".format(np.degrees(lon))
            lat_csv.append(lat_degrees)
            lon_csv.append(lon_degrees)

        ##7. Write each row to the csv file
        for k in range(d_element[0]):
            dataCSVWriter.writerow([lat_csv[k], lon_csv[k], quantity_bottles_frame[k]])
            if k == d_element[0]-1:
                print(lat_csv[int(d_element[0]/2)])
                thumbnail_dataCSVWriter.writerow([lat_csv[int(d_element[0]/2)], lon_csv[int(d_element[0]/2)]])

        ##### 8. STACKING THE IMAGES ######
        images = []
        stacking_input = f"/home/ubuntu/pano/lane{j}/*.jpg"
        list_images = glob.glob(stacking_input)
        stacking_input_reverse = sorted(list_images, reverse=True)
        print(stacking_input_reverse)
        for image in stacking_input_reverse:
            img = cv2.imread(image)
            images.append(img)
        final_image = cv2.hconcat(images)
        image_name = f"cloud_lane{j}_stack.jpg"
        stacking_output = "/home/ubuntu/pano/stack"
        output_path = os.path.join(stacking_output, image_name)
        cv2.imwrite(output_path, final_image)

        ##### 9. DELETE FRAMES AFTER ONE ITERATION OF LOOP #####
        for f in os.listdir(image_dir):
            del_path = "/home/ubuntu/pano/Frames/" + f
            os.remove(del_path)
    else:
        continue
#Close csv files
#data_file.close()
#thumbnail_data_file.close()

### 10. Upload to s3 bucket ####
stack_path = "/home/ubuntu/pano/stack"
for file in sorted(os.listdir(stack_path)):
    print(f"Uploading {file}")
    uploaded = upload_to_aws(f'/home/ubuntu/pano/stack/{file}', 'fbt-pano-test', f'{file}')
Do I need to close the csv file in some way? Or does S3 not support csv uploads through boto3?
I found it. It turns out the csv files weren't closed at the end, so I moved the upload-to-S3 part to another program. Python now closes the csv files automatically at the end of this program, and when the upload program runs next, the files are uploaded properly.
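For reference, a minimal sketch of the in-program alternative: keep the csv handle under a name that is not reused later (note that data_file is rebound to the darknet .data path partway through the program above, so the handle needs its own name) and close, or at least flush, it before calling upload_to_aws. The paths, bucket name, and helper are the ones from the snippet above; quantity_csv is a hypothetical rename.

# keep the csv handle under a name that is not reused later in the program
quantity_csv = open(data_file_path, "w+")
dataCSVWriter = csv.writer(quantity_csv, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
# ... write rows as above ...

# close (or at least flush) so buffered rows reach the file on disk;
# otherwise boto3 uploads the still-empty 0-byte file
quantity_csv.close()
thumbnail_data_file.close()
upload_to_aws(data_file_path, 'fbt-pano-test', 'quantity.csv')
upload_to_aws(thumbnail_data_file_path, 'fbt-pano-test', 'lane_thumbnail.csv')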
I need help, please, to bring the test results and the file name together as the title over the image being evaluated. I can generate the test results as a list (see image), I can add the test results over the image, and I can add the file name over the top of the image, but I cannot do both. I'm not sure what I am doing wrong. Thank you.
path = '/Users/minny/Desktop/A/png/file2/'
ref_images = glob.iglob(path + "*.png")
all_ref_images = []
for ref_path in ref_images:
    ref_head, tail = os.path.splitext(ref_path)
    image1 = Image.open(ref_path)
    imgA = print(calculate_brightness(image1))
    #all_ref_images.append([imgA, ref_head])
    ref_image = plt.imshow(image1)
    fig = plt.figure()
    plt.title(os.path.basename(ref_head)), plt.title(calculate_brightness(image1))
    #plt.axis("off")
def calculate_brightness(image):
    greyscale_image = image.convert('L')
    histogram = greyscale_image.histogram()
    pixels = sum(histogram)
    brightness = scale = len(histogram)
    for index in range(0, scale):
        ratio = histogram[index] / pixels
        brightness += ratio * (-scale + index)
    return 1 if brightness == 255 else brightness / scale
%%capture
# gives a standard size image to view inline with the text
def display_img(img, cmap=None):
    fig = plt.figure(figsize=(3,3))
    ax = fig.add_subplot(111)
    ax.imshow(img, cmap)

path = '/Users/minny/Desktop/A/png/file2/'
ref_images = glob.iglob(path + "*.png")
all_ref_images = []
for ref_path in ref_images:
    ref_head, tail = os.path.splitext(ref_path)
    image1 = Image.open(ref_path)
    imgA = print(calculate_brightness(image1))
    all_ref_images.append([imgA, ref_head])
    fig = plt.figure()
    ref_image = plt.imshow(image1)
    print(os.path.basename(ref_head))
    #plt.axis("off")
    ref_image = plt.imshow(image1)
    image_basename = os.path.basename(ref_head)
    title = '\n'.join([image_basename, str(calculate_brightness(image1))])
    plt.title(title, loc='left')
    dir_name = '/Users/minny/Desktop/A/png/results/'
    plt.savefig('{dir_name}/{filename}'.format(dir_name=dir_name, filename=image_basename))
You can concatenate the basename of the image's path with the output of the calculate_brightness function and set the result as the title, so the two calls don't overwrite each other:
ref_image = plt.imshow(image1)
image_basename = os.path.basename(ref_head)
test_results = '\n'.join(map(str, calculate_brightness(image1)))
title = '\n'.join([image_basename, test_results])
plt.title(title, loc='left')
UPD:
If the result of the calculate_brightness function is a float, you can solve the problem this way:
ref_image = plt.imshow(image1)
image_basename = os.path.basename(ref_head)
title = '\n'.join([image_basename, str(calculate_brightness(image1))])
plt.title(title, loc='left')
UPD2:
For saving images to a specified folder you can use the plt.savefig method:
dir_name = '/Users/minny/Desktop/A/png/file2/some_directory' # create directory if necessary
plt.savefig('{dir_name}/{filename}'.format(dir_name=dir_name, filename=image_basename))
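If that directory may not exist yet, one way to create it from the script itself (a small sketch, assuming Python 3.2+ for the exist_ok flag):

import os

dir_name = '/Users/minny/Desktop/A/png/file2/some_directory'
os.makedirs(dir_name, exist_ok=True)  # no error if the directory already exists
plt.savefig('{dir_name}/{filename}'.format(dir_name=dir_name, filename=image_basename))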
I am trying to stitch about 50 images (all the same 287x287 size) together. Specifically, there should be 25 images on the top row and 25 images on the bottom row, with a small gap between each pair of adjacent images.
I met two difficulties during my attempts:
The first problem is that there are 25 images in one folder named 'prefix-70', ..., 'prefix-94', while the other 25 images are in another folder with the same names 'prefix-70', ..., 'prefix-94'. I do not know how to read them in Python without conflicts.
The second problem is that I wrote the following code to read the images in one folder and form a row, but it outputs a column.
#!/usr/bin/python3.0
#encoding=utf-8
import numpy as np
from PIL import Image
import glob, os

if __name__ == '__main__':
    #prefix = input('Input the prefix of images:')
    prefix = 'prefix'
    files = glob.glob(prefix + '-*')
    num = len(files)
    filename_lens = [len(x) for x in files]  # lengths of the filenames
    min_len = min(filename_lens)  # minimal length of filenames
    max_len = max(filename_lens)  # maximal length of filenames
    if min_len == max_len:  # the number at the end of each filename has the same length
        files = sorted(files)  # sort the files in ascending order
    else:
        index = [0 for x in range(num)]
        for i in range(num):
            filename = files[i]
            start = filename.rfind('-') + 1
            end = filename.rfind('.')
            file_no = int(filename[start:end])
            index[i] = file_no
        index = sorted(index)
        files = [prefix + '-' + str(x) + '.png' for x in index]
    print(files[0])
    baseimg = Image.open(files[0])
    sz = baseimg.size
    basemat = np.atleast_2d(baseimg)
    for i in range(1, num):
        file = files[i]
        im = Image.open(file)
        im = im.resize(sz, Image.ANTIALIAS)
        mat = np.atleast_2d(im)
        print(file)
        basemat = np.append(basemat, mat, axis=0)
    final_img = Image.fromarray(basemat)
    final_img.save('merged.png')
I guess I have gone about it the wrong way...
How can I stitch them properly? Any suggestion is appreciated.
Try this (explanation in comments):
from PIL import Image
from os import listdir, path
space_between_row = 10
new_image_path = 'result.jpg'
im_dirs = ['images/1', 'images/2']
# get sorted list of images
im_path_list = [[path.join(p, f) for f in sorted(listdir(p))] for p in im_dirs]
# open images and calculate total widths and heights
im_list = []
total_width = 0
total_height = 0
for path_list in im_path_list:
    images = list(map(Image.open, path_list))
    widths, heights = zip(*(i.size for i in images))
    total_width = max(total_width, sum(widths))
    total_height += max(heights) + space_between_row  # leave room for the gap below each row
    im_list.append(images)
# concat images
new_im = Image.new('RGB', (total_width, total_height))
y_offset = 0
for images in im_list:
    x_offset = 0
    max_height = 0
    for im in images:
        new_im.paste(im, (x_offset, y_offset))
        x_offset += im.size[0]
        max_height = max(im.size[1], max_height)
    y_offset = y_offset + max_height + space_between_row
# show and save
new_im.show()
new_im.save(new_image_path)
Install ImageMagick, then tell it where your two directories are.
#!/usr/bin/python3
##=========================================================
## required ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
## imagemagick.org/script/download.php
##
##=========================================================
## libs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import subprocess as sp
##=========================================================
## vars ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
offset = '2'      ## pixel gap between images (a string, since subprocess args must be strings)
color = '#000000' ## background color to fill gaps
dir1 = '/home/me/Pictures/topRow/'
dir2 = '/home/me/Pictures/bottomRow/'
## note: windows dirs use double backslashes
## 'C:\\Users\\me\\Pictures\\topRow\\'
##=========================================================
## script ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
row1args = ['convert', '+smush', offset, '-background', color, dir1 + '*.png', 'row1.png']
row2args = ['convert', '+smush', offset, '-background', color, dir2 + '*.png', 'row2.png']
merge = ['convert', '-smush', offset, '-background', color, 'row*.png', 'merged.png']
##=========================================================
## main ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
sp.call(row1args)
sp.call(row2args)
sp.call(merge)
##=========================================================
## eof ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
I'm trying to display a stack of images (a CT scan) using numpy/vtk, as described in this sample code (http://www.vtk.org/Wiki/VTK/Examples/Python/vtkWithNumpy), but I can't get it to work and don't know why.
If someone could help me, it would be kind.
Here's my code:
import vtk
import numpy as np
import os
import cv, cv2
import matplotlib.pyplot as plt
import PIL
import Image

DEBUG = True
directory = "splitted_mri/"
w = 226
h = 186
d = 27

stack = np.zeros((w, d, h))
k = -1  # add the next picture at a different level of depth/z-position
for file in os.listdir(directory):
    k += 1
    img = directory + file
    im = Image.open(img)
    temp = np.asarray(im, dtype=int)
    stack[:, k, :] = temp
print stack.shape
print type(stack[10, 10, 15])
res = np.amax(stack)
res1 = np.amin(stack)
print res, type(res)
print res1, type(res1)

stack = np.require(stack, dtype=np.uint16)
print stack.dtype
if DEBUG: print stack.shape

dataImporter = vtk.vtkImageImport()
data_string = stack.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetNumberOfScalarComponents(1)
dataImporter.SetDataExtent(0, w-1, 0, 1, 0, h-1)
dataImporter.SetWholeExtent(0, w-1, 0, 1, 0, h-1)
essai = raw_input()

alphaChannelFunc = vtk.vtkPiecewiseFunction()
colorFunc = vtk.vtkColorTransferFunction()
for i in range(0, 255):
    alphaChannelFunc.AddPoint(i, 0.9)
    colorFunc.AddRGBPoint(i, i, i, i)

volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
#volumeProperty.ShadeOn()
volumeProperty.SetScalarOpacity(alphaChannelFunc)

# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# We can finally create our volume. We also have to specify the data for it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())

# The class vtkVolume is used to pair the previously declared volume as well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)

# With almost everything else ready, it's time to initialize the renderer and window, as well as creating a method for exiting the application
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)

# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set background color to white ...
renderer.SetBackground(1, 1, 1)
# ... and set window size.
renderWin.SetSize(400, 400)

# A simple function to be called when the user decides to quit the application.
def exitCheck(obj, event):
    if obj.GetEventPending() != 0:
        obj.SetAbortRender(1)

# Tell the application to use the function as an exit check.
renderWin.AddObserver("AbortCheckEvent", exitCheck)

# to quit, press q
renderInteractor.Initialize()
# Because nothing will be rendered without any input, we order the first render manually before control is handed over to the main-loop.
renderWin.Render()
renderInteractor.Start()
I finally found out what was wrong.
Here's my new code:
import vtk
import numpy as np
import os
import matplotlib.pyplot as plt
import PIL
import Image

DEBUG = False
directory = "splitted_mri/"

l = []
k = 0  # add the next picture at a different level of depth/z-position
for file in os.listdir(directory):
    img = directory + file
    if DEBUG: print img
    l.append(img)
# the os.listdir function does not give the files in the right order,
# so we need to sort them
l = sorted(l)

temp = Image.open(l[0])
h, w = temp.size
d = len(l)*5  # with our sample, each image will be displayed 5 times to get a better view
if DEBUG: print 'width, height, depth : ', w, h, d

stack = np.zeros((w, d, h), dtype=np.uint8)
for i in l:
    im = Image.open(i)
    temp = np.asarray(im, dtype=int)
    for repeat in range(5):
        stack[:, k + repeat, :] = temp
    k += 5

if DEBUG:
    res = np.amax(stack)
    print 'max value', res
    res1 = np.amin(stack)
    print 'min value', res1

# convert the stack to the right dtype
stack = np.require(stack, dtype=np.uint8)

if DEBUG:  # check that the images have not been modified
    test = stack[:, 0, :]
    plt.imshow(test, cmap='gray')
    plt.show()
if DEBUG: print 'stack shape & dtype', stack.shape, ',', stack.dtype

dataImporter = vtk.vtkImageImport()
data_string = stack.tostring()
dataImporter.CopyImportVoidPointer(data_string, len(data_string))
dataImporter.SetDataScalarTypeToUnsignedChar()
dataImporter.SetNumberOfScalarComponents(1)
# vtk uses an array in the order: height, depth, width, which is
# different from numpy's (w, h, d)
w, d, h = stack.shape
dataImporter.SetDataExtent(0, h-1, 0, d-1, 0, w-1)
dataImporter.SetWholeExtent(0, h-1, 0, d-1, 0, w-1)

alphaChannelFunc = vtk.vtkPiecewiseFunction()
colorFunc = vtk.vtkColorTransferFunction()
for i in range(256):
    alphaChannelFunc.AddPoint(i, 0.2)
    colorFunc.AddRGBPoint(i, i/255.0, i/255.0, i/255.0)
# for our test sample, we set the black opacity to 0 (transparent) so as
# to see the sample
alphaChannelFunc.AddPoint(0, 0.0)
colorFunc.AddRGBPoint(0, 0, 0, 0)

volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(colorFunc)
#volumeProperty.ShadeOn()
volumeProperty.SetScalarOpacity(alphaChannelFunc)

# This class describes how the volume is rendered (through ray tracing).
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
# We can finally create our volume. We also have to specify the data for
# it, as well as how the data will be rendered.
volumeMapper = vtk.vtkVolumeRayCastMapper()
# reduce the spacing between each image
volumeMapper.SetMaximumImageSampleDistance(0.01)
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(dataImporter.GetOutputPort())

# The class vtkVolume is used to pair the previously declared volume as
# well as the properties to be used when rendering that volume.
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)

# With almost everything else ready, it's time to initialize the renderer and window,
# as well as creating a method for exiting the application
renderer = vtk.vtkRenderer()
renderWin = vtk.vtkRenderWindow()
renderWin.AddRenderer(renderer)
renderInteractor = vtk.vtkRenderWindowInteractor()
renderInteractor.SetRenderWindow(renderWin)

# We add the volume to the renderer ...
renderer.AddVolume(volume)
# ... set background color to white ...
renderer.SetBackground(1, 1, 1)
# ... and set window size.
renderWin.SetSize(550, 550)
renderWin.SetMultiSamples(4)

# A simple function to be called when the user decides to quit the application.
def exitCheck(obj, event):
    if obj.GetEventPending() != 0:
        obj.SetAbortRender(1)

# Tell the application to use the function as an exit check.
renderWin.AddObserver("AbortCheckEvent", exitCheck)

# to quit, press q
renderInteractor.Initialize()
# Because nothing will be rendered without any input, we order the first
# render manually before control is handed over to the main-loop.
renderWin.Render()
renderInteractor.Start()
If you are OK with a solution not using VTK, you could use Matplotlib's imshow with interactive navigation via key presses.
This tutorial shows how:
https://www.datacamp.com/community/tutorials/matplotlib-3d-volumetric-data
https://github.com/jni/mpl-volume-viewer
and here is an implementation for viewing RTdose files:
https://github.com/pydicom/contrib-pydicom/pull/19
See also:
https://github.com/napari/napari
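For flavor, here is a minimal slice-viewer sketch in the spirit of that DataCamp tutorial; the 'j'/'k' key bindings follow the tutorial, and the random volume is just a stand-in for a real CT stack:

import matplotlib.pyplot as plt
import numpy as np

def multi_slice_viewer(volume):
    # show the middle slice first; 'j'/'k' step backward/forward through the stack
    fig, ax = plt.subplots()
    ax.volume = volume
    ax.index = volume.shape[0] // 2
    ax.imshow(volume[ax.index], cmap='gray')
    fig.canvas.mpl_connect('key_press_event', process_key)
    plt.show()

def process_key(event):
    ax = event.canvas.figure.axes[0]
    if event.key == 'j':
        ax.index = (ax.index - 1) % ax.volume.shape[0]
    elif event.key == 'k':
        ax.index = (ax.index + 1) % ax.volume.shape[0]
    ax.images[0].set_array(ax.volume[ax.index])
    event.canvas.draw()

multi_slice_viewer(np.random.rand(27, 186, 226))  # stand-in for the CT stack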