I'm using OpenCV to read images into a numpy array, and they have the following shape.

import os
import cv2
import numpy

def readImages(path):
    imgs = []
    for file in os.listdir(path):
        if file.endswith('.png'):
            # join the directory, otherwise imread looks in the current working directory
            img = cv2.imread(os.path.join(path, file))
            imgs.append(img)
    imgs = numpy.array(imgs)
    return imgs

imgs = readImages(...)
print(imgs.shape)  # (100, 718, 686, 3)
Each image is 718x686 pixels. There are 100 images.
I don't want to work with a 718x686 shape; I'd like to combine the pixel dimensions into one, so that the shape becomes (100, 492548, 3). Is there any way, either in OpenCV (or any other library) or in NumPy, that allows me to do that?
Without modifying your reading function:

imgs = readImages(...)
print(imgs.shape)  # (100, 718, 686, 3)

# flatten axes -2 and -3, using -1 to autocalculate the size
pixel_lists = imgs.reshape(imgs.shape[:-3] + (-1, 3))
print(pixel_lists.shape)  # (100, 492548, 3)
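As a side note, reshape returns a view rather than a copy whenever the array's memory layout allows it, so this operation is essentially free even for large image stacks. A quick sketch to check, assuming imgs was built as above:

flat = imgs.reshape(imgs.shape[:-3] + (-1, 3))
print(flat.base is imgs)  # True when numpy returned a view (no pixel data was copied)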
In case anyone wants it, here's a general way of doing this:

import functools
import numpy as np

def combine_dims(a, i=0, n=1):
    """
    Combines dimensions of numpy array `a`,
    starting at index `i`, merging the `n+1`
    dimensions `i` through `i+n` into one.
    """
    s = list(a.shape)
    combined = functools.reduce(lambda x, y: x * y, s[i:i+n+1])
    return np.reshape(a, s[:i] + [combined] + s[i+n+1:])

You would use it like this:

imgs = combine_dims(imgs, 1)  # combines dimensions 1 and 2
# imgs.shape == (100, 718*686, 3)
import numpy

def combine_dims(a, start=0, count=2):
    """ Reshapes numpy array a by combining `count` dimensions,
        starting at dimension index `start`. """
    s = a.shape
    return numpy.reshape(a, s[:start] + (-1,) + s[start+count:])
This function does what you need in a more general way.

imgs = combine_dims(imgs, 1)  # combines dimensions 1 and 2
# imgs.shape == (100, 718*686, 3)

It works by using numpy.reshape, which turns an array of one shape into an array with the same data viewed as another shape. The target shape is just the initial shape, but with the dimensions to be combined replaced by -1. numpy uses -1 as a flag meaning it should work out for itself how big that dimension must be, based on the total number of elements.
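Here is a tiny standalone sketch of that -1 behaviour on a toy array:

import numpy as np

a = np.arange(24).reshape(2, 3, 4)  # 24 elements in total
print(a.reshape(2, -1).shape)   # (2, 12) -- numpy infers 24 / 2 = 12
print(a.reshape(-1, 4).shape)   # (6, 4)  -- numpy infers 24 / 4 = 6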
This code is essentially a simplified version of Multihunter's answer, but my edit was rejected with a hint that it should be a separate answer. So there you go.
import cv2
import os
import numpy as np

def readImages(path):
    imgs = np.empty((0, 492548, 3))
    for file in os.listdir(path):
        if file.endswith('.png'):
            img = cv2.imread(os.path.join(path, file))
            img = img.reshape((1, 492548, 3))
            imgs = np.append(imgs, img, axis=0)
    return imgs

imgs = readImages(...)
print(imgs.shape)  # (100, 492548, 3)
The trick is to reshape each image and append it to a numpy array. It's not good practice to hardcode the length of the vector (492548), so if I were you I'd also add a line that calculates this number and puts it in a variable, for use in the rest of the script.
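For example (a sketch of that suggestion), inside the loop you could derive the length from the image itself, or let reshape infer it with -1:

img = cv2.imread(os.path.join(path, file))
n_pixels = img.shape[0] * img.shape[1]  # 718 * 686 == 492548 for these images
img = img.reshape((1, n_pixels, 3))     # or simply: img.reshape((1, -1, 3))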
Related
I'm struggling to create a data generator in PyTorch that extracts 2D images from many 3D cubes saved in .dat format.
There are 200 3D cubes in total, each with shape 128*128*128. I want to extract 2D images from all of these cubes along the length and breadth.
For example, let a be one cube of size 128*128*128.
I want to extract all 2D slices along the length, i.e. a[:, i, :], which gives me 128 2D images, and similarly along the width, i.e. a[:, :, i], which gives another 128. So I get a total of 256 2D images from one 3D cube, and repeating this process for all 200 cubes gives 51,200 2D images.
So far I've tried a very basic implementation that works fine but takes approximately 10 minutes to run. I'd like help creating a more optimal implementation in terms of time and space complexity. Right now my approach is O(n²); can that be reduced?
I'm providing the current implementation below:
from os.path import join as pjoin
import torch
import numpy as np
import os
from tqdm import tqdm
from torch.utils import data

class DataGenerator(data.Dataset):
    def __init__(self, is_transform=True, augmentations=None):
        self.is_transform = is_transform
        self.augmentations = augmentations
        self.dim = (128, 128, 128)
        seismicSections = []  # Input
        faultSections = []  # Ground Truth
        for fileName in tqdm(os.listdir(pjoin('train', 'seis')), total=len(os.listdir(pjoin('train', 'seis')))):
            # the .dat file contains an unrolled cube, so we need to reshape it
            unrolledVolSeismic = np.fromfile(pjoin('train', 'seis', fileName), dtype=np.single)
            # transpose the axes to get height at axis 0, length at axis 1, width at axis 2
            reshapedVolSeismic = np.transpose(unrolledVolSeismic.reshape(self.dim))
            unrolledVolFault = np.fromfile(pjoin('train', 'fault', fileName), dtype=np.single)
            reshapedVolFault = np.transpose(unrolledVolFault.reshape(self.dim))
            for idx in range(reshapedVolSeismic.shape[2]):
                seismicSections.append(reshapedVolSeismic[:, :, idx])
                faultSections.append(reshapedVolFault[:, :, idx])
            for idx in range(reshapedVolSeismic.shape[1]):
                seismicSections.append(reshapedVolSeismic[:, idx, :])
                faultSections.append(reshapedVolFault[:, idx, :])
        self.seismicSections = seismicSections
        self.faultSections = faultSections

    def __len__(self):
        return len(self.seismicSections)

    def __getitem__(self, index):
        X = self.seismicSections[index]
        Y = self.faultSections[index]
        return X, Y
Please Help!!!
Why not store only the 3D volumes in memory and let the __getitem__ method "slice" them on the fly?
class CachedVolumeDataset(Dataset):
    def __init__(self, ...):
        super(...)
        self._volumes_x = ...  # a list of 200 128x128x128 volumes
        self._volumes_y = ...  # a list of 200 128x128x128 volumes

    def __len__(self):
        return len(self._volumes_x) * (128 + 128)

    def __getitem__(self, index):
        # extract volume index from the general index:
        vidx = index // (128 + 128)
        # extract slice index
        sidx = index % (128 + 128)
        if sidx < 128:
            # first dim
            x = self._volumes_x[vidx][:, :, sidx]
            y = self._volumes_y[vidx][:, :, sidx]
        else:
            sidx -= 128
            # second dim
            x = self._volumes_x[vidx][:, sidx, :]
            y = self._volumes_y[vidx][:, sidx, :]
        return torch.squeeze(x), torch.squeeze(y)
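The two list assignments above are placeholders. A minimal sketch of filling them, assuming the same train/seis and train/fault .dat layout from the question (load_volumes is a hypothetical helper, not part of the original answer):

import os
import numpy as np
import torch

def load_volumes(folder, dim=(128, 128, 128)):
    # read each unrolled .dat cube, reshape and transpose it as in the question,
    # and keep it in memory as a torch tensor so torch.squeeze works in __getitem__
    volumes = []
    for fileName in sorted(os.listdir(folder)):
        raw = np.fromfile(os.path.join(folder, fileName), dtype=np.single)
        volumes.append(torch.from_numpy(np.transpose(raw.reshape(dim)).copy()))
    return volumes

# inside __init__:
# self._volumes_x = load_volumes(os.path.join('train', 'seis'))
# self._volumes_y = load_volumes(os.path.join('train', 'fault'))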
I have an image that I want to perform some calculations on. The image pixels are represented as f(x, y), where x is the column number and y is the row number of each pixel. I want to compute, for each pixel, the horizontal difference D_h(x, y) = |f(x, y+1) - f(x, y-1)|, treating neighbours that fall outside the image as 0.
Here is the code that does the calculation:

import matplotlib.pyplot as plt
import numpy as np
import os.path
from PIL import Image

global image_width, image_height

# A. Blur Measurement
def measure_blur(f):
    D_sub_h = [[0 for y in range(image_height)] for x in range(image_width)]
    for x in range(image_width):
        for y in range(image_height):
            if y == 0:
                f_x_yp1 = f[x][y+1]
                f_x_ym1 = 0
            elif y == (image_height - 1):
                f_x_yp1 = 0
                f_x_ym1 = f[x][y-1]
            else:
                f_x_yp1 = f[x][y+1]
                f_x_ym1 = f[x][y-1]
            D_sub_h[x][y] = abs(f_x_yp1 - f_x_ym1)
    return D_sub_h

if __name__ == '__main__':
    image_counter = 1
    while True:
        if not os.path.isfile(str(image_counter) + '.jpg'):
            break
        image_path = str(image_counter) + '.jpg'
        image = Image.open(image_path)
        image_height, image_width = image.size
        print("Image Width : " + str(image_width))
        print("Image Height : " + str(image_height))
        f = np.array(image)
        D_sub_h = measure_blur(f)
        image_counter = image_counter + 1
The problem with this code is that when the image becomes large, such as (5000, 5000), it takes a very long time to complete. Is there any way or function I can use to speed this up, rather than computing pixel by pixel?
Since you specifically convert the input f to a numpy array, I am assuming you want to use numpy. In that case, the allocation of D_sub_h needs to change from a list to an array:
D_sub_h = np.empty_like(f)
If we assume that everything outside your array is zeros, then the first row and last row can be computed as the second and negative second-to-last rows, respectively:
D_sub_h[0, :] = f[1, :]
D_sub_h[-1, :] = -f[-2, :]
The remainder of the data is just the difference between the next and previous index at each location, which is idiomatically computed by shifting views: f[2:, :] - f[:-2, :]. This formulation creates a temporary array. You can avoid doing that by using np.subtract explicitly:
np.subtract(f[2:, :], f[:-2, :], out=D_sub_h[1:-1, :])
The entire thing takes four lines in this formulation, and is fully vectorized, which means that loops run quickly under the hood, without most of Python's overhead:
def measure_blur(f):
    D_sub_h = np.empty_like(f)
    D_sub_h[0, :] = f[1, :]
    D_sub_h[-1, :] = -f[-2, :]
    np.subtract(f[2:, :], f[:-2, :], out=D_sub_h[1:-1, :])
    return D_sub_h
Notice that I return the value instead of printing it. When you write functions, get in the habit of returning a value. Printing can be done later, and effectively discards the computation if it replaces a proper return.
The way shown above is fairly efficient with regard to both time and space. If you want to write a one-liner that uses a lot of temporary arrays, you can also do:
D_sub_h = np.concatenate((f[1, None], f[2:, :] - f[:-2, :], -f[-2, None]), axis=0)
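Note that both versions above return signed differences; if you need the absolute value from your formula, wrap the result in np.abs. As a quick sanity check on hypothetical random data, the function and the one-liner agree:

f = np.random.randint(-50, 50, size=(6, 5))  # small random test array
via_function = measure_blur(f)
via_one_liner = np.concatenate((f[1, None], f[2:, :] - f[:-2, :], -f[-2, None]), axis=0)
assert np.array_equal(via_function, via_one_liner)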
I'm using PIL to load images and then transform them into NumPy arrays. Then I have to create a new image based on a list of images, so I append all the arrays to a list and then transform that list back into an array. The array of images therefore has 4 dimensions: (n_images, height, width, rgb_channels). I'm using this code:
def gallery(array, ncols=4):
    nindex, height, width, intensity = array.shape
    nrows = nindex // ncols
    # want result.shape = (height*nrows, width*ncols, intensity)
    result = (array.reshape(nrows, ncols, height, width, intensity)
              .swapaxes(1, 2)
              .reshape(height*nrows, width*ncols, intensity))
    return result

def make_array(dim_x):
    for i in range(dim_x):
        print('series', i)
        series = []
        for j in range(TIME_STEP - 1):
            print('photo', j)
            aux = np.asarray(Image.open(dirpath + '/images/pre_images/series_{0}_Xquakemap_{1}.jpg'.format(i, j)).convert('RGB'))
            print(np.shape(aux))
            series.append(aux)
            print(np.shape(series))
        im = Image.fromarray(gallery(np.array(series)))
        im.save(dirpath + '/images/gallery/series_{0}_Xquakemap.jpg'.format(i))
        im_shape = im.size

make_array(n_photos)
# n_photos is the total of photos in the dirpath
The problem is that when the append to the series list happens, the shape of the image (the appended NumPy array) gets lost, so reshaping the array inside the gallery function fails. A snippet of the output of the code above:
...
series 2
photo 0
(585, 619, 3)
(1, 585, 619, 3)
photo 1
(587, 621, 3)
(2,)
photo 2
(587, 621, 3)
(3,)
photo 3
(587, 621, 3)
(4,)
...
As you can see, when appending the second photo the list loses a dimension. This is weird because the code works for the first two iterations, which use essentially the same images. I tried using np.stack() but the error persists.
I also found this issue on GitHub, but I think it doesn't apply to this case even though the behavior is similar.
Working on Ubuntu 18, Python 3.7.3 and Numpy 1.16.2.
Edit: added what @kwinkunks asked for.
In the second function, I think you need to move series = [] to before the outer loop.
Here's my reproduction of the problem:
import numpy as np
from PIL import Image

TIME_STEP = 3

def gallery(array, ncols=4):
    """Stitch images together."""
    nindex, height, width, intensity = array.shape
    nrows = nindex // ncols
    result = array.reshape(nrows, ncols, height, width, intensity)
    result = result.swapaxes(1, 2)
    result = result.reshape(height*nrows, width*ncols, intensity)
    return result

def make_array(dim_x):
    """Make an image from a list of arrays."""
    series = []  # <<<<<<<<<<< This is the line you need to check.
    for i in range(dim_x):
        for j in range(TIME_STEP - 1):
            aux = np.ones((100, 100, 3)) * np.random.randint(0, 256, 3)
            series.append(aux.astype(np.uint8))
    im = Image.fromarray(gallery(np.array(series)))
    return im

make_array(4)
This results in a single stitched image: a 2x4 grid of eight random solid-color tiles (output image omitted).
I'm trying to process many images represented as NumPy arrays, but it takes too long. This is what I'm trying to do:

# image is a list of images
max = np.amax(image[k])  # k is the current image index in the loop
# here I try to normalize the SHORT color range to BYTE color so it fills the whole 0-255 range
# (in these images the max color value is around 30000, the min is usually 0)
i = 0
while i < len(image[k]):
    j = 0
    while j < len(image[k][i]):
        image[k][i][j] = float(image[k][i][j]) / max * 255
        j += 1
    i += 1

Just reading the images (170 in total, each 512x512) takes about 7 seconds; with this normalization it takes 20 minutes. And code like this is all over my script. Here I try to make my image colored:
maskLoot1 = np.zeros([len(mask1), 3*len(mask1[0])])
for i in range(len(mask1)):
    for j in range(len(mask1[0])):
        maskLoot1[i][j*3] = mask1[i][j]
        maskLoot1[i][j*3+1] = mask1[i][j]
        maskLoot1[i][j*3+2] = mask1[i][j]
Next I try to replace the pixels of a selected region with colored ones, for example 120 (grey) -> (255, 40, 0) in the RGB model:
for i in range(len(mask1)):
    for j in range(len(mask1[0])):
        # mask is a NumPy array with the selected pixels painted white (255)
        if mask[i][j] > 250:
            maskLoot1[i][j*3] = lootScheme[mask1[i][j]][1]    # red channel
            maskLoot1[i][j*3+1] = lootScheme[mask1[i][j]][2]  # green channel
            maskLoot1[i][j*3+2] = lootScheme[mask1[i][j]][3]  # blue channel

This also takes a lot of time; not 20 minutes, but long enough to make my script lag. And these are just 2 of my many operations on arrays; even if there happens to be a built-in function for the second case, it's unlikely there is one for all the others. So is there a way to speed up my code?
For your mask-making code, try this replacement for the loops:
maskLoot1 = np.dstack(3*[mask1]).reshape((mask1.shape[0],3*mask1.shape[1]))
There are many other ways/variations of achieving the above, e.g.,
maskLoot1 = np.tile(mask1[:,:,None], 3).reshape((mask1.shape[0],3*mask1.shape[1]))
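np.repeat produces the same interleaved layout in a single call (a sketch, equivalent to the variants above):

maskLoot1 = np.repeat(mask1, 3, axis=1)  # repeat each pixel value 3 times along the row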
As for the first part of your question, the best answer is in the first comment on your question by @furas.
First, consider moving to Python 3: NumPy is dropping support for Python 2.7 in 2020.
Regarding your code: you are missing the point of using NumPy below. NumPy is compiled from lower-level libraries and runs very fast; you should not loop over indices in Python, you should hand whole arrays to NumPy.
Question 1
Normalization is very fast using a list comprehension and np.array:
import numpy as np
import time
# create dummy image structure (k, i, j, c) or (k, i, j)
# k is image index, i is row, j is columns, c is channel RGB
images = np.random.uniform(0, 30000, size=(170, 512, 512))
t_start = time.time()
norm_images = np.array([(255*images[k, :, :]/images[k, :, :].max()).astype(int) for k in range(170)])
t_end = time.time()
print("Processing time = {} seconds".format(t_end-t_start))
print("Input shape = {}".format(images.shape))
print("Output shape = {}".format(norm_images.shape))
print("Maximum input value = {}".format(images.max()))
print("Maximum output value = {}".format(norm_images.max()))
That creates the following output
Processing time = 0.2568979263305664 seconds
Input shape = (170, 512, 512)
Output shape = (170, 512, 512)
Maximum input value = 29999.999956185838
Maximum output value = 255
It takes 0.25 seconds!
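If you want, you can drop the list comprehension entirely and normalize all 170 images in one broadcasted expression; a sketch assuming the same (k, i, j) layout:

per_image_max = images.max(axis=(1, 2), keepdims=True)  # shape (170, 1, 1), broadcasts over rows and columns
norm_images = (255 * images / per_image_max).astype(int)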
Question 2
Not sure what you meant here, but if you want to clone the values of a monochromatic image into RGB channels you can do it like this:
# coloring (by copying value and keeping your structure)
color_img = np.array([np.tile(images[k], 3) for k in range(170)])
print("Output shape = {}".format(color_img.shape))
Which produces
Output shape = (170, 512, 1536)
If you would instead like to keep a (k, i, j, c) structure:
color_img = np.array([[images[k]]*3 for k in range(170)]) # that creates (170, 3, 512, 512)
color_img = np.swapaxes(np.swapaxes(color_img, 1,2), 2, 3) # that creates (170, 512, 512, 3)
All this takes 0.26 seconds!
Question 3
Coloring certain regions: I would use a function again and a list comprehension. Since this is an example I have used a default coloring of (255, 40, 0), but you can use anything, including a LUT.

# create a test mask of random gray levels in [0, 255]
mask = np.floor(np.random.uniform(0, 256, size=(512, 512)))
default_scheme = (255, 40, 0)
def substitute(cimg, mask, scheme):
    ind = mask > 250
    cimg[ind, :] = scheme
    return cimg

# assumes the (k, i, j, c) version of color_img from Question 2
new_cimg = np.array([substitute(color_img[k], mask, default_scheme) for k in range(170)])
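If you do want a full LUT rather than a single default scheme, fancy indexing applies it in one step; a sketch with a hypothetical lut table:

lut = np.zeros((256, 3), dtype=np.uint8)  # one RGB triple per 8-bit gray level
lut[251:] = (255, 40, 0)                  # e.g. map near-white levels to the scheme color
colored = lut[mask.astype(np.int64)]      # shape (512, 512, 3): per-pixel lookup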
In Python, for-loops are generally faster than equivalent while-loops, since the loop counter is managed in C rather than with explicit Python-level increments. Also, moving

maskLoot1[i][j*3] = mask1[i][j]
maskLoot1[i][j*3+1] = mask1[i][j]
maskLoot1[i][j*3+2] = mask1[i][j]

into a function and calling that function in the loop can speed up the process, because local variable lookups inside a function are faster than global lookups.
How do I concatenate two matrices into one matrix? The resulting matrix should have the same height as the two input matrices, and its width should equal the sum of the widths of the two input matrices.
I am looking for a pre-existing method that will perform the equivalent of this code:
def concatenate(mat0, mat1):
    # Assume that mat0 and mat1 have the same height
    res = cv.CreateMat(mat0.height, mat0.width + mat1.width, mat0.type)
    for x in xrange(res.height):
        for y in xrange(mat0.width):
            cv.Set2D(res, x, y, mat0[x, y])
        for y in xrange(mat1.width):
            cv.Set2D(res, x, y + mat0.width, mat1[x, y])
    return res
If you are using OpenCV (which gives you NumPy support), you can use the NumPy function np.hstack((img1, img2)) to do this.
e.g.:
import cv2
import numpy as np
# Load two images of same size
img1 = cv2.imread('img1.jpg')
img2 = cv2.imread('img2.jpg')
both = np.hstack((img1,img2))
You should use OpenCV through its NumPy interface; the legacy API uses cvMat, but NumPy arrays are really easy to work with. As suggested by @abid-rahman-k, you can use hstack (which I didn't know about at the time); here is what I had used:
h1, w1 = img.shape[:2]
h2, w2 = img1.shape[:2]
nWidth = w1 + w2
nHeight = max(h1, h2)
hdif = (h1 - h2) // 2  # integer division, since this is used as a slice index
newimg = np.zeros((nHeight, nWidth, 3), np.uint8)
newimg[hdif:hdif+h2, :w2] = img1
newimg[:h1, w2:w1+w2] = img
But if you want to work with the legacy code, this should help.
Let's assume that the height of img0 is greater than the height of image.
nW = img0.width+image.width
nH = img0.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
yc = (img0.height-image.height)/2
cv.SetImageROI(newCanvas,(0,yc,image.width,image.height))
cv.Copy(image, newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(image.width,0,img0.width,img0.height))
cv.Copy(img0,newCanvas)
cv.ResetImageROI(newCanvas)
OpenCV has in-built functions for concatenating images vertically/horizontally:
cv2.vconcat()
cv2.hconcat()
Note: While concatenating, images must be of the same dimensions or else you will come across an error message similar to: error: (-215:Assertion failed)....
Code:
img = cv2.imread('flower.jpg', 1)
# concatenate images vertically
vertical_concat = cv2.vconcat([img, img])
# concatenate images horizontally
horizontal_concat = cv2.hconcat([img, img])
I know this question is old, but I stumbled across it because I was looking to stack two-dimensional arrays along a new third dimension (not just concatenate them along an existing one).
np.hstack will not do this.
Assuming you have two 640x480 images that are simply two-dimensional (e.g. grayscale), use dstack:
a = cv2.imread('imgA.jpg', cv2.IMREAD_GRAYSCALE)  # read as a single-channel image
b = cv2.imread('imgB.jpg', cv2.IMREAD_GRAYSCALE)

a.shape  # prints (480, 640)
b.shape  # prints (480, 640)

imgBoth = np.dstack((a, b))
imgBoth.shape  # prints (480, 640, 2)

imgBothH = np.hstack((a, b))
imgBothH.shape  # prints (480, 1280)
# = not what I wanted, first dimension not preserved
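np.stack does the same thing with an explicit new axis, which some find clearer; a sketch equivalent to the dstack call for 2D inputs:

imgBoth = np.stack((a, b), axis=-1)
imgBoth.shape  # prints (480, 640, 2), same as np.dstack((a, b))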