Shift image in scikit-image [Python]

I would like to have a function that does this, but it doesn't exist:

from skimage.transform import shift
shifted = shift(image, translation=(15.2, 35.7),
                mode='wrap', preserve_range=True)
Could you help me write such a function using skimage.transform.AffineTransform?

from skimage.transform import AffineTransform

def shift(image, translation):
    transform = AffineTransform(translation=translation)
    # How to do it???
    shifted = transform(image)  # Does not work; usage documentation
    # for this class is not present...
    return shifted
The function scipy.ndimage.interpolation.shift does what I want, but it is very slow - approximately 10-20x slower than rotating. numpy.roll is off the table too, as it doesn't support fractional translations.
The documentation is rather sparse:
http://scikit-image.org/docs/stable/api/skimage.transform.html#skimage.transform.AffineTransform

This seems to work. Yet if anyone knows a simpler and faster way, please let me know.

from skimage.transform import AffineTransform, warp

def shift(image, vector):
    transform = AffineTransform(translation=vector)
    shifted = warp(image, transform, mode='wrap', preserve_range=True)
    shifted = shifted.astype(image.dtype)
    return shifted
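A quick usage sketch (the array and offsets here are just illustrative):

import numpy as np

image = np.random.rand(64, 64)
shifted = shift(image, vector=(15.2, 35.7))  # subpixel shift, wrapped at the borders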

In 2D you can do:

import numpy as np
import scipy.ndimage

shift_matrix = np.array( [ [ 1, 0, -15.2], [0, 1, -35.7], [0, 0, 1] ] )
shifted = scipy.ndimage.affine_transform( image, shift_matrix )

But it is still relatively slow.
A home-made function could be:

def shift_img_along_axis( img, axis=0, shift=1, constant_values=0):
    """ Shift an array along a specific axis. Each new value is a weighted
    mean of the two associated original pixels, weighted by the distances
    to them.
    CHECKED: works for floating shifts.
    NOTE: at the border of the image, where not enough original pixels are
    accessible, the data is averaged with the additional constant_values.
    constant_values: value assigned to pixels with no association in the
    original image img.
    RETURNS: shifted image.
    A.Mau. """
    intshift = int(shift)
    remain0 = abs( shift - int(shift) )
    remain1 = 1-remain0  # if shift is an integer: remain1=1 and remain0=0
    npad = int( np.ceil( abs( shift ) ) )  # ceil relative to 0. ( 0.5=> 1 and -0.5=> -1 )
    # npad = int( abs( shift + 0.5*[-1,1][shift>0] ) )
    pad_arg = [(0,0)]*img.ndim
    pad_arg[axis] = (npad,npad)
    bigger_image = np.pad( img, pad_arg, 'constant', constant_values=constant_values)
    part1 = remain1*bigger_image.take( np.arange(npad+intshift, npad+intshift+img.shape[axis]), axis)
    if remain0==0:
        shifted = part1
    else:
        if shift>0:
            part0 = remain0*bigger_image.take( np.arange(npad+intshift+1, npad+intshift+1+img.shape[axis]), axis)
        else:
            part0 = remain0*bigger_image.take( np.arange(npad+intshift-1, npad+intshift-1+img.shape[axis]), axis)
        shifted = part0 + part1
    return shifted


Color Correction Matrix in XYZ/RGB not working

I am aiming to perform a color correction based on a reference image, using color charts. As a personal goal, I'm trying to correct the colors of an image I previously modified. The chart has been affected by the same layers, of course:
Originals:
Manually modified:
I'm using the following function that I've written myself to get the matrix:
def _get_matrix_transformation(self,
                               observed_colors: np.ndarray,
                               reference_colors: np.ndarray):
    """
    Args:
        observed_colors: colors found in target chart
        reference_colors: colors found on source/reference image
    Returns:
        Nothing.
    """
    # case 1
    observed_m = [observed_colors[..., i].mean() for i in range(observed_colors.shape[-1])]
    observed_colors = (observed_colors - observed_m).astype(np.float32)
    reference_m = [reference_colors[..., i].mean() for i in range(reference_colors.shape[-1])]
    reference_colors = (reference_colors - reference_m).astype(np.float32)

    # XYZ color conversion
    observed_XYZ = cv.cvtColor(observed_colors, cv.COLOR_BGR2XYZ)
    observed_XYZ = np.reshape(observed_colors, (observed_XYZ.shape[0] * observed_XYZ.shape[1],
                                                observed_XYZ.shape[2]))
    reference_XYZ = cv.cvtColor(reference_colors, cv.COLOR_BGR2XYZ)
    reference_XYZ = np.reshape(reference_colors, (reference_XYZ.shape[0] * reference_XYZ.shape[1],
                                                  reference_XYZ.shape[2]))

    # case 2
    # mean subtraction in order to use the covariance matrix
    # observed_m = [observed_XYZ[..., i].mean() for i in range(observed_XYZ.shape[-1])]
    # observed_XYZ = observed_XYZ - observed_m
    # reference_m = [reference_XYZ[..., i].mean() for i in range(reference_XYZ.shape[-1])]
    # reference_XYZ = reference_XYZ - reference_m

    # apply SVD
    H = np.dot(reference_XYZ.T, observed_XYZ)
    U, S, Vt = np.linalg.svd(H)

    # get transformation
    self._M = Vt.T * U.T

    # consider reflection case
    if np.linalg.det(self._M) < 0:
        Vt[2, :] *= -1
        self._M = Vt.T * U.T
    return
I'm applying the correction like this:
def _apply_profile(self, img: np.ndarray) -> np.ndarray:
    """
    Args:
        img: image to be corrected.
    Returns:
        Corrected image.
    """
    # Revert gamma compression
    img = adjust_gamma(img, gamma=1/2.2)

    # Apply color correction
    corrected_img = cv.cvtColor(img.astype(np.float32), cv.COLOR_BGR2XYZ)
    corrected_img = corrected_img.reshape((corrected_img.shape[0]*corrected_img.shape[1], corrected_img.shape[2]))
    corrected_img = np.dot(self._M, corrected_img.T).T.reshape(img.shape)
    corrected_img = cv.cvtColor(corrected_img.astype(np.float32), cv.COLOR_XYZ2BGR)
    corrected_img = np.clip(corrected_img, 0, 255)

    # Apply gamma
    corrected_img = adjust_gamma(corrected_img.astype(np.uint8), gamma=2.2)
    return corrected_img
This is the result I'm currently getting if the transformation is done in BGR (with the color conversion calls commented out):
In XYZ (don't mind the resizing; that's my doing):
Now, I'm asking these questions:
Is inverting gamma necessary in this case? If so, am I doing it correctly? Should I implement a LUT that works with other data types such as np.float32?
Should the subtraction of the mean be done in XYZ or in BGR color space (case 1 vs case 2)?
Is considering the reflection case (as in a rigid body rotation problem) necessary?
Is clipping necessary? And if so, are those the correct values and data types?
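For reference, a minimal sketch of the SVD step described in the question; note that NumPy's * is elementwise, so Vt.T * U.T does not compute the matrix product that this Kabsch-style construction calls for, while @ does. Names here are illustrative, not from the question:

import numpy as np

def estimate_color_rotation(observed_xyz, reference_xyz):
    # observed_xyz, reference_xyz: (N, 3) mean-centered color arrays
    H = reference_xyz.T @ observed_xyz
    U, S, Vt = np.linalg.svd(H)
    M = Vt.T @ U.T
    if np.linalg.det(M) < 0:  # reflection case
        Vt[2, :] *= -1
        M = Vt.T @ U.T
    return M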

How to optimize a Python loop that replaces raster nan values with other raster region mean values

I need to read 2 rasters: one a satellite image (target) and the other regions of this image (segmentation). The target image contains numeric values and nans. The segmentation image contains regions, where all pixels with the same value belong to the same region; e.g., all pixels with value 1 are from segment 1.
Based on that, I would like to calculate the mean value of each segment that contains nans and replace the nan values with that segment's mean value. If a segment of 5 pixels has the target values (2, nan, 4, 4, 2), the nan must be replaced by 3.
I have written a script that does this, but the code is very slow in the for loop when I am processing large images. How can the loop's performance be improved?
import numpy
import rasterio

### returns which pixels are nan
def get_gaps(img):
    gaps = numpy.argwhere( numpy.isnan( img ) )
    return( gaps )

def fill(img_targ, gaps_targ, img_seg1):
    ### Get which segments contain NA on target image
    indices_gap_targ = numpy.array( list( zip(gaps_targ[:,0], gaps_targ[:,1] ) ) )
    segments_targ = img_seg1[ indices_gap_targ[:,0], indices_gap_targ[:,1] ]
    segments_targ = numpy.unique( segments_targ[~numpy.isnan( segments_targ )] )
    for seg in segments_targ:
        ### Get seg pixel position
        seg_pixels = numpy.nonzero( img_seg1 == seg )
        seg_indices = numpy.array( list( zip(seg_pixels[:][0], seg_pixels[:][1] ) ) )
        ### Get targ pix values
        targ_values_seg = img_targ[ seg_indices[:,0], seg_indices[:,1] ]
        ### Check if any is not nan, otherwise there is no value to use as mean
        if( numpy.any( ~numpy.isnan(targ_values_seg) ) ):
            ### Get nan position and replace by mean value
            nan_pos = numpy.isnan( targ_values_seg )
            img_targ[ seg_indices[:,0][nan_pos], seg_indices[:,1][nan_pos] ] = numpy.nanmean(targ_values_seg)
    return img_targ

input_targ_filename = "/home/path/target.tif"
input_seg1_filename = "/home/path/segmentation.tif"

with rasterio.open(input_targ_filename) as dataset:
    img_targ = dataset.read(1)
    img_targ[ img_targ < -100000 ] = numpy.nan
    kwargs = dataset.meta

with rasterio.open(input_seg1_filename) as dataset:
    img_seg1 = dataset.read(1)
    img_seg1[ img_seg1 < -100000 ] = numpy.nan

gaps_targ = get_gaps(img_targ)
img_filled = fill(img_targ, gaps_targ, img_seg1)
np.bincount is the tool of choice for this kind of problem. (It does essentially the same as the more intuitive np.add.at but is typically way faster.)
import numpy as np

# create mock data (this takes longer than the actual processing)
print("creating example")
N = 1000
NS = 2000
tgt = np.random.randn(N,N)
tgt[np.random.random((N,N))<0.1] = np.nan
seg = np.zeros((N,N),int)
seg.ravel()[np.random.choice(N*N,NS,replace=False)] = np.arange(1,NS+1)
idcs = np.s_[1:],np.s_[:,1:],np.s_[:-1],np.s_[:,:-1]
while np.count_nonzero(seg) < N*N/2:
    i = np.random.randint(4)
    idx,cidx = idcs[i],idcs[i-2]
    seg[idx][seg[idx]==0] = seg[cidx][seg[idx]==0]

# replace nans (in-place, overwrites nans in tgt)
print("replacing nans")
n = np.isnan(tgt)
nn = ~n
segnn = seg[nn]
tgt[n] = (np.bincount(segnn,tgt[nn],NS+1)/np.bincount(segnn,None,NS+1))[seg[n]]

# check
print("verifying",end=" ... ")
sample = np.random.randint(0,NS+1,10)
for i in sample:
    assert np.allclose(tgt[n][seg[n]==i],np.mean(tgt[nn][seg[nn]==i]))
print("looks ok")
To answer your example, you can do it like this with no loops:
seg = np.array((2,np.nan,4,4,2))
seg[np.isnan(seg)] = np.nanmean(seg)
output:
array([2., 3., 4., 4., 2.])
I hope this principle helps you implement it in your larger code.

Interpolate between two images

I'm trying to interpolate between two images in Python.
The images are of shape (188, 188).
I wish to interpolate the image 'in-between' these two images. Say Image_1 is at location z=0 and Image_2 is at location z=2. I want the interpolated image at location z=1.
I believe this answer (MATLAB) contains a similar problem and solution.
Creating intermediate slices in a 3D MRI volume with MATLAB
I've tried to convert this code to Python as follows:
from scipy.interpolate import interpn
from scipy.interpolate import griddata

# Construct 3D volume from images
# arr.shape = (2, 182, 182)
arr = np.r_['0,3', image_1, image_2]
slices,rows,cols = arr.shape

# Construct meshgrids
[X,Y,Z] = np.meshgrid(np.arange(cols), np.arange(rows), np.arange(slices));
[X2,Y2,Z2] = np.meshgrid(np.arange(cols), np.arange(rows), np.arange(slices*2));

# Run n-dim interpolation
Vi = interpn([X,Y,Z], arr, np.array([X2,Y2,Z2]).T)
However, this produces an error:
ValueError: The points in dimension 0 must be strictly ascending
I suspect I am not constructing my meshgrid(s) properly but am kind of lost on whether or not this approach is correct.
Any ideas?
---------- Edit -----------
Found some MATLAB code that appears to solve this problem:
Interpolating Between Two Planes in 3d space
I attempted to convert this to Python:
from scipy.ndimage.morphology import distance_transform_edt
from scipy.interpolate import interpn

def ndgrid(*args,**kwargs):
    """
    Same as calling ``meshgrid`` with *indexing* = ``'ij'`` (see
    ``meshgrid`` for documentation).
    """
    kwargs['indexing'] = 'ij'
    return np.meshgrid(*args,**kwargs)

def bwperim(bw, n=4):
    """
    perim = bwperim(bw, n=4)
    Find the perimeter of objects in binary images.
    A pixel is part of an object perimeter if its value is one and there
    is at least one zero-valued pixel in its neighborhood.
    By default the neighborhood of a pixel is 4 nearest pixels, but
    if `n` is set to 8 the 8 nearest pixels will be considered.
    Parameters
    ----------
      bw : A black-and-white image
      n : Connectivity. Must be 4 or 8 (default: 8)
    Returns
    -------
      perim : A boolean image
    From Mahotas: http://nullege.com/codes/search/mahotas.bwperim
    """
    if n not in (4,8):
        raise ValueError('mahotas.bwperim: n must be 4 or 8')
    rows,cols = bw.shape

    # Translate image by one pixel in all directions
    north = np.zeros((rows,cols))
    south = np.zeros((rows,cols))
    west = np.zeros((rows,cols))
    east = np.zeros((rows,cols))

    north[:-1,:] = bw[1:,:]
    south[1:,:]  = bw[:-1,:]
    west[:,:-1]  = bw[:,1:]
    east[:,1:]   = bw[:,:-1]
    idx = (north == bw) & \
          (south == bw) & \
          (west  == bw) & \
          (east  == bw)
    if n == 8:
        north_east = np.zeros((rows, cols))
        north_west = np.zeros((rows, cols))
        south_east = np.zeros((rows, cols))
        south_west = np.zeros((rows, cols))
        north_east[:-1, 1:]  = bw[1:, :-1]
        north_west[:-1, :-1] = bw[1:, 1:]
        south_east[1:, 1:]   = bw[:-1, :-1]
        south_west[1:, :-1]  = bw[:-1, 1:]
        idx &= (north_east == bw) & \
               (south_east == bw) & \
               (south_west == bw) & \
               (north_west == bw)
    return ~idx * bw

def signed_bwdist(im):
    '''
    Find perim and return masked image (signed/reversed)
    '''
    im = -bwdist(bwperim(im))*np.logical_not(im) + bwdist(bwperim(im))*im
    return im

def bwdist(im):
    '''
    Find distance map of image
    '''
    dist_im = distance_transform_edt(1-im)
    return dist_im

def interp_shape(top, bottom, num):
    if num < 0 or round(num) != num:
        print("Error: number of slices to be interpolated must be integer > 0")

    top = signed_bwdist(top)
    bottom = signed_bwdist(bottom)

    r, c = top.shape
    t = num+2

    print("Rows - Cols - Slices")
    print(r, c, t)
    print("")

    # rejoin top, bottom into a single array of shape (2, r, c)
    # MATLAB: cat(3,bottom,top)
    top_and_bottom = np.r_['0,3', top, bottom]
    #top_and_bottom = np.rollaxis(top_and_bottom, 0, 3)

    # create ndgrids
    x,y,z = np.mgrid[0:r, 0:c, 0:t-1]   # existing data
    x1,y1,z1 = np.mgrid[0:r, 0:c, 0:t]  # including new slice

    print("Shape x y z:", x.shape, y.shape, z.shape)
    print("Shape x1 y1 z1:", x1.shape, y1.shape, z1.shape)
    print(top_and_bottom.shape, len(x), len(y), len(z))

    # Do interpolation
    out = interpn((x,y,z), top_and_bottom, (x1,y1,z1))

    # MATLAB: out = out(:,:,2:end-1)>=0;
    array_lim = out[-1]-1
    out[out[:,:,2:out] >= 0] = 1

    return out
I call this as follows:
new_image = interp_shape(image_1, image_2, 1)
I'm pretty sure this is 80% of the way there, but I still get this error when running:
ValueError: The points in dimension 0 must be strictly ascending
Again, I am probably not constructing my meshes correctly. I believe np.mgrid should produce the same result as MATLAB's ndgrid, though.
Is there a better way to construct the ndgrid equivalents?
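(A quick sanity check of that premise, for what it's worth: np.mgrid does use 'ij' ordering, matching ndgrid.)

import numpy as np

x, y = np.mgrid[0:3, 0:4]  # ndgrid-style ('ij') ordering
xm, ym = np.meshgrid(np.arange(3), np.arange(4), indexing='ij')
assert (x == xm).all() and (y == ym).all()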
I figured this out. Or at least a method that produces desirable results.
Based on: Interpolating Between Two Planes in 3d space
import numpy as np
from scipy.ndimage.morphology import distance_transform_edt
from scipy.interpolate import interpn
# bwperim as defined in the question above

def signed_bwdist(im):
    '''
    Find perim and return masked image (signed/reversed)
    '''
    im = -bwdist(bwperim(im))*np.logical_not(im) + bwdist(bwperim(im))*im
    return im

def bwdist(im):
    '''
    Find distance map of image
    '''
    dist_im = distance_transform_edt(1-im)
    return dist_im

def interp_shape(top, bottom, precision):
    '''
    Interpolate between two contours

    Input: top
            [X,Y] - Image of top contour (mask)
           bottom
            [X,Y] - Image of bottom contour (mask)
           precision
            float - % between the images to interpolate
            Ex: num=0.5 - Interpolate the middle image between top and bottom image
    Output: out
            [X,Y] - Interpolated image at num (%) between top and bottom
    '''
    if precision > 2:
        print("Error: Precision must be between 0 and 1 (float)")

    top = signed_bwdist(top)
    bottom = signed_bwdist(bottom)

    # rows, cols definition
    r, c = top.shape

    # Reverse % indexing
    precision = 1+precision

    # rejoin top, bottom into a single array of shape (2, r, c)
    top_and_bottom = np.stack((top, bottom))

    # create ndgrids
    points = (np.r_[0, 2], np.arange(r), np.arange(c))
    xi = np.rollaxis(np.mgrid[:r, :c], 0, 3).reshape((r**2, 2))
    xi = np.c_[np.full((r**2), precision), xi]

    # Interpolate for new plane
    out = interpn(points, top_and_bottom, xi)
    out = out.reshape((r, c))

    # Threshold distmap to values above 0
    out = out > 0

    return out

# Run interpolation
out = interp_shape(image_1, image_2, 0.5)
Example output:
I came across a similar problem where I needed to interpolate the shift between frames where the change did not merely constitute a translation but also changes to the shape itself. I solved this problem by:
1. Using center_of_mass from scipy.ndimage.measurements to calculate the center of the object we want to move in each frame
2. Defining a continuous parameter t, where t=0 corresponds to the first and t=1 to the last frame
3. Interpolating the motion between the two nearest frames (with regard to a specific t value) by shifting the images back/forward via shift from scipy.ndimage.interpolation and overlaying them
Here is the code:

import numpy as np
from scipy.ndimage.measurements import center_of_mass
from scipy.ndimage.interpolation import shift

def inter(images,t):
    #input:
    # images: list of arrays/frames ordered according to motion
    # t: parameter ranging from 0 to 1 corresponding to first and last frame
    #returns: interpolated image

    #direction of movement, assumed to be approx. linear
    a=np.array(center_of_mass(images[0]))
    b=np.array(center_of_mass(images[-1]))

    #find index of two nearest frames
    arr=np.array([center_of_mass(images[i]) for i in range(len(images))])
    v=a+t*(b-a) #convert t into vector
    idx1 = (np.linalg.norm((arr - v),axis=1)).argmin()
    arr[idx1]=np.array([0,0]) #this is sloppy, should be changed if relevant values are near [0,0]
    idx2 = (np.linalg.norm((arr - v),axis=1)).argmin()

    if idx1>idx2:
        b=np.array(center_of_mass(images[idx1])) #center of mass of nearest contour
        a=np.array(center_of_mass(images[idx2])) #center of mass of second nearest contour
        tstar=np.linalg.norm(v-a)/np.linalg.norm(b-a) #parameter from 0 to 1 for interpolation between the two nearest frames
        im1_shift=shift(images[idx2],(b-a)*tstar) #shift frame 1
        im2_shift=shift(images[idx1],-(b-a)*(1-tstar)) #shift frame 2
        return im1_shift+im2_shift #return average
    if idx1<idx2:
        b=np.array(center_of_mass(images[idx2]))
        a=np.array(center_of_mass(images[idx1]))
        tstar=np.linalg.norm(v-a)/np.linalg.norm(b-a)
        im1_shift=shift(images[idx2],-(b-a)*(1-tstar))
        im2_shift=shift(images[idx1],(b-a)*(tstar))
        return im1_shift+im2_shift
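A hypothetical usage sketch (frames is an assumed list of 2D arrays ordered along the motion):

import numpy as np

# generate 9 in-between images spanning the whole sequence
frames_interp = [inter(frames, t) for t in np.linspace(0, 1, 9)]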
Result example
I don't know the solution to your problem, but I don't think it's possible to do this with interpn.
I corrected the code that you tried, and used the following input images:
But the result is:
Here's the corrected code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy import interpolate
n = 8
img1 = np.zeros((n, n))
img2 = np.zeros((n, n))
img1[2:4, 2:4] = 1
img2[4:6, 4:6] = 1
plt.figure()
plt.imshow(img1, cmap=cm.Greys)
plt.figure()
plt.imshow(img2, cmap=cm.Greys)
points = (np.r_[0, 2], np.arange(n), np.arange(n))
values = np.stack((img1, img2))
xi = np.rollaxis(np.mgrid[:n, :n], 0, 3).reshape((n**2, 2))
xi = np.c_[np.ones(n**2), xi]
values_x = interpolate.interpn(points, values, xi, method='linear')
values_x = values_x.reshape((n, n))
print(values_x)
plt.figure()
plt.imshow(values_x, cmap=cm.Greys)
plt.clim((0, 1))
plt.show()
I think the main difference between your code and mine is in the specification of xi. interpn tends to be somewhat confusing to use; if you're curious about the mechanics of how I've specified xi, see this older answer of mine where I explain it in greater detail.
This result is not entirely surprising, because interpn just linearly interpolated between the two images: so the parts which had 1 in one image and 0 in the other simply became 0.5.
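A one-pixel illustration of that behaviour (toy values):

import numpy as np
from scipy.interpolate import interpn

points = (np.array([0, 2]),)             # one pixel, sampled at z=0 and z=2
values = np.array([1.0, 0.0])            # 1 in the first image, 0 in the second
print(interpn(points, values, [[1.0]]))  # halfway, z=1 -> [0.5]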
Over here, since one image is the translation of the other, it's clear that we want an image that's translated "in-between". But how would interpn interpolate two general images? If you had one small circle and one big circle, is it in any way clear that there should be a circle of intermediate size "between" them? What about interpolating between a dog and a cat? Or a dog and a building?
I think you are essentially trying to "draw lines" connecting the edges of the two images and then trying to figure out the image in between. This is similar to sampling a moving video at a half-frame. You might want to check out something like optical flow, which connects adjacent frames using vectors. I'm not sure which Python packages/implementations are available, though.

How to hide one image into another one using LSB substitution?

I am trying to implement a basic steganography technique in which I replace the LSB of the carrier image with the MSB of the message image. The LSB can belong to any of the RGB channels.
My approach is quite naive, as I loop over the message_matrix and store the MSB of a particular RGB channel in the corresponding LSB of the carrier_matrix. As the image size is not more than 1024 * 1024, the time complexity of this operation is O(n^2), but since I am using Python the time taken is very high compared to Java.
Can the same operation be performed in a more optimised way?
def hide_using_bpcs(self, carrier_path, message_path, index, color_index):
    carrier_matrix = self.image_to_matrix(carrier_path)
    message_matrix = self.image_to_matrix(message_path) #use np.zeros

    for row_index, row in enumerate(message_matrix):
        for pixel_index, pixel in enumerate(row):
            color = message_matrix[row_index][pixel_index][color_index]
            msb = (color & 0xff) >> 7
            carrier_pixel = carrier_matrix[
                row_index][pixel_index][color_index]
            carrier_matrix[row_index][pixel_index][
                color_index] = self.set_bit(carrier_pixel, index, msb)

    stegano_image = self.matrix_to_image(carrier_matrix)
    return stegano_image
Now, for displaying a particular bit plane, let's say (Red, 0), I am setting all the values of the Green and Blue planes to 0 and retaining only the value of the LSB (bit 0) of the red color in the image. I have gone through some implementations using OpenCV, like b,g,r = cv2.split(img), but this only splits the image into its 3 channels. What I want is to split a particular channel, say Red, further into 8 bit planes by retaining the value at the corresponding bit position.
def display_bit_plane(self, path, color_index, color_bit):
    matrix = self.image_to_matrix(path)
    matrix = matrix.astype(int)
    result_matrix = self.image_to_matrix(path)
    mask = 1 << color_bit

    for row_index, row in enumerate(matrix):
        for pixel_index, pixel in enumerate(row):
            for iterator in range(0, 3):
                result_matrix[row_index][pixel_index][iterator] = 0
            color = matrix[row_index][pixel_index][color_index]
            result_matrix[row_index][pixel_index][color_index] = self.set_bit(0, 7, ((color & mask) != 0))

    stegano_image = self.matrix_to_image(result_matrix)
    return stegano_image
I am using NumPy arrays for all the computations, but iterating them in the usual way is very costly. Please suggest some optimisation for the above two functions, so that these operations can be done in less than 1 second.
Edit 1:
I have optimised the second function, which retrieves the bit plane. If it can be further simplified, please do tell. color_index represents R, G, B as 0, 1, 2 respectively, and color_bit is the bit position from 0-7.
def display_bit_plane_optimised(self, path, color_index, color_bit):
    message_matrix = self.image_to_matrix(path)
    change_index = [0, 1, 2]
    change_index.remove(color_index)
    message_matrix[:, :, change_index] = 0

    mask = 1 << color_bit
    message_matrix = message_matrix & mask
    message_matrix[message_matrix == 1] = 1 << 7

    stegano_image = self.matrix_to_image(message_matrix)
    return stegano_image
Anything that applies to the whole array can be vectorised. If you want to apply an operation only on a part of the array, slice it.
I'm providing complete code so as not to make assumptions about the image_to_matrix() and matrix_to_image() methods. Take it from there.
I've kept your logic intact otherwise, but if you're only intending to embed the secret in the LSB, you can ditch pixel_bit, set its value to zero and simplify whatever constants result out of it. For example, in embed() you'd simply get mask = 0xfe, while any bitshifts by 0 are inconsequential.
import numpy as np
from PIL import Image

class Steganography:
    def embed(self, cover_file, secret_file, color_plane, pixel_bit):
        cover_array = self.image_to_matrix(cover_file)
        secret_array = self.image_to_matrix(secret_file)
        # every bit except the one at `pixel_bit` position is 1
        mask = 0xff ^ (1 << pixel_bit)
        # shift the MSB of the secret to the `pixel_bit` position
        secret_bits = ((secret_array[...,color_plane] >> 7) << pixel_bit)
        height, width, _ = secret_array.shape
        cover_plane = (cover_array[:height,:width,color_plane] & mask) + secret_bits
        cover_array[:height,:width,color_plane] = cover_plane
        stego_image = self.matrix_to_image(cover_array)
        return stego_image

    def extract(self, stego_file, color_plane, pixel_bit):
        stego_array = self.image_to_matrix(stego_file)
        change_index = [0, 1, 2]
        change_index.remove(color_plane)
        stego_array[...,change_index] = 0
        stego_array = ((stego_array >> pixel_bit) & 0x01) << 7
        exposed_secret = self.matrix_to_image(stego_array)
        return exposed_secret

    def image_to_matrix(self, file_path):
        return np.array(Image.open(file_path))

    def matrix_to_image(self, array):
        return Image.fromarray(array)
When I run it, it all completes within a second.
plane = 0
bit = 1
cover_file = "cover.jpg"
secret_file = "secret.jpg"
stego_file = "stego.png"
extracted_file = "extracted.png"
S = Steganography()
S.embed(cover_file, secret_file, plane, bit).save(stego_file)
S.extract(stego_file, plane, bit).save(extracted_file)
Notes
Your display_bit_plane_optimised() is reasonably optimised, but it has a bug if color_bit is anything but 0. The line
message_matrix = message_matrix & mask
zeros every other bit, but unless color_bit is 0, the values will be some other power of 2. So when you come to
message_matrix[message_matrix == 1] = 1 << 7
no pixel is modified. If you want to keep your way, you have to change the last line to
message_matrix[message_matrix != 0] = 1 << 7
My approach was simply to bring the embedded bit to the LSB position, zero out every other bit and then shift it to the MSB position with no conditionals.
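In toy form (hypothetical value, with pixel_bit = 1):

v = 0b10110110         # a stego pixel value
bit = (v >> 1) & 0x01  # bring the embedded bit to the LSB, drop the rest
print(bit << 7)        # shift it to the MSB position -> 128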

Shifting an image in numpy

I have an image in a 2d numpy array. I want to shift the image by X and Y offsets and want the rest of the frame padded with zeros. I have seen discussions about the 'roll' function, but that only works in one axis (unless someone can point me to a 2d version with padding). I have tried slicing, but I run into trouble when the shift offsets can have all possible directions. I don't want to navigate through all the X/Y offset +/- permutations. Is there a simple general solution? I have the code below, which works nicely for X-offset=+100, but crashes for X-offset=-100.
Thanks,
Gert
import matplotlib.pyplot as plt
import scipy.misc as msc
import numpy as np
lena = msc.lena()
lena.dtype
(imx,imy)= lena.shape
ox= 100
oy= 20
shift_lena = np.zeros((imx,imy))
shift_lena[0:imy-oy,0:imx-ox] = lena[oy:,ox:]
shift_lena_m = shift_lena.astype(np.int64)
shift_lena_m.dtype
plt.figure(figsize=(10, 3.6))
plt.subplot(131)
plt.imshow(lena, cmap=plt.cm.gray)
plt.subplot(132)
plt.imshow(shift_lena_m, cmap=plt.cm.gray)
plt.subplots_adjust(wspace=0, hspace=0., top=0.99, bottom=0.01, left=0.05, right=0.99)
plt.show()
There's no way around handling negative and positive shifts accordingly:
non = lambda s: s if s<0 else None
mom = lambda s: max(0,s)
ox, oy = 100, 20
shift_lena = numpy.zeros_like(lena)
shift_lena[mom(oy):non(oy), mom(ox):non(ox)] = lena[mom(-oy):non(-oy), mom(-ox):non(-ox)]
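Wrapped into a reusable function (a sketch; the name shift_2d is mine):

import numpy as np

def shift_2d(img, ox, oy, fill=0):
    """Shift img by (ox, oy) pixels, padding the exposed frame with fill."""
    non = lambda s: s if s < 0 else None
    mom = lambda s: max(0, s)
    out = np.full_like(img, fill)
    out[mom(oy):non(oy), mom(ox):non(ox)] = img[mom(-oy):non(-oy), mom(-ox):non(-ox)]
    return out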
You can use the roll function to circularly shift x and y and then zero-fill the offset:
import numpy as np

def shift_image(X, dx, dy):
    X = np.roll(X, dy, axis=0)
    X = np.roll(X, dx, axis=1)
    if dy>0:
        X[:dy, :] = 0
    elif dy<0:
        X[dy:, :] = 0
    if dx>0:
        X[:, :dx] = 0
    elif dx<0:
        X[:, dx:] = 0
    return X
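A quick check (toy array; the expected values are worked out by hand):

import numpy as np

img = np.arange(16).reshape(4, 4)
print(shift_image(img, dx=1, dy=-1))
# [[ 0  4  5  6]
#  [ 0  8  9 10]
#  [ 0 12 13 14]
#  [ 0  0  0  0]]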
For shifting along a specific axis, for integer and non-integer shifts, you may use:

import numpy as np

def shift_img_along_axis( img, axis=0, shift=1, constant_values=0):
    """ Shift an array along a specific axis. Each new value is a weighted
    mean of the two associated original pixels, weighted by the distances
    to them.
    CHECKED: works for floating shifts.
    NOTE: at the border of the image, where not enough original pixels are
    accessible, the data is averaged with the additional constant_values.
    constant_values: value assigned to pixels with no association in the
    original image img.
    RETURNS: shifted image.
    A.Mau. """
    intshift = int(shift)
    remain0 = abs( shift - int(shift) )
    remain1 = 1-remain0  # if shift is an integer: remain1=1 and remain0=0
    npad = int( np.ceil( abs( shift ) ) )  # ceil relative to 0. ( 0.5=> 1 and -0.5=> -1 )
    pad_arg = [(0,0)]*img.ndim
    pad_arg[axis] = (npad,npad)
    bigger_image = np.pad( img, pad_arg, 'constant', constant_values=constant_values)
    part1 = remain1*bigger_image.take( np.arange(npad+intshift, npad+intshift+img.shape[axis]), axis)
    if remain0==0:
        shifted = part1
    else:
        if shift>0:
            part0 = remain0*bigger_image.take( np.arange(npad+intshift+1, npad+intshift+1+img.shape[axis]), axis)
        else:
            part0 = remain0*bigger_image.take( np.arange(npad+intshift-1, npad+intshift-1+img.shape[axis]), axis)
        shifted = part0 + part1
    return shifted
A quick example:

np.random.seed(1)
img = np.random.uniform(0, 10, (3, 4)).astype('int')
print(img)
shift = 1.5
shifted = shift_img_along_axis(img, axis=1, shift=shift)
print(shifted)

Printed image:
[[4 7 0 3]
 [1 0 1 3]
 [3 5 4 6]]
Shifted image:
[[3.5 1.5 1.5 0. ]
 [0.5 2.  1.5 0. ]
 [4.5 5.  3.  0. ]]
With our shift of 1.5, the first value in the shifted image is the mean of 7 and 0, and so on... If a value is missing in the original image, an additional value of 0 (constant_values) is averaged in.
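Negative fractional shifts go through the same path; e.g. with the same img (expected values worked out by hand; borders are again averaged with constant_values=0):

shifted_back = shift_img_along_axis(img, axis=1, shift=-1.5)
print(shifted_back)
# [[0.  2.  5.5 3.5]
#  [0.  0.5 0.5 0.5]
#  [0.  1.5 4.  4.5]]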
