How many unique rotations of a 3D array are there around an axis? For example, a 3x3x3 array can be rotated 8 times around each axis, where one rotation means rolling/shifting all the slices around that axis by a single element. Combining all three axes gives 8*8*8 rotations, but how many of them are unique?
By my calculation, there are 208. And for a 5x5x5 3D array, out of a total of 13824 rotations (24*24*24), only 2048 are unique.
I would appreciate it if someone could confirm these numbers or correct me if I am wrong.
I used the following script to generate these.
import numpy

def rotateSlice(slice_data, num_elements=1, direction='c'):
    if direction == 'ac':
        num_elements *= -1
    rotated_slice = slice_data
    indexed_slice = numpy.arange(0, slice_data.shape[0]**2).reshape(slice_data.shape)
    int_slice_idx = 0
    for i in range(slice_data.shape[0], 1, -2):
        clipped_array = slice_data[int_slice_idx:slice_data.shape[0]-int_slice_idx, int_slice_idx:slice_data.shape[0]-int_slice_idx]
        clipped_indx_array = indexed_slice[int_slice_idx:slice_data.shape[0]-int_slice_idx, int_slice_idx:slice_data.shape[0]-int_slice_idx]
        # flat indices of the outer ring of the clipped sub-square, in order around the ring
        outer_border = list(range(i)) + list(range(range(i)[-1]+i, i*(i-2)+range(i)[-1]+i, i)) + list(range(i**2 - 1, i**2 - (i+1), -1)) + list(range(i*(i-2), 0, -i))
        new_outer_border = numpy.roll(outer_border, num_elements)
        rotated_slice.flat[clipped_indx_array.flat[outer_border]] = clipped_array.flat[new_outer_border]
        int_slice_idx += 1
    return rotated_slice
def rotateCube(cube, rotate_dim, num_elements=1, direction='c'):
    out_cube = numpy.zeros(cube.shape, dtype='int')
    for i in range(cube.shape[rotate_dim]):
        slice = numpy.take(cube, i, rotate_dim)
        rotated_slice = rotateSlice(slice, num_elements=num_elements, direction=direction)
        # TODO - It would be better if there were a less ugly way to do the following step, similar to numpy.take
        if rotate_dim == 0:
            out_cube[i, :, :] = rotated_slice
        elif rotate_dim == 1:
            out_cube[:, i, :] = rotated_slice
        elif rotate_dim == 2:
            out_cube[:, :, i] = rotated_slice
    return out_cube
def getAllCubeRotations(cube, step=1, direction='c'):
    total_rotations_in_axis = cube.shape[0]**2 - 1
    rotations = []
    for i in range(int(total_rotations_in_axis)):
        cube = rotateCube(cube, rotate_dim=0, num_elements=step, direction=direction)
        for j in range(int(total_rotations_in_axis)):
            cube = rotateCube(cube, rotate_dim=1, num_elements=step, direction=direction)
            for k in range(int(total_rotations_in_axis)):
                cube = rotateCube(cube, rotate_dim=2, num_elements=step, direction=direction)
                rotations.append(cube)
    return rotations
if __name__ == '__main__':
    size = 5
    a = numpy.arange(0, size*size*size).reshape((size, size, size))
    rotations = getAllCubeRotations(a)
    print(len(rotations))
    uniques = []
    for arr in rotations:
        if not any(numpy.array_equal(arr, unique_arr) for unique_arr in uniques):
            uniques.append(arr)
    print(len(uniques))
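Checking uniqueness by pairwise numpy.array_equal comparisons is quadratic in the number of rotations, which gets slow for larger cubes. As a sanity check on the counts, here is a minimal alternative sketch, assuming the script above: every rotation has the same shape and dtype, so two arrays are equal exactly when their raw bytes are equal, and the byte strings can be collected in a set.

def count_unique(rotations):
    # identical shape/dtype for every rotation, so equal tobytes() <=> equal arrays
    return len({arr.tobytes() for arr in rotations})

print(count_unique(rotations))  # should match len(uniques)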
Related
I have N NumPy arrays of shape data[n,m,3]. I want to fit/squeeze/split/slice/reshape them into N' arrays of shape new_data_#[1000,m,3], where # is the index of the new arrays. The problem is that n can be smaller or bigger than 1000. When it is smaller, I should somehow fill the remaining capacity of new_array with the next array; when it is bigger than 1000, I should create a new new_data_# and add the rest to that one. I don't know how to manage this. Here is some pseudo-code, but it can't be done this way (for example, the while loop may not be necessary). The output can be written to disk or returned in a new data format.
def array2blocks(array_files):
    for each N in array_files:
        N = data = np.random.rand(n, m, 3)
        new_data = np.zeros((1000, m, 3), dtype=np.float32)
        j = 0
        index = 0
        while j <= new_data.shape[0]:
            for i in range(data.shape[0]):
                print("--->", data[i, :, :])
                print(i)
                if i <= new_data.shape[0]:
                    # here first we should check the remaining capacity of new_data and then insert data into it
                    # new_data[i, :, :] = data[i, :, :]  # this overrides previous items, so not correct
                    print(new_data)
                else:
                    print('n>1000')
                    new_data_name = 'new_data' + '_' + str(index)
                    # here fill the rest of the data into new_data
                    ...
                    index += 1
                    # when capacity is full, write it to the disk
                    print(new_data)
UPDATE with Aaron's old answer:
I replaced 1000 with batch_size = 5 to make it simple.
import numpy as np

def numpyarrays2blocks(array_files):
    N1 = np.random.rand(7, 4, 3)
    N2 = np.random.rand(7, 4, 3)
    N3 = np.random.rand(4, 4, 3)
    # array_files = []
    array_files.append(N1)
    array_files.append(N2)
    array_files.append(N3)
    for N in array_files:
        n = N.shape[0]
        m = N.shape[1]
        batch_size = 5
        # N = data = np.random.rand(n, m, 3)
        data = N
        # print(data)
        new_arrays = []
        i = 0  # the current row index to insert
        while i < n:
            new_data = np.zeros((batch_size, m, 3), dtype=np.float32)
            j = min(i + batch_size, n)  # the last row (exclusive) to copy to new_data
            # j - i is the number of rows to copy
            new_data[:j - i, :, :] = data[i:j, :, :]
            print('NEW DATA: ', new_data)
            i = j  # update the index
            new_arrays.append(new_data)
        print(new_arrays)
data is used to store the temporary result, and data_start is the index at which to insert rows into data.
Allocate data if it is None.
Yield data once it is fully filled.
merge_and_split is a generator, so the memory demand should stay low.
import random
from typing import Iterator
import numpy as np

def merge_and_split(arrays, batch_size) -> Iterator:
    arrays = tuple(arrays)
    dtype = arrays[0].dtype
    data_shape = (batch_size,) + arrays[0].shape[1:]
    assert all(a.shape[1:] == data_shape[1:] for a in arrays), "Shape mismatch"
    data = None
    data_start = 0
    for src in arrays:
        src_index = 0
        src_avail = src.shape[0]
        while src_avail >= 1:
            if data is None:
                # allocate if None
                data = np.zeros(data_shape, dtype=dtype)
                data_start = 0
            num_moved = min(batch_size - data_start, src_avail)
            data[data_start:data_start + num_moved, ...] = src[src_index:src_index + num_moved, ...]
            data_start += num_moved
            src_index += num_moved
            src_avail -= num_moved
            if data_start >= batch_size:
                yield data
                data = None
    if data is not None:
        yield data
def input_arrays():
    number = 10
    r = random.Random(13)
    return [np.random.randint(0, 10, size=(r.randint(1, 5), 4, 3)) for _ in range(number)]

def main():
    # Testing input and output
    arrays = input_arrays()
    # for i, item in enumerate(arrays):
    #     print('input', i, item.shape)
    #     print(item)
    result = list(merge_and_split(arrays, 5))
    # for i, item in enumerate(result):
    #     print('result', i, item.shape)
    #     print(item)
    src_concat = np.vstack(arrays)
    row_number = sum(s.shape[0] for s in arrays)
    print('concatenated', src_concat.shape, row_number)
    out_concat = np.vstack(result)
    print(out_concat.shape)
    print((out_concat[0:row_number, ...] == src_concat).all())  # They are indeed the same

if __name__ == '__main__':
    main()
You can concatenate all your original arrays and then split the result:
ars = ... # list of N arrays
ars = np.concatenate(ars, axis=0)
ars = np.split(ars, np.arange(1000, ars.shape[0], 1000))
If you pass np.split an integer instead of a list of split points, it splits the array into that many equal sections, and it will barf if the division isn't exact. Specifying explicit split points, as with np.arange, allows you to have a shorter final segment.
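If every block must be exactly 1000 rows, as the new_data = np.zeros((1000, m, 3)) in the question suggests, you can zero-pad the shorter final segment afterwards. A minimal sketch, assuming ars is the list of segments produced above:

import numpy as np

last = ars[-1]
if last.shape[0] < 1000:
    # pad the final block with zero rows up to the full batch size
    pad = np.zeros((1000 - last.shape[0],) + last.shape[1:], dtype=last.dtype)
    ars[-1] = np.concatenate([last, pad], axis=0)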
I have been trying to implement k-means clustering with a heatmap, but have been unsuccessful.
Here is the initial data set:
https://raw.githubusercontent.com/gsprint23/cpts215/master/progassignments/files/simple.csv
And here is my code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import random
#%matplotlib inline

def truncate(f, n):
    return math.floor(f * 10 ** n) / 10 ** n
def chooseCenter(data, centers):
    length = data.shape
    cent = []
    while len(cent) < centers:
        x = random.randrange(0, length[0])
        y = random.randrange(0, length[1])
        if data.iloc[x][y] not in cent:
            d = truncate(data.iloc[x][y], 2)
            cent.append(d)
    return cent
def distance(val, center):
    return math.sqrt((val - center)**2)

def getDistances(centers, data):
    length = data.shape
    dist = []
    for i in range(length[0]):
        for j in range(length[1]):
            y = []
            for k in range(len(centers)):
                val = distance(data.iloc[i][j], centers[k])
                y.append(truncate(val, 3))
            dist.append(y)
    return dist
def findClosest(data, dist):
    close = data.copy()
    length = close.shape
    indexes = []
    for i in range(len(dist)):
        pt = min(dist[i])
        idx = dist[i].index(pt)
        indexes.append(idx)
    # print(indexes)
    length = data.shape
    n = np.array(indexes)
    n = pd.DataFrame(np.reshape(n, (length[0], length[1])))
    # reshape this data frame into the same shape as the data
    # keep running the find closest until there is no change
    # try heatmap on this?
    # this should cluster it, but to make sure test it
    # might need to do some tweaking to this
    return n
    # for i in range(length[0]):
    #     for j in range(length[1]):
    #         print('dist[i]', dist[j])
    #         pt = min(dist[j])
    #         print(pt)
    #         idx = dist[j].index(pt)
    #         close.iloc[i][j] = int(idx)
    # return close
def computeNewCenter(data, close):
    d = dict()
    for i in range(len(close)):
        for j in range(len(close[0])):
            d[close.iloc[i][j]] = []
    for i in range(len(data)):
        for j in range(len(data[0])):
            if close.iloc[i][j] in d:
                d[close.iloc[i][j]].append(data.iloc[i][j])
    newCenters = []
    for key, value in d.items():
        m = np.mean(value)
        newCenters.append(truncate(m, 3))
    return newCenters
    # lst = [[] * numcenters]
    # for i in range(len(close)):
    #     for j in range(len(close[0])):
    #         if close.iloc[i][j]
def main():
    data = np.array(pd.read_csv('https://raw.githubusercontent.com/gsprint23/cpts215/master/progassignments/files/simple.csv', header=None))
    data = data.T
    # print(data)
    df = pd.DataFrame(data[1:], columns=data[0], dtype=float).T
    df = df.iloc[::-1]
    # print(df)
    # print(df.iloc[1][9])
    # print(df)
    # print(df.iloc[0][1])
    # heatmap = plt.pcolor(df, cmap=plt.cm.bwr)
    # plt.colorbar(heatmap)
    c = chooseCenter(df, 3)
    print(c)
    # print(len(c))
    dist = getDistances(c, df)
    # print(dist)
    y = findClosest(df, dist)
    # q = []
    # for i in range(len(c)):
    #     q.append([])
    # print(q)
    j = computeNewCenter(df, y)
    # print(j)
    length = df.shape
    oldFrame = pd.DataFrame(np.ndarray((length[0], length[1])))
    oldFrame = oldFrame.fillna(0)
    ct = 0
    while y.equals(oldFrame) == False:
        ct += 1
        oldFrame = y.copy()
        c = computeNewCenter(df, oldFrame)
        # print(c)
        dist = getDistances(c, df)
        # print(dist)
        y = findClosest(df, dist)
        # print(y)
    # plt.pcolor(df, cmap=plt.cm.bwr)
    l = []
    for i in range(len(y)):
        for j in range(len(y[0])):
            if y.iloc[i][j] == 1:
                l.append(df.iloc[i][j])
    for i in range(len(y)):
        for j in range(len(y[0])):
            if y.iloc[i][j] == 2:
                l.append(df.iloc[i][j])
    for i in range(len(y)):
        for j in range(len(y[0])):
            if y.iloc[i][j] == 0:
                l.append(df.iloc[i][j])
    l = np.ndarray((length[0], length[1]))
    l = pd.DataFrame(l)
    print(l)
    hm = plt.pcolor(l, cmap=plt.cm.bwr)
    plt.colorbar(hm)
    # print(y)
    # print(c)
    # print(ct)
    # plt.pcolor(y, cmap=plt.cm.bwr)

if __name__ == '__main__':
    main()
My line of thinking was this:
First, randomly choose the centers.
Then create a list of lists holding, for each point, the distance to each center.
Find the index of the minimum distance for each point.
Create a data frame of the same size as the data set and fill each element with the index of the center that point is closest to.
Recompute the centers by taking the mean of the points with the same center index.
Repeat this process until the index data frame does not change.
Create a new data frame and place points that share a center point close together in the frame.
Then create the heatmap.
This did not seem to work though.
Just wondering, am I on the right track or am I completely off? If I am on the right track, which parts would I need to change in order to fix the issue? If not, could you please point me in the right direction?
Here is a comparison of the maps: the first is the one my program generated, while the second is the way it is supposed to look.
I know my problem lies in some part of the k-means clustering algorithm. My guess is that it is either in the reassignment stage, where the points are reassigned to the centroids and the new centroids are calculated, or in the stopping condition, in that the algorithm does not run long enough. Also, something in the back of my head tells me that I am not doing this as efficiently as I could be and that I am missing something key. I have watched several videos on k-means clustering and understand it conceptually; I'm just having a hard time implementing it.
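For comparison, the assignment and update steps described in the list above can be written compactly with NumPy broadcasting, treating every cell as a 1-D point exactly as the code does. This is only a sketch; kmeans_1d and its arguments are hypothetical names, not part of the code above.

import numpy as np

def kmeans_1d(values, centers, max_iter=100):
    # values: flat 1-D array of all cell values; centers: 1-D array of k centers
    values = np.asarray(values, dtype=float).ravel()
    centers = np.asarray(centers, dtype=float)
    for _ in range(max_iter):
        # assignment step: index of the nearest center for every value
        labels = np.abs(values[:, None] - centers[None, :]).argmin(axis=1)
        # update step: each center moves to the mean of its assigned values
        new_centers = np.array([values[labels == k].mean() if np.any(labels == k)
                                else centers[k] for k in range(len(centers))])
        if np.allclose(new_centers, centers):  # stop when the centers no longer move
            break
        centers = new_centers
    return labels, centers

The returned labels can then be reshaped to the data frame's shape and passed to plt.pcolor.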
I'm working on a neural network where I am augmenting data via rotation and varying the size of each input volume.
Let me back up: the input to the network is a 3D volume. I generate variable-size 3D volumes and then pad each volume with zeros such that the input volume size is constant. Check here for an issue I was having with padding (now resolved).
I generate a variable-size 3D volume, append it to a list, and then convert the list into a numpy array. At this point, padding hasn't occurred, so converting it into a 4D tuple makes no sense...
import pdb
from random import randint

import numpy as np

input_augmented_matrix = []
label_augmented_matrix = []
for i in range(n_volumes):
    if i % 50 == 0:
        print("Augmenting step #" + str(i))
    slice_index = randint(0, n_input)
    z_max = randint(5, n_input)
    z_rand = randint(3, 5)
    z_min = z_max - z_rand
    x_max = randint(75, n_input_x)
    x_rand = randint(60, 75)
    x_min = x_max - x_rand
    y_max = randint(75, n_input_y)
    y_rand = randint(60, 75)
    y_min = y_max - y_rand
    random_rotation = randint(1, 4) * 90
    for j in range(2):
        temp_volume = np.empty((z_rand, x_rand, y_rand))
        k = 0
        for z in range(z_min, z_max):
            l = 0
            for x in range(x_min, x_max):
                m = 0
                for y in range(y_min, y_max):
                    if j == 0:
                        # input volume
                        try:
                            temp_volume[k][l][m] = input_matrix[z][x][y]
                        except:
                            pdb.set_trace()
                    else:
                        # ground truth volume
                        temp_volume[k][l][m] = label_matrix[z][x][y]
                    m = m + 1
                l = l + 1
            k = k + 1
        temp_volume = np.asarray(temp_volume)
        # note: np.rot90's second argument counts 90-degree turns, not degrees
        temp_volume = np.rot90(temp_volume, random_rotation)
        if j == 0:
            input_augmented_matrix.append(temp_volume)
        else:
            label_augmented_matrix.append(temp_volume)
input_augmented_matrix = np.asarray(input_augmented_matrix)
label_augmented_matrix = np.asarray(label_augmented_matrix)
The shape of input_augmented_matrix at this point is (N,).
Then I pad with the following code...
for i in range(n_volumes):
    print("Padding volume #" + str(i))
    input_augmented_matrix[i] = np.lib.pad(input_augmented_matrix[i],
                                           ((0, n_input_z - int(input_augmented_matrix[i][:, 0, 0].shape[0])),
                                            (0, n_input_x - int(input_augmented_matrix[i][0, :, 0].shape[0])),
                                            (0, n_input_y - int(input_augmented_matrix[i][0, 0, :].shape[0]))),
                                           'constant', constant_values=0)
    label_augmented_matrix[i] = np.lib.pad(label_augmented_matrix[i],
                                           ((0, n_input_z - int(label_augmented_matrix[i][:, 0, 0].shape[0])),
                                            (0, n_input_x - int(label_augmented_matrix[i][0, :, 0].shape[0])),
                                            (0, n_input_y - int(label_augmented_matrix[i][0, 0, :].shape[0]))),
                                           'constant', constant_values=0)
At this point, the shape is still (N,) even though every element of the array now has the same shape; for example, input_augmented_matrix[0].shape == input_augmented_matrix[1].shape.
Currently I just loop through and create a new array, but it takes too long and I would prefer a method that automates this. I do it with the following code...
input_4d = np.empty((n_volumes, n_input_z, n_input_x, n_input_y))
label_4d = np.empty((n_volumes, n_input_z, n_input_x, n_input_y))
for i in range(n_volumes):
    print("Converting to 4D tuple #" + str(i))
    for j in range(n_input_z):
        for k in range(n_input_x):
            for l in range(n_input_y):
                input_4d[i][j][k][l] = input_augmented_matrix[i][j][k][l]
                label_4d[i][j][k][l] = label_augmented_matrix[i][j][k][l]
Is there a cleaner and faster way to do this?
As I understand it, this part
k = 0
for z in range(z_min, z_max):
    l = 0
    for x in range(x_min, x_max):
        m = 0
        for y in range(y_min, y_max):
            if j == 0:
                # input volume
                try:
                    temp_volume[k][l][m] = input_matrix[z][x][y]
                except:
                    pdb.set_trace()
            else:
                # ground truth volume
                temp_volume[k][l][m] = label_matrix[z][x][y]
            m = m + 1
        l = l + 1
    k = k + 1
You just want to do this
temp_input = input_matrix[z_min:z_max, x_min:x_max, y_min:y_max]
temp_label = label_matrix[z_min:z_max, x_min:x_max, y_min:y_max]
and then
temp_input = np.rot90(temp_input, random_rotation)
temp_label = np.rot90(temp_label, random_rotation)
input_augmented_matrix.append(temp_input)
label_augmented_matrix.append(temp_label)
Here
input_augmented_matrix[i] = np.lib.pad(
    input_augmented_matrix[i],
    ((0, n_input_z - int(input_augmented_matrix[i][:, 0, 0].shape[0])),
     (0, n_input_x - int(input_augmented_matrix[i][0, :, 0].shape[0])),
     (0, n_input_y - int(input_augmented_matrix[i][0, 0, :].shape[0]))),
    'constant', constant_values=0)
It's better to do it like this, because the shape property gives you the size of the array along every dimension:
ia_shape = input_augmented_matrix[i].shape
input_augmented_matrix[i] = np.lib.pad(
    input_augmented_matrix[i],
    ((0, n_input_z - ia_shape[0]),
     (0, n_input_x - ia_shape[1]),
     (0, n_input_y - ia_shape[2])),
    'constant',
    constant_values=0)
I guess now you're ready to refactor the last part of your code with NumPy's fancy indexing.
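For the final 4D conversion specifically, since every padded volume ends up with the same shape, the triple loop can most likely be replaced by a single stacking call. A minimal sketch, assuming the padded input_augmented_matrix and label_augmented_matrix from above:

import numpy as np

# stack N equally-shaped (z, x, y) volumes into a single (N, z, x, y) array
input_4d = np.stack(list(input_augmented_matrix))
label_4d = np.stack(list(label_augmented_matrix))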
My common suggestions:
use functions for repeated parts of code, to avoid the kind of deep indentation you get in your cascade of loops;
if you really need that many nested loops and can't avoid them, consider recursion;
explore the abilities of NumPy in the official documentation: they're really exciting ;) For example, indexing is helpful for this task;
use the PyLint and Flake8 packages to inspect the quality of your code.
Do you want to write the neural network yourself, or do you just want to solve a pattern-recognition task? The SciPy library may contain what you need, and it's based on NumPy.
I. Am. Stuck.
I have been working on this for over a week now, and I cannot seem to get my code to run correctly. I am fairly new to PIL and Python as a whole. I am trying to make a 2x3 collage of some pictures. My code is listed below. I am trying to get my photos to fit without any excess black space in the newly created collage; however, when I run my code I can only get 2 pictures placed into the collage, instead of the 6 I want. Any suggestions would be helpful.
*CODE EDITED
from PIL import Image
im= Image.open('Tulips.jpg')
out=im.convert("RGB", (
0.412453, 0.357580, 0.180423, 0,
0.212671, 0.715160, 0.072169, 0,
0.019334, 0.119193, 0.950227, 0 ))
out.save("Image2" + ".jpg")
out2=im.convert("RGB", (
0.9756324, 0.154789, 0.180423, 0,
0.212671, 0.715160, 0.254783, 0,
0.123456, 0.119193, 0.950227, 0 ))
out2.save("Image3" + ".jpg")
out3= im.convert("1")
out3.save("Image4"+".jpg")
out4=im.convert("RGB", (
0.986542, 0.154789, 0.756231, 0,
0.212671, 0.715160, 0.254783, 0,
0.123456, 0.119193, 0.112348, 0 ))
out4.save("Image5" + ".jpg")
out5=Image.blend(im, out4, 0.5)
out5.save("Image6" + ".jpg")
listofimages=['Tulips.jpg', 'Image2.jpg', 'Image3.jpg', 'Image4.jpg', 'Image5.jpg', 'Image6.jpg']
def create_collage(width, height, listofimages):
    Picturewidth = width // 3
    Pictureheight = height // 2
    size = Picturewidth, Pictureheight
    new_im = Image.new('RGB', (450, 300))
    for p in listofimages:
        Image.open(p)
        for col in range(0, width):
            for row in range(0, height):
                image = Image.eval(p, lambda x: x + (col + row) / 30)
                new_im.paste(p, (col, row))
    new_im.save("Collage" + ".jpg")

create_collage(450, 300, listofimages)
Here's some working code.
When you call Image.open(p), that returns an Image object, so you need to store that in a variable: im = Image.open(p).
I'm not sure what image=Image.eval(p, lambda x: x+(col+row)/30) is meant to do so I removed it.
size is the size of the thumbnails, but you're not using that variable. After opening the image, it should be resized to size.
I renamed Picturewidth and Pictureheight to thumbnail_width and thumbnail_height to make it clear what they are and follow Python naming conventions.
I also moved the number of cols and rows to variables so they can be reused without magic numbers.
The first loop opens each image into an im, thumbnails it and puts it in a list of ims.
Before the next loops, we initialise i, x, and y variables to keep track of which image we're looking at, and the x and y coordinates at which to paste the thumbnails into the larger canvas. They'll be updated in the next loops.
The first loop is for columns (cols), not pixels (width). (Also range(0, thing) does the same as range(thing).)
Similarly, the second loop is for rows instead of pixels. Inside this loop we paste the current image at ims[i] into the big new_im at x, y. These are pixel positions, not row/column positions.
At the end of the inner loop, increment the i counter, and add thumbnail_height to y.
Similarly, at the end of the outer loop, add thumbnail_width to x and reset y to zero.
You only need to save new_im once, after these loops have finished.
There's no need for concatenating "Image2" + ".jpg" etc., just do "Image2.jpg".
This results in a 450x300 collage with a 3x2 grid of the six images.
This code could be improved. For example, if you don't need them for anything else, there's no need to save the intermediate ImageX.jpg files; rather than putting those filenames in listofimages, put the images themselves there: listofimages = [im, out, out2, ...], then replace for p in listofimages: with for im in listofimages: and remove im = Image.open(p).
You could also calculate some padding for the images so the black space is even.
from PIL import Image
im= Image.open('Tulips.jpg')
out=im.convert("RGB", (
0.412453, 0.357580, 0.180423, 0,
0.212671, 0.715160, 0.072169, 0,
0.019334, 0.119193, 0.950227, 0 ))
out.save("Image2.jpg")
out2=im.convert("RGB", (
0.9756324, 0.154789, 0.180423, 0,
0.212671, 0.715160, 0.254783, 0,
0.123456, 0.119193, 0.950227, 0 ))
out2.save("Image3.jpg")
out3= im.convert("1")
out3.save("Image4.jpg")
out4=im.convert("RGB", (
0.986542, 0.154789, 0.756231, 0,
0.212671, 0.715160, 0.254783, 0,
0.123456, 0.119193, 0.112348, 0 ))
out4.save("Image5.jpg")
out5=Image.blend(im, out4, 0.5)
out5.save("Image6.jpg")
listofimages=['Tulips.jpg', 'Image2.jpg', 'Image3.jpg', 'Image4.jpg', 'Image5.jpg', 'Image6.jpg']
def create_collage(width, height, listofimages):
    cols = 3
    rows = 2
    thumbnail_width = width // cols
    thumbnail_height = height // rows
    size = thumbnail_width, thumbnail_height
    new_im = Image.new('RGB', (width, height))
    ims = []
    for p in listofimages:
        im = Image.open(p)
        im.thumbnail(size)
        ims.append(im)
    i = 0
    x = 0
    y = 0
    for col in range(cols):
        for row in range(rows):
            print(i, x, y)
            new_im.paste(ims[i], (x, y))
            i += 1
            y += thumbnail_height
        x += thumbnail_width
        y = 0
    new_im.save("Collage.jpg")

create_collage(450, 300, listofimages)
I made a solution inspired by Hugo's answer which only requires the input list of images. The function automatically creates a grid based on the number of input images.
import os
from typing import List, Tuple

from PIL import Image

def find_multiples(number: int):
    multiples = set()
    for i in range(number - 1, 1, -1):
        mod = number % i
        if mod == 0:
            tup = (i, int(number / i))
            if tup not in multiples and (tup[1], tup[0]) not in multiples:
                multiples.add(tup)
    if len(multiples) == 0:
        mod = number % 2
        div = number // 2
        multiples.add((2, div + mod))
    return list(multiples)

def get_smallest_multiples(number: int, smallest_first=True) -> Tuple[int, int]:
    multiples = find_multiples(number)
    smallest_sum = number
    index = 0
    for i, m in enumerate(multiples):
        sum = m[0] + m[1]
        if sum < smallest_sum:
            smallest_sum = sum
            index = i
    result = list(multiples[index])
    if smallest_first:
        result.sort()
    return result[0], result[1]

def create_collage(listofimages: List[str], n_cols: int = 0, n_rows: int = 0,
                   thumbnail_scale: float = 1.0, thumbnail_width: int = 0, thumbnail_height: int = 0):
    n_cols = n_cols if n_cols >= 0 else abs(n_cols)
    n_rows = n_rows if n_rows >= 0 else abs(n_rows)
    if n_cols == 0 and n_rows != 0:
        n_cols = len(listofimages) // n_rows
    if n_rows == 0 and n_cols != 0:
        n_rows = len(listofimages) // n_cols
    if n_rows == 0 and n_cols == 0:
        n_cols, n_rows = get_smallest_multiples(len(listofimages))
    thumbnail_width = 0 if thumbnail_width == 0 or n_cols == 0 else round(thumbnail_width / n_cols)
    thumbnail_height = 0 if thumbnail_height == 0 or n_rows == 0 else round(thumbnail_height / n_rows)
    all_thumbnails: List[Image.Image] = []
    for p in listofimages:
        thumbnail = Image.open(p)
        if thumbnail_width * thumbnail_scale < thumbnail.width:
            thumbnail_width = round(thumbnail.width * thumbnail_scale)
        if thumbnail_height * thumbnail_scale < thumbnail.height:
            thumbnail_height = round(thumbnail.height * thumbnail_scale)
        thumbnail.thumbnail((thumbnail_width, thumbnail_height))
        all_thumbnails.append(thumbnail)
    new_im = Image.new('RGB', (thumbnail_width * n_cols, thumbnail_height * n_rows), 'white')
    i, x, y = 0, 0, 0
    for col in range(n_cols):
        for row in range(n_rows):
            if i > len(all_thumbnails) - 1:
                continue
            print(i, x, y)
            new_im.paste(all_thumbnails[i], (x, y))
            i += 1
            y += thumbnail_height
        x += thumbnail_width
        y = 0
    extension = os.path.splitext(listofimages[0])[1]
    if extension == "":
        extension = ".jpg"
    destination_file = os.path.join(os.path.dirname(listofimages[0]), f"Collage{extension}")
    new_im.save(destination_file)
Example usage:
listofimages=['Tulips.jpg', 'Image2.jpg', 'Image3.jpg', 'Image4.jpg', 'Image5.jpg', 'Image6.jpg']
create_collage(listofimages)
In this case, because there are six input images, the function returns a 3x2 (3 rows, 2 columns) collage of the images.
To do so, the function finds the factor pair of the input list's length with the smallest sum (e.g. for 12 it returns 3 and 4 rather than 2 and 6) and creates a grid from it, where the smaller factor is taken as the number of columns (i.e. by default the grid gets fewer columns than rows; for 12 images you get a 4x3 grid: 4 rows, 3 columns). This can be customized via the smallest_first argument (only exposed in get_smallest_multiples()).
Optional arguments also allow you to force a number of rows/columns.
The final image size is the sum of the sizes of the single images, but the optional thumbnail_scale argument lets you scale all the thumbnails by a given factor (defaults to 1.0, i.e. 100%, no scaling).
This function works well when the sizes of the images are all roughly the same. I have not covered more complex scenarios.
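For example, with the same six-image listofimages, you can force the layout instead of letting the function choose:

create_collage(listofimages, n_rows=2)  # n_cols falls out as 6 // 2 = 3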
I have some code.
It takes a value N, performs a quantum walk for that many steps, and gives an array showing the probability at each position.
It's quite a complex calculation, and N must be a single integer.
What I want to do is repeat this calculation for 100 values of N and build a large 2D array.
Any idea how I would do this?
Here's my code:
from numpy import *
import numpy as np

N = 100      # number of random steps
P = 2*N+1    # number of positions

# defining a quantum coin
coin0 = array([1, 0])  # |0>
coin1 = array([0, 1])  # |1>

# defining the coin operator
C00 = outer(coin0, coin0)  # |0><0|
C01 = outer(coin0, coin1)  # |0><1|
C10 = outer(coin1, coin0)  # |1><0|
C11 = outer(coin1, coin1)  # |1><1|
C_hat = (C00 + C01 + C10 - C11)/sqrt(2.)

# step operator
ShiftPlus = roll(eye(P), 1, axis=0)
ShiftMinus = roll(eye(P), -1, axis=0)
S_hat = kron(ShiftPlus, C00) + kron(ShiftMinus, C11)

# walk operator
U = S_hat.dot(kron(eye(P), C_hat))

# defining the initial state
posn0 = zeros(P)
posn0[N] = 1  # array indexing starts from 0, so index N is the central posn
psi0 = kron(posn0, (coin0+coin1*1j)/sqrt(2.))

# the state after N steps
psiN = linalg.matrix_power(U, N).dot(psi0)

# finding the probability at each position
prob = empty(P)
for k in range(P):
    posn = zeros(P)
    posn[k] = 1
    M_hat_k = kron(outer(posn, posn), eye(2))
    proj = M_hat_k.dot(psiN)
    prob[k] = proj.dot(proj.conjugate()).real

prob[prob == 0] = np.nan
nanmask = np.isfinite(prob)
prob_masked = prob[nanmask]    # this is the final probability to be plotted
P_masked = arange(P)[nanmask]  # these are the possible positions
Rather than writing out the array I get (it is about 100 entries long), here is a graph of position against probability at N = 100.
I eventually want to make a 3D plot of position against N against probability.
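A minimal sketch of one way to build that 2D array, assuming the calculation above is wrapped in a function quantum_walk(N) (a hypothetical name) that returns the length-(2N+1) probability array: run it once per N and write each result into a centred row of a fixed-width array, which can then be fed to a surface/3D plot of position against N against probability.

import numpy as np

n_values = range(1, 101)
max_P = 2 * max(n_values) + 1
results = np.full((len(n_values), max_P), np.nan)
for row, n in enumerate(n_values):
    prob = quantum_walk(n)             # length 2*n+1
    offset = (max_P - len(prob)) // 2  # centre the shorter walks
    results[row, offset:offset + len(prob)] = prob
# results[i, j]: probability at position j - max(n_values) after n_values[i] steps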