I am trying to convert NumPy code into TensorFlow graph format, but somewhere I am misunderstanding the dimensionality.
Here is numpy code:
def delta_to_boxes3d(deltas, anchors, coordinate='lidar'):
    # Input:
    #   deltas: (N, w, l, 14), e.g. (200, 240, 14)
    #   feature_map_shape: (w, l)
    #   anchors: (w, l, 2, 7), e.g. (200, 240, 2, 7)
    # Output:
    #   boxes3d: (N, w*l*2, 7)
    anchors_reshaped = anchors.reshape(-1, 7)  # (96000, 7)
    deltas = deltas.reshape(-1, 7)  # (96000, 7)
    anchors_d = np.sqrt(anchors_reshaped[:, 4]**2 + anchors_reshaped[:, 5]**2)
    boxes3d = np.zeros_like(deltas)
    boxes3d[..., [0, 1]] = deltas[..., [0, 1]] * \
        anchors_d[:, np.newaxis] + anchors_reshaped[..., [0, 1]]  # in this line I have the problem
    boxes3d[..., [2]] = deltas[..., [2]] * \
        1.73 + anchors_reshaped[..., [2]]  # ANCHOR_H = 1.73
    boxes3d[..., [3, 4, 5]] = np.exp(
        deltas[..., [3, 4, 5]]) * anchors_reshaped[..., [3, 4, 5]]
    boxes3d[..., 6] = deltas[..., 6] + anchors_reshaped[..., 6]
    return boxes3d
Here is the code I have been trying:
def delta_boxes3d():
    anchors = tf.placeholder(tf.float32, shape=[None, None, 2, 7], name="anchor")  # check the anchor type later
    anchors_reshaped = tf.reshape(anchors, shape=[96000, 7])
    delta = tf.placeholder(tf.float32, shape=[None, None, 14], name="delta")
    anchors_d = tf.sqrt(tf.add(tf.pow(anchors_reshaped[:, 4], 2), tf.pow(anchors_reshaped[:, 5], 2)))  # (96000,)
    deltas = tf.reshape(delta, [96000, 7])
    x_shape = tf.shape(deltas)
    boxes3d_ = tf.multiply(deltas[:, 0:2], tf.add(tf.expand_dims(anchors_d, -1), anchors_reshaped[:, 0:2]))
    boxes3d = tf.ones(x_shape[:-1]) + boxes3d_

    delta_ = np.random.rand(200, 240, 14)
    anchor_ = np.random.rand(200, 240, 2, 7)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    result = sess.run(boxes3d, feed_dict={anchors: anchor_, delta: delta_})  # need to get boxes3d of shape (96000, 7)
    print(result.shape)
I am getting the error below:
ValueError: Dimensions must be equal, but are 96000 and 2 for '{{node add_2}} = AddV2[T=DT_FLOAT](ones, Mul)' with input shapes: [96000], [96000,2].
Could someone help me with this?
Thanks in advance
The error comes from the line boxes3d = tf.ones(x_shape[:-1]) + boxes3d_.
You are trying to add tensors of shapes (96000,) and (96000,2), which you can't do without expanding dims. If you want to add a scalar, you can do boxes3d = 1 + boxes3d_.
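To see the broadcasting rule at play, here is a minimal sketch with the shapes from the error message:

import tensorflow as tf

a = tf.ones([96000])           # shape (96000,)
b = tf.ones([96000, 2])        # shape (96000, 2)
# a + b fails: aligning trailing dimensions compares 96000 with 2
c = tf.expand_dims(a, -1) + b  # (96000, 1) + (96000, 2) broadcasts to (96000, 2)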
In your example, though, you want a multiplication followed by an addition. Note that in the NumPy code the highlighted line does the multiplication first and then the addition, while in your TensorFlow version you did it the other way around (possibly by mistake).
I rewrote your NumPy example in TensorFlow 2 so that both functions return the same output.
def delta_boxes3d(deltas, anchors):
    deltas = tf.constant(deltas)
    anchors = tf.constant(anchors)
    anchors_reshaped = tf.reshape(anchors, shape=[96000, 7])
    anchors_d = tf.sqrt(tf.add(tf.pow(anchors_reshaped[:, 4], 2), tf.pow(anchors_reshaped[:, 5], 2)))  # (96000,)
    deltas = tf.reshape(deltas, [96000, 7])
    boxes3d_01 = tf.add(tf.multiply(deltas[:, 0:2], tf.expand_dims(anchors_d, -1)), anchors_reshaped[:, 0:2])
    boxes3d_2 = deltas[..., 2:3] * 1.73 + anchors_reshaped[..., 2:3]
    boxes3d_345 = tf.exp(deltas[..., 3:6]) * anchors_reshaped[..., 3:6]
    boxes3d_6 = deltas[..., 6:7] + anchors_reshaped[..., 6:7]
    boxes3d = tf.concat([boxes3d_01, boxes3d_2, boxes3d_345, boxes3d_6], axis=-1)
    return boxes3d
deltas = np.random.rand(200, 240, 14)
anchors = np.random.rand(200, 240, 2, 7)
print(delta_to_boxes3d(deltas, anchors))
print(delta_boxes3d(deltas, anchors))
You can notice I created smaller tensors first and then concatenated them. This is because TensorFlow does not allow item assignment on EagerTensors.
Notice the difference between deltas[..., 2] and deltas[..., 2:3]. The second one doesn't reduce the last dimension. They return shapes (96000,) and (96000, 1), respectively.
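A quick illustration of that slicing difference (a minimal sketch with the shapes from above):

import tensorflow as tf

x = tf.zeros([96000, 7])
print(x[..., 2].shape)    # (96000,)   - the last dimension is reduced
print(x[..., 2:3].shape)  # (96000, 1) - the last dimension is kept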
I want to perform element-wise multiplication between two tensors, where most of the elements are zero.
For two example tensors:
test1 = np.zeros((2, 3, 5, 6))
test1[0, 0, :, 2] = 4
test1[0, 1, [2, 4], 1] = 7
test1[0, 2, 2, :] = 2
test1[1, 0, 4, 1:3] = 5
test1[1, :, 0, 1] = 3
and,
test2 = np.zeros((5, 6, 4, 7))
test2[2, 2, 2, 4] = 4
test2[0, 1, :, 1] = 3
test2[4, 3, 2, :] = 6
test2[1, 0, 3, 1:3] = 1
test2[3, :, 0, 1] = 2
the calculation I need is:
result = test1[..., None, None] * test2[None, None, ...]
In the actual use case I am coding for, the tensors can have more dimensions and much longer lengths in some of the dimensions, so while the multiplication is reasonably quick, I would like to utilise the fact that most of the elements are zero.
My first thought was to make a sparse representation of each tensor.
coords1 = np.nonzero(test1)
shape1 = test1.shape
test1_squished = test1[coords1]
coords1 = np.array(coords1)
coords2 = np.nonzero(test2)
shape2 = test2.shape
test2_squished = test2[coords2]
coords2 = np.array(coords2)
Here there is enough information to perform the multiplication, by comparing the coordinates along the equal axes and multiplying if they are the same.
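To make that matching idea concrete for the example above only: the shared broadcast axes are axes 2 and 3 of test1 and axes 0 and 1 of test2, so (just for this specific pattern, not the general case) the matching could in principle be written as:

# keys identifying each nonzero entry's position along the shared (5, 6) axes
k1 = np.ravel_multi_index(coords1[2:4], test1.shape[2:4])
k2 = np.ravel_multi_index(coords2[0:2], test2.shape[0:2])
p1, p2 = np.nonzero(k1[:, None] == k2[None, :])  # all pairs with matching shared coordinates

values = test1_squished[p1] * test2_squished[p2]
# result axes: (0, 1) from test1, the shared (2, 3), then axes (2, 3) of test2
coords = np.vstack([coords1[:, p1], coords2[2:, p2]])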
I have a function for adding a new axis,
def new_axis(coords, shape, axis):
    new_coords = np.zeros((len(coords)+1, len(coords[0])))
    new_index = np.delete(np.arange(0, len(coords)+1), axis)
    new_coords[new_index] = coords
    coords = new_coords
    new_shape = np.zeros(len(new_coords), dtype=int)
    new_shape[new_index] = shape
    new_shape[axis] = 1
    new_shape = np.array(new_shape)
    return coords, new_shape
and for performing the multiplication,
def multiply(coords1, shape1, array1, coords2, shape2, array2):  # all inputs should be numpy arrays
    if np.array_equal(shape1, shape2):
        index1 = np.nonzero((coords1.T[:, None, :] == coords2.T).all(-1).any(-1))[0]
        index2 = np.nonzero((coords2.T[:, None, :] == coords1.T).all(-1).any(-1))[0]
        array = array1[index1] * array2[index2]
        coords = (coords1.T[index1]).T
        shape = shape1
    else:
        if len(shape1) == len(shape2):
            equal_index = np.nonzero((shape1 == shape2))[0]
            not_equal_index = np.nonzero(~(shape1 == shape2))[0]
            if np.logical_or((shape1[not_equal_index] == 1), (shape2[not_equal_index] == 1)).all():
                # where the lengths differ, one of them = 1 -> can broadcast
                # compare dimensions with the same length; if equal, multiply corresponding elements
                multiply_index1 = np.nonzero(
                    (coords1[equal_index].T[:, None, :] == coords2[equal_index].T).all(-1).any(-1)
                )[0]
                # would like a vectorised version of the loop below
                array = []
                coords = []
                for index in multiply_index1:
                    multiply_index2 = np.nonzero(((coords2[equal_index]).T == (coords1[equal_index]).T[index]).all(-1))[0]
                    array.append(array1[index] * array2[multiply_index2])
                    temp = np.zeros((6, len(multiply_index2)))
                    temp[not_equal_index] = ((coords1[not_equal_index].T[index]).T + (coords2[not_equal_index].T[multiply_index2])).T
                    if len(multiply_index2) == 1:
                        temp[equal_index] = coords1[equal_index].T[index].T[:, None]
                    else:
                        temp[equal_index] = np.repeat(coords1[equal_index].T[index].T[:, None], len(multiply_index2), axis=-1)
                    coords.append(temp)
                array = np.concatenate(array)
                coords = np.concatenate(coords, axis=-1)
                shape = shape1
                shape[np.where(shape == 1)] = shape2[np.where(shape == 1)]
            else:
                print("error")
        else:
            print("error")
    return array, coords, shape
However, the multiply function is very inefficient, so I lose any gain from going to the sparse representation.
Is there an elegant vectorised approach to the multiply function? Or is there a better solution than this sparse tensor idea?
Thanks in advance.
I would like to get two arrays' sum of minimums efficiently with NumPy. For example:
X=np.array([[1,2,3],[1,2,0]])
Y=np.array([[0,2,0],[1,3,1]])
My result should be:
result = array([[2, 4],[2, 3]])
The calculation for the first cell is:
result[0,0] = min(X[0,0],Y[0,0])+ min(X[0,1],Y[0,1])+min(X[0,2],Y[0,2])
In general, the result should be:
res[i,j] = sum(np.minimum(X[i, :], Y[j, :]))
but I am looking for the fastest way.
dot is the equivalent of taking outer products, and summing on the appropriate axis.
The equivalent in your case is:
In [291]: np.minimum(X[:,None,:], Y[None,:,:])
Out[291]:
array([[[0, 2, 0],
[1, 2, 1]],
[[0, 2, 0],
[1, 2, 0]]])
In [292]: np.sum(np.minimum(X[:,None,:], Y[None,:,:]),axis=-1)
Out[292]:
array([[2, 4],
[2, 3]])
Best I could do:
import numpy as np

def sum_mins(x, y):
    # pick each element from x where it is smaller, otherwise from y, then sum
    mask = (x - y) < 0
    return np.sum(x*mask + y*np.logical_not(mask))

X = np.array([1, 2, 3])
Y = np.array([0, 2, 0])
print(sum_mins(X, Y))  # 2
One naive approach close to definition:
result = np.array([[np.sum(np.minimum(v_x, v_y)) for v_y in Y] for v_x in X])
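As a quick sanity check with the arrays from the question, this naive version agrees with the broadcast version from the answer above:

import numpy as np

X = np.array([[1, 2, 3], [1, 2, 0]])
Y = np.array([[0, 2, 0], [1, 3, 1]])
naive = np.array([[np.sum(np.minimum(v_x, v_y)) for v_y in Y] for v_x in X])
fast = np.sum(np.minimum(X[:, None, :], Y[None, :, :]), axis=-1)
assert np.array_equal(naive, fast)  # both give [[2, 4], [2, 3]]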
A combination of hpaulj's and my former answer (deleted) that works in case you run out of memory otherwise:
# maximum number of float32s in memory - determines the maximum chunk size
MAX_CHUNK_MEM_SIZE = 1000 * 1024 * 1024 // 4

def _fast_small(x, y):
    """Process a case with small size of x and y."""
    # see hpaulj's answer
    return np.sum(np.minimum(x[:, None, :], y[None, :, :]), axis=-1)

def fast(x, y):
    """Process a case with potentially large size of x and y."""
    assert len(x.shape) == len(y.shape) == 2
    assert x.shape[1] == y.shape[1]
    # size of the full (m, n, d) intermediate, in elements
    num_chunks = max(1, int(np.ceil(x.shape[0] * y.shape[0] * x.shape[1] / MAX_CHUNK_MEM_SIZE)))
    result_blocks = []
    for x_block in np.array_split(x, num_chunks):
        result_blocks_row = []
        for y_block in np.array_split(y, num_chunks):
            result_blocks_row.append(_fast_small(x_block, y_block))
        result_blocks.append(result_blocks_row)
    return np.block(result_blocks)
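A hypothetical usage sketch (the sizes here are arbitrary):

X = np.random.rand(5000, 128).astype(np.float32)
Y = np.random.rand(4000, 128).astype(np.float32)
res = fast(X, Y)
print(res.shape)  # (5000, 4000), computed block by block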
I want to vectorize the following code:
def style_noise(self, y, style):
    n = torch.randn(y.shape)
    for i in range(n.shape[0]):
        n[i] = (n[i] - n.mean(dim=(1, 2, 3))[i]) * style.std(dim=(1, 2, 3))[i] / n.std(dim=(1, 2, 3))[i] + style.mean(dim=(1, 2, 3))[i]
    noise = Variable(n, requires_grad=False).to(y.device)
    return noise
I didn't find a nice way of doing so.
y and style are 4d tensors, say style.shape = y.shape = [64, 3, 128, 128].
I want to return the noise tensor, noise.shape = [64, 3, 128, 128].
Please let me know in the comments if the question is not clear.
Your use case is exactly why the .mean and .std methods come with a keepdim parameter. You can make use of this to enable broadcasting semantics to vectorize things for you:
def style_noise(self, y, style):
    n = torch.randn(y.shape)
    n_mean = n.mean(dim=(1, 2, 3), keepdim=True)
    n_std = n.std(dim=(1, 2, 3), keepdim=True)
    style_mean = style.mean(dim=(1, 2, 3), keepdim=True)
    style_std = style.std(dim=(1, 2, 3), keepdim=True)
    n = (n - n_mean) * style_std / n_std + style_mean
    noise = Variable(n, requires_grad=False).to(y.device)
    return noise
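To see why keepdim enables the broadcasting, here is a minimal sketch of the shapes involved:

import torch

y = torch.randn(64, 3, 128, 128)
m = y.mean(dim=(1, 2, 3), keepdim=True)
print(m.shape)  # torch.Size([64, 1, 1, 1]), which broadcasts against (64, 3, 128, 128)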
To calculate the mean and std of the whole tensor, pass no arguments:
m = t.mean(); print(m)  # mean of the whole tensor
s = t.std(); print(s)   # std of the whole tensor
Then, if your shape is for instance (2, 2, 2), create tensors for the broadcast subtraction and division:
ss = torch.empty(2, 2, 2).fill_(s)
print(ss)
mm = torch.empty(2, 2, 2).fill_(m)
print(mm)
At the moment, keepdim does not work as expected when you don't set the dim:
m = t.mean(); print(m)               # for the whole tensor
s = t.std(); print(s)                # for the whole tensor
m = t.mean(dim=0); print(m)          # mean over axis 0 (columns)
s = t.std(dim=0); print(s)           # std over axis 0 (columns)
m = t.mean(dim=1); print(m)          # mean over axis 1 (rows)
s = t.std(dim=1); print(s)           # std over axis 1 (rows)
m = t.mean(keepdim=True); print(m)   # will not work
s = t.std(keepdim=True); print(s)    # will not work
If you set dim as a tuple, it returns the mean over those axes only, not over the whole tensor.
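For example, a minimal sketch with a (2, 2, 2) tensor:

import torch

t = torch.rand(2, 2, 2)
m = t.mean(dim=(1, 2), keepdim=True)  # shape (2, 1, 1): mean over axes 1 and 2
print((t - m).shape)                  # broadcasts back to torch.Size([2, 2, 2])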
I have R, a batch of 2D rotation matrices of shape (N, 2, 2). Now I wish to extend each matrix to a (3, 3) 3D rotation matrix, i.e. keep each R in [:, :2, :2], put zeros in the new third row and column, and put 1 at [:, 2, 2].
How to do this in tensorflow?
UPDATE
I tried this way:
R = tf.get_variable(name='R', shape=np.shape(R_value), dtype=tf.float64,
                    initializer=tf.constant_initializer(R_value))
eye = tf.eye(np.shape(R_value)[1]+1)
right_column = eye[:2,2]
bottom_row = eye[2,:]
R = tf.concat([R, right_column], 3)
R = tf.concat([R, bottom_row], 2)
but failed, because concat doesn't do broadcasting...
UPDATE 2
I made explicit broadcasting and also fixed wrong indices in concat calls:
R = tf.get_variable(name='R', shape=np.shape(R_value), dtype=tf.float64,
                    initializer=tf.constant_initializer(R_value))
eye = tf.eye(np.shape(R_value)[1]+1, dtype=tf.float64)
right_column = eye[:2,2]
right_column = tf.expand_dims(right_column, 0)
right_column = tf.expand_dims(right_column, 2)
right_column = tf.tile(right_column, (np.shape(R_value)[0], 1, 1))
bottom_row = eye[2,:]
bottom_row = tf.expand_dims(bottom_row, 0)
bottom_row = tf.expand_dims(bottom_row, 0)
bottom_row = tf.tile(bottom_row, (np.shape(R_value)[0], 1, 1))
R = tf.concat([R, right_column], 2)
R = tf.concat([R, bottom_row], 1)
This solution looks rather complex. Is there a simpler one?
First, pad R with zeros to go from [N, 2, 2] to [N, 3, 3]: padded = tf.pad(R, [[0, 0], [0, 1], [0, 1]])
Then set padded[:, 2, 2] to 1:
Since tf.Tensor does not support item assignment, you can do this by initializing a np.array and adding it:
arr = np.zeros((3, 3))
arr[2, 2] = 1
R = padded + arr # broadcast used here
Now the variable R is what you need.
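Putting both steps together, a minimal end-to-end sketch (in the TF 1.x style of the question, with N left dynamic):

import numpy as np
import tensorflow as tf

R = tf.placeholder(tf.float64, shape=[None, 2, 2])  # batch of 2D rotation matrices
padded = tf.pad(R, [[0, 0], [0, 1], [0, 1]])        # (N, 3, 3); the new row and column are zeros
arr = np.zeros((3, 3))
arr[2, 2] = 1
R3 = padded + arr                                   # (3, 3) broadcasts against (N, 3, 3)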
I want to compute the pairwise squared distance of a batch of features in TensorFlow. I have a simple implementation using + and * operations, by tiling the original tensor:
def pairwise_l2_norm2(x, y, scope=None):
    with tf.op_scope([x, y], scope, 'pairwise_l2_norm2'):
        size_x = tf.shape(x)[0]
        size_y = tf.shape(y)[0]
        xx = tf.expand_dims(x, -1)
        xx = tf.tile(xx, tf.pack([1, 1, size_y]))

        yy = tf.expand_dims(y, -1)
        yy = tf.tile(yy, tf.pack([1, 1, size_x]))
        yy = tf.transpose(yy, perm=[2, 1, 0])

        diff = tf.sub(xx, yy)
        square_diff = tf.square(diff)
        square_dist = tf.reduce_sum(square_diff, 1)
        return square_dist
This function takes as input two matrices of size (m,d) and (n,d) and computes the squared distance between each pair of row vectors. The output is a matrix of size (m,n) with element 'd_ij = dist(x_i, y_j)'.
The problem is that I have a large batch and high-dimensional features; with large 'm, n, d', replicating the tensor consumes a lot of memory.
I'm looking for another way to implement this that stores only the final distance tensor, without increasing memory usage. Something like double looping over the original tensor.
You can use some linear algebra to turn it into matrix ops. Note that what you need is the matrix D where, with a[i] the ith row of your original matrix,
D[i,j] = (a[i]-a[j])(a[i]-a[j])'
You can rewrite that as
D[i,j] = r[i] - 2 a[i]a[j]' + r[j]
where r[i] is the squared norm of the ith row of the original matrix.
In a system that supports standard broadcasting rules, you can treat r as a column vector and write D as
D = r - 2 A A' + r'
In TensorFlow you could write this as
A = tf.constant([[1, 1], [2, 2], [3, 3]])
r = tf.reduce_sum(A*A, 1)
# turn r into column vector
r = tf.reshape(r, [-1, 1])
D = r - 2*tf.matmul(A, tf.transpose(A)) + tf.transpose(r)
sess = tf.Session()
sess.run(D)
Result:
array([[0, 2, 8],
[2, 0, 2],
[8, 2, 0]], dtype=int32)
Using squared_difference:
def squared_dist(A):
    expanded_a = tf.expand_dims(A, 1)
    expanded_b = tf.expand_dims(A, 0)
    distances = tf.reduce_sum(tf.squared_difference(expanded_a, expanded_b), 2)
    return distances
One thing I noticed is that this solution using tf.squared_difference gives me out of memory (OOM) errors for very large vectors, while the approach by @YaroslavBulatov doesn't. So decomposing the operation into matrix ops seems to yield a smaller memory footprint (whereas I had thought squared_difference would handle this better under the hood).
Here is a more general solution for two tensors of coordinates A and B:
def squared_dist(A, B):
    assert A.shape.as_list() == B.shape.as_list()

    row_norms_A = tf.reduce_sum(tf.square(A), axis=1)
    row_norms_A = tf.reshape(row_norms_A, [-1, 1])  # Column vector.

    row_norms_B = tf.reduce_sum(tf.square(B), axis=1)
    row_norms_B = tf.reshape(row_norms_B, [1, -1])  # Row vector.

    return row_norms_A - 2 * tf.matmul(A, tf.transpose(B)) + row_norms_B
Note that this is the squared distance. If you want the Euclidean distance, apply tf.sqrt to the result. If you do that, don't forget to add a small constant to compensate for floating point instabilities: dist = tf.sqrt(squared_dist(A, B) + 1e-6).
If you want to compute a different metric, just change the corresponding tf ops:
def compute_euclidean_distance(x, y):
    size_x = x.shape.dims[0]
    size_y = y.shape.dims[0]
    for i in range(size_x):
        tile_one = tf.reshape(tf.tile(x[i], [size_y]), [size_y, -1])
        eu_one = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(tile_one, y), 2), axis=1)), axis=0)
        if i == 0:
            d = eu_one
        else:
            d = tf.concat([d, eu_one], axis=0)
    return d
return d