How to read .bmp images using tensorflow and write to disk? - python

I am trying to read BMP images (2048 x 2048), resize them to 256 x 256, and write the result to disk using TensorFlow. I have succeeded in reading them, but I am unable to find a way to write them to disk. Any idea how to do it?
Here is the code below:
import tensorflow as tf

img_path = "D:/image01.bmp"
img = tf.read_file(img_path)
img_decode = tf.image.decode_bmp(img, channels=1)  # uint8 tensor
IMG_WIDTH = 256
IMG_HEIGHT = 256
img_cast = tf.cast(img_decode, dtype=tf.uint8)
img_4d = tf.expand_dims(img_cast, axis=0)
img_res = tf.image.resize_bilinear(img_4d, (IMG_HEIGHT, IMG_WIDTH), align_corners=True)
session = tf.InteractiveSession()
file_name = "D:/out.bmp"
file = tf.write_file(file_name, img_res)  # this line raises the error below
print('Image Saved')
session.close()
Error:
ValueError Traceback (most recent call last)
D:\Users\ge3f-P2\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
509 as_ref=input_arg.is_ref,
--> 510 preferred_dtype=default_dtype)
511 except TypeError as err:
D:\Users\ge3f-P2\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx)
1145 if ret is None:
-> 1146 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1147
D:\Users\ge3f-P2\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
982 "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
--> 983 (dtype.name, t.dtype.name, str(t)))
984 return t
ValueError: Tensor conversion requested dtype string for Tensor with dtype uint8: 'Tensor("DecodeBmp:0", shape=(?, ?, 1), dtype=uint8)'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-18-9b7aeb9e42de> in <module>
----> 1 file = tf.write_file(file_name,final)
D:\Users\ge3f-P2\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_io_ops.py in write_file(filename, contents, name)
2256 if _ctx is None or not _ctx._eager_context.is_eager:
2257 _, _, _op = _op_def_lib._apply_op_helper(
-> 2258 "WriteFile", filename=filename, contents=contents, name=name)
2259 return _op
2260 _result = None
D:\Users\ge3f-P2\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
531 if input_arg.type != types_pb2.DT_INVALID:
532 raise TypeError("%s expected type of %s." %
--> 533 (prefix, dtypes.as_dtype(input_arg.type).name))
534 else:
535 # Update the maps with the default, if needed.
TypeError: Input 'contents' of 'WriteFile' Op has type uint8 that does not match expected type of string.
The problem is that I can't find an "encode_bmp" or any BMP-related function that can be used to encode the image and save the resized result to disk.
I went through a related thread, but it doesn't help solve the question.

Since TensorFlow currently does not have a native way of encoding/saving images in the BMP format, one way to solve this is to save the image as a PNG in a temporary location and then use the Python Imaging Library (PIL) to convert it to a BMP.
See PIL's Image.save method and its list of supported file formats.
From my understanding, the exception you are receiving occurs because you are passing a uint8 tensor to tf.write_file, which expects an already encoded string.
Try this:
import os
from PIL import Image
.
.
.
file_name = "D:/tmp.png"
# encode_png expects a 3-D uint8 tensor: drop the batch dimension and cast back
img_3d = tf.cast(tf.squeeze(img_res, axis=0), tf.uint8)
enc = tf.image.encode_png(img_3d)
write_op = tf.write_file(file_name, enc)
session.run(write_op)  # the write op must actually be executed
print('PNG Image Saved')
session.close()
Image.open(file_name).save("D:/out.bmp")
os.remove(file_name)
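Alternatively, you can skip the temporary PNG entirely: run the graph, fetch the resized array, and let PIL write the BMP directly. A minimal sketch under the same TF 1.x graph as above (reusing img_res and session from the question; not tested against your exact setup):
import numpy as np
from PIL import Image

arr = session.run(img_res)                     # fetch the (1, 256, 256, 1) float32 array
arr = np.squeeze(arr).astype(np.uint8)         # drop batch/channel dims, back to uint8
Image.fromarray(arr, mode='L').save("D:/out.bmp")  # PIL encodes BMP natively
This avoids the round trip through the PNG encoder at the cost of pulling the pixels out of the TensorFlow graph.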

Related

InvalidArgumentError Related to the Shape of Tensors When Utilizing tf.image.random_crop (Tensorflow in Jupyter)

I'm working with a GAN using tensorflow.
inp, re = load(str(PATH / 'train/100.jpg'))
# Scaling to [0, 1] for matplotlib to display the images
plt.figure()
plt.imshow(inp / 255.0)
plt.figure()
plt.imshow(re / 255.0)
When running the above code, the following error occurs:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-17-abf50abadd26> in <module>
----> 1 inp, re = load(str(PATH / 'train/100.jpg'))
2 # Casting to int for matplotlib to display the images
3 plt.figure()
4 plt.imshow(inp / 255.0)
5 plt.figure()
<ipython-input-16-e73db7456b4c> in load(image_file)
16 #crop and resize
17 input_image = cv2.resize(input_image,(width1 * 10,height1 * 10))
---> 18 input_image = tf.image.random_crop(input_image,(width1,height1))
19 real_image = cv2.resize(real_image,(width1,height1))
20
/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/random_ops.py in random_crop(value, size, seed, name)
400 shape = array_ops.shape(value)
401 check = control_flow_ops.Assert(
--> 402 math_ops.reduce_all(shape >= size),
403 ["Need value.shape >= size, got ", shape, size],
404 summarize=1000)
/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/math_ops.py in wrapper(x, y, *args, **kwargs)
1815 def wrapper(x, y, *args, **kwargs):
1816 x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
-> 1817 return fn(x, y, *args, **kwargs)
1818 return tf_decorator.make_decorator(fn, wrapper)
1819
/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/gen_math_ops.py in greater_equal(x, y, name)
4030 return _result
4031 except _core._NotOkStatusException as e:
-> 4032 _ops.raise_from_not_ok_status(e, name)
4033 except _core._FallbackException:
4034 pass
/opt/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
6939 message = e.message + (" name: " + name if name is not None else "")
6940 # pylint: disable=protected-access
-> 6941 six.raise_from(core._status_to_exception(e.code, message), None)
6942 # pylint: enable=protected-access
6943
/opt/anaconda3/lib/python3.8/site-packages/six.py in raise_from(value, from_value)
InvalidArgumentError: Incompatible shapes: [3] vs. [2] [Op:GreaterEqual]
To my understanding, this means that a tensor has the wrong shape, but how do I know what is causing this difference, and how do I fix it?
It is certainly worth noting that the load() function is defined by:
def load(image_file):
    # Read and decode an image file to a uint8 tensor
    image = tf.io.read_file(image_file)
    image = tf.io.decode_jpeg(image)
    # conversion to NumPy array
    image = np.array(image)
    # separate
    input_image = image
    real_image = image
    # crop and resize
    input_image = cv2.resize(input_image, (width1 * 10, height1 * 10))
    input_image = tf.image.random_crop(input_image, (width1, height1))
    real_image = cv2.resize(real_image, (width1, height1))
    # convert to float32
    input_image = tf.cast(input_image, tf.float32)  # 51, 34
    real_image = tf.cast(real_image, tf.float32)  # 510, 340
    return input_image, real_image
When tf.image.random_crop is removed, the error is not encountered; however, the images are of course not properly cropped. I'm not entirely sure why this happens.
inp, re = load(str(PATH/'train/100.jpg'))
This is where the error comes from initially. It looks like you forgot to define the image before this command.
The error was related to the syntax required for tf.image.random_crop(). While I used tf.image.random_crop(image, (width, height)), the correct syntax is tf.image.random_crop(image, size=[width, height, 3]). The 3 is needed here because the image is in color: size must specify every dimension of the input, channels included. The syntax was rather unclear in the sources I had previously consulted, so I hope this answer helps someone.
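For illustration, a minimal sketch of the corrected call (TF 2.x eager mode, with a made-up image shape standing in for the decoded JPEG):
import tensorflow as tf

# Hypothetical 340 x 510 RGB image
image = tf.random.uniform((340, 510, 3), maxval=256, dtype=tf.int32)
# size must cover every dimension of the input, channels included
crop = tf.image.random_crop(image, size=[34, 51, 3])
print(crop.shape)  # (34, 51, 3)
Note that size follows the tensor's own dimension order (height, width, channels).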

ValueError: Object arrays cannot be loaded when allow_pickle=False

I am trying to find a solution for this code and hoping for a positive response:
much_data = np.load('muchdata-50-50-20.npy')
output:
ValueError Traceback (most recent call last)
<ipython-input-6-6710fe7f2bb7> in <module>
----> 1 much_data = np.load('muchdata-50-50-20.npy')
~\anaconda3\envs\tf-gpu-cuda8\lib\site-packages\numpy\lib\npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
437 return format.open_memmap(file, mode=mmap_mode)
438 else:
--> 439 return format.read_array(fid, allow_pickle=allow_pickle,
440 pickle_kwargs=pickle_kwargs)
441 else:
~\anaconda3\envs\tf-gpu-cuda8\lib\site-packages\numpy\lib\format.py in read_array(fp, allow_pickle, pickle_kwargs)
725 # The array contained Python objects. We need to unpickle the data.
726 if not allow_pickle:
--> 727 raise ValueError("Object arrays cannot be loaded when "
728 "allow_pickle=False")
729 if pickle_kwargs is None:
ValueError: Object arrays cannot be loaded when allow_pickle=False
Please let me know the solution for this.
Try
much_data = np.load('muchdata-50-50-20.npy', allow_pickle=True)
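The error appears because the .npy file stores an object array, which NumPy can only restore by unpickling; since NumPy 1.16.3 this is disabled by default as a security precaution (unpickling can execute arbitrary code), so only enable it for files you trust. A small round-trip sketch with a made-up file name:
import numpy as np

# Object arrays (e.g. arrays of differently-shaped arrays) require pickling
data = np.array([np.zeros((50, 50)), np.ones(20)], dtype=object)
np.save('muchdata-demo.npy', data)
loaded = np.load('muchdata-demo.npy', allow_pickle=True)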

How can I process OPUS format with Librosa?

I am trying to generate spectrograms using Librosa. When I was working with .wav files, everything worked fine. But when I switched to the OPUS audio codec and ran the same code on an .opus file, it gave me the error below.
X, sample_rate = librosa.load('TESS emotion datasets opus/OAF_Fear/OAF_beg_fear.opus', res_type='kaiser_fast', duration = 2.5, sr = 22050*2, offset = 0.5)
Error generated:
RuntimeError Traceback (most recent call last)
~/anaconda3/lib/python3.6/site-packages/librosa/core/audio.py in load(path, sr, mono, offset, duration, dtype, res_type)
145 try:
--> 146 with sf.SoundFile(path) as sf_desc:
147 sr_native = sf_desc.samplerate
~/anaconda3/lib/python3.6/site-packages/soundfile.py in __init__(self, file, mode, samplerate, channels, subtype, endian, format, closefd)
628 format, subtype, endian)
--> 629 self._file = self._open(file, mode_int, closefd)
630 if set(mode).issuperset('r+') and self.seekable():
~/anaconda3/lib/python3.6/site-packages/soundfile.py in _open(self, file, mode_int, closefd)
1183 _error_check(_snd.sf_error(file_ptr),
-> 1184 "Error opening {0!r}: ".format(self.name))
1185 if mode_int == _snd.SFM_WRITE:
~/anaconda3/lib/python3.6/site-packages/soundfile.py in _error_check(err, prefix)
1356 err_str = _snd.sf_error_number(err)
-> 1357 raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
1358
RuntimeError: Error opening 'TESS emotion datasets opus/OAF_Fear/OAF_beg_fear.opus': File contains data in an unimplemented format.
During handling of the above exception, another exception occurred:
NoBackendError Traceback (most recent call last)
<ipython-input-39-1372f02f676e> in <module>()
----> 1 X, sample_rate = librosa.load('TESS emotion datasets opus/OAF_Fear/OAF_beg_fear.opus', res_type='kaiser_fast', duration = 2.5, sr = 22050*2, offset = 0.5)
~/anaconda3/lib/python3.6/site-packages/librosa/core/audio.py in load(path, sr, mono, offset, duration, dtype, res_type)
161 if isinstance(path, (str, pathlib.PurePath)):
162 warnings.warn("PySoundFile failed. Trying audioread instead.")
--> 163 y, sr_native = __audioread_load(path, offset, duration, dtype)
164 else:
165 raise (exc)
~/anaconda3/lib/python3.6/site-packages/librosa/core/audio.py in __audioread_load(path, offset, duration, dtype)
185
186 y = []
--> 187 with audioread.audio_open(path) as input_file:
188 sr_native = input_file.samplerate
189 n_channels = input_file.channels
~/anaconda3/lib/python3.6/site-packages/audioread/__init__.py in audio_open(path, backends)
114
115 # All backends failed!
--> 116 raise NoBackendError()
NoBackendError:
I tried installing ffmpeg and gstreamer, as suggested by some previous answers and by Librosa's GitHub page, but that didn't solve the problem.
Curiously, this audio format works fine when I run the same code in Google Colab.
What can be the reason for this error? How can I solve it?
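One workaround, if no audioread backend picks up the file even with ffmpeg installed, is to transcode the OPUS file to WAV with the ffmpeg CLI first and load that instead. A hedged sketch (assumes ffmpeg is on the PATH; paths are from the question):
import subprocess
import librosa

src = 'TESS emotion datasets opus/OAF_Fear/OAF_beg_fear.opus'
tmp = 'OAF_beg_fear.wav'
# Decode OPUS to PCM WAV, which soundfile/libsndfile reads natively
subprocess.run(['ffmpeg', '-y', '-i', src, tmp], check=True)
X, sample_rate = librosa.load(tmp, res_type='kaiser_fast',
                              duration=2.5, sr=22050 * 2, offset=0.5)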

Cannot load a pickle file using joblib of sklearn

I trained a model on a cluster, downloaded it (pkl format), and tried to load it locally. I know that sklearn's bundled version of joblib was used to save the model mymodel.pkl (but I don't know exactly which version...).
from sklearn.externals import joblib
print(joblib.__version__)
model = joblib.load("mymodel.pkl")
I use version 0.13.0 of sklearn's joblib locally.
This is the error that I got:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-100-d0a3c42e5c53> in <module>
3 print(joblib.__version__)
4
----> 5 model = joblib.load("mymodel.pkl")
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\externals\joblib\numpy_pickle.py in load(filename, mmap_mode)
596 return load_compatibility(fobj)
597
--> 598 obj = _unpickle(fobj, filename, mmap_mode)
599
600 return obj
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\externals\joblib\numpy_pickle.py in _unpickle(fobj, filename, mmap_mode)
524 obj = None
525 try:
--> 526 obj = unpickler.load()
527 if unpickler.compat_mode:
528 warnings.warn("The file '%s' has been generated with a "
~\AppData\Local\Continuum\anaconda3\lib\pickle.py in load(self)
1083 raise EOFError
1084 assert isinstance(key, bytes_types)
-> 1085 dispatch[key[0]](self)
1086 except _Stop as stopinst:
1087 return stopinst.value
KeyError: 239
Update:
I also tried the following, but got AttributeError: 'str' object has no attribute 'readable':
with io.BufferedReader("mymodel.pkl") as pickle_file:
    model = pickle.load(pickle_file)
You dumped it with joblib.dump('pipeline', 'mymodel.pkl'). That only dumped the string 'pipeline', not your actual pipeline object.
Dump it correctly with:
joblib.dump(pipeline,'mymodel.pkl')
...then read back with:
model = joblib.load('mymodel.pkl')
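For reference, a minimal end-to-end sketch of the dump/load cycle (with a toy pipeline standing in for the real model):
from sklearn.externals import joblib          # sklearn's bundled joblib, as in the question
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

pipeline = make_pipeline(StandardScaler(), LogisticRegression())
joblib.dump(pipeline, 'mymodel.pkl')          # first argument is the object, second the path
model = joblib.load('mymodel.pkl')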

Word2Vec error when loading in GoogleNews data

I am following a tutorial here: https://towardsdatascience.com/multi-class-text-classification-model-comparison-and-selection-5eb066197568
I am at the part "Word2vec and Logistic Regression". I have downloaded the "GoogleNews-vectors-negative300.bin.gz" file and I am trying to apply it to my own text data. However, when I get to the following code:
%%time
import gensim
wv = gensim.models.KeyedVectors.load_word2vec_format(
    "/data/users/USERS/File_path/classifier/GoogleNews_Embedding/GoogleNews-vectors-negative300.bin.gz",
    binary=True)
wv.init_sims(replace=True)
I run into the following error:
/data/users/msmith/env/lib64/python3.6/site-packages/smart_open/smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function
'See the migration notes for details: %s' % _MIGRATION_NOTES_URL
---------------------------------------------------------------------------
EOFError Traceback (most recent call last)
<timed exec> in <module>
~/env/lib64/python3.6/site-packages/gensim/models/keyedvectors.py in load_word2vec_format(cls, fname, fvocab, binary, encoding, unicode_errors, limit, datatype)
1492 return _load_word2vec_format(
1493 cls, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
-> 1494 limit=limit, datatype=datatype)
1495
1496 def get_keras_embedding(self, train_embeddings=False):
~/env/lib64/python3.6/site-packages/gensim/models/utils_any2vec.py in _load_word2vec_format(cls, fname, fvocab, binary, encoding, unicode_errors, limit, datatype)
383 with utils.ignore_deprecation_warning():
384 # TODO use frombuffer or something similar
--> 385 weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)
386 add_word(word, weights)
387 else:
/usr/lib64/python3.6/gzip.py in read(self, size)
274 import errno
275 raise OSError(errno.EBADF, "read() on write-only GzipFile object")
--> 276 return self._buffer.read(size)
277
278 def read1(self, size=-1):
/usr/lib64/python3.6/_compression.py in readinto(self, b)
66 def readinto(self, b):
67 with memoryview(b) as view, view.cast("B") as byte_view:
---> 68 data = self.read(len(byte_view))
69 byte_view[:len(data)] = data
70 return len(data)
/usr/lib64/python3.6/gzip.py in read(self, size)
480 break
481 if buf == b"":
--> 482 raise EOFError("Compressed file ended before the "
483 "end-of-stream marker was reached")
484
EOFError: Compressed file ended before the end-of-stream marker was reached
Any idea what's gone wrong / how to overcome this issue?
Thanks in advance!
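That particular EOFError almost always means the .gz archive is truncated, i.e. the download did not finish (the full GoogleNews file is roughly 1.5 GB compressed). A quick sketch to check the archive before re-downloading; a truncated file raises the same error:
import gzip

path = "GoogleNews-vectors-negative300.bin.gz"  # adjust to your local path
# Stream to the end of the archive; truncation raises EOFError here too
with gzip.open(path, 'rb') as f:
    while f.read(1 << 20):   # 1 MiB at a time
        pass
print("archive is complete")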
