When I use
fits_datasweep_gal = fits.open('Macintosh HD/Users/lingxuan/Downloads/datasweep-index-gal.fits')
to open a FITS file in a Jupyter notebook, it returns:
FileNotFoundError Traceback (most recent call last)
<ipython-input-13-e5886f60eba2> in <module>
----> 1 fits_datasweep_gal = fits.open('Macintosh HD/Users/lingxuan/Downloads/datasweep-index-gal.fits')
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/hdu/hdulist.py in fitsopen(name, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs)
149
150 return HDUList.fromfile(name, mode, memmap, save_backup, cache,
--> 151 lazy_load_hdus, **kwargs)
152
153
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/hdu/hdulist.py in fromfile(cls, fileobj, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs)
388 return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
389 save_backup=save_backup, cache=cache,
--> 390 lazy_load_hdus=lazy_load_hdus, **kwargs)
391
392 @classmethod
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/hdu/hdulist.py in _readfrom(cls, fileobj, data, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs)
1037 if not isinstance(fileobj, _File):
1038 # instantiate a FITS file object (ffo)
-> 1039 fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
1040 # The Astropy mode is determined by the _File initializer if the
1041 # supplied mode was None
~/anaconda3/lib/python3.7/site-packages/astropy/utils/decorators.py in wrapper(*args, **kwargs)
501 # one with the name of the new argument to the function
502 kwargs[new_name[i]] = value
--> 503 return function(*args, **kwargs)
504
505 return wrapper
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/file.py in __init__(self, fileobj, mode, memmap, overwrite, cache)
176 self._open_fileobj(fileobj, mode, overwrite)
177 elif isinstance(fileobj, str):
--> 178 self._open_filename(fileobj, mode, overwrite)
179 else:
180 self._open_filelike(fileobj, mode, overwrite)
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/file.py in _open_filename(self, filename, mode, overwrite)
553
554 if not self._try_read_compressed(self.name, magic, mode, ext=ext):
--> 555 self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
556 self.close_on_error = True
557
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/util.py in fileobj_open(filename, mode)
386 """
387
--> 388 return open(filename, mode, buffering=0)
389
390
FileNotFoundError: [Errno 2] No such file or directory: 'Macintosh HD/Users/lingxuan/Downloads/datasweep-index-gal.fits'
What should I do?
Remove the "Macintosh HD" part from the path:
fits_datasweep_gal = fits.open('/Users/lingxuan/Downloads/datasweep-index-gal.fits')
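If you're unsure a path is right, a quick sanity check before opening helps (a minimal sketch, using the path from this question; adjust for your machine):

from pathlib import Path
from astropy.io import fits

path = Path('/Users/lingxuan/Downloads/datasweep-index-gal.fits')
print(path.exists())  # should print True before you try to open it
fits_datasweep_gal = fits.open(str(path))

Note that macOS shows "Macintosh HD" as the volume name in Finder, but it is not part of the POSIX path.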
I run the following hvplot code,
from hvplot.sample_data import us_crime
columns = ['Burglary rate', 'Larceny-theft rate', 'Robbery rate', 'Violent Crime rate']
us_crime.plot.violin(y=columns, group_label='Type of crime', value_label='Rate per 100k', invert=True)
but get a "No such file or directory" error. Does anyone know what might be wrong? Thanks
FileNotFoundError Traceback (most recent call last)
<ipython-input-30-6e0bc6b3a875> in <module>
----> 1 from hvplot.sample_data import us_crime
2
3 columns = ['Burglary rate', 'Larceny-theft rate', 'Robbery rate', 'Violent Crime rate']
4
5
/opt/conda/lib/python3.7/site-packages/hvplot/sample_data.py in <module>
19
20 # Load catalogue
---> 21 catalogue = open_catalog(_cat_path)
22
23 # Add catalogue entries to namespace
/opt/conda/lib/python3.7/site-packages/intake/__init__.py in open_catalog(uri, **kwargs)
160 raise ValueError('Unknown catalog driver (%s), supply one of: %s'
161 % (driver, list(sorted(registry))))
--> 162 return registry[driver](uri, **kwargs)
163
164
/opt/conda/lib/python3.7/site-packages/intake/catalog/local.py in __init__(self, path, autoreload, **kwargs)
550 self.autoreload = autoreload # set this to False if don't want reloads
551 self.filesystem = kwargs.pop('fs', None)
--> 552 super(YAMLFileCatalog, self).__init__(**kwargs)
553
554 def _load(self, reload=False):
/opt/conda/lib/python3.7/site-packages/intake/catalog/base.py in __init__(self, name, description, metadata, auth, ttl, getenv, getshell, persist_mode, storage_options, *args)
111 self.updated = time.time()
112 self._entries = self._make_entries_container()
--> 113 self.force_reload()
114
115 @classmethod
/opt/conda/lib/python3.7/site-packages/intake/catalog/base.py in force_reload(self)
168 def force_reload(self):
169 """Imperative reload data now"""
--> 170 self._load()
171 self.updated = time.time()
172
/opt/conda/lib/python3.7/site-packages/intake/catalog/local.py in _load(self, reload)
575 self._dir = get_dir(self.path)
576
--> 577 with file_open as f:
578 text = f.read().decode()
579 if "!template " in text:
/opt/conda/lib/python3.7/site-packages/fsspec/core.py in __enter__(self)
100 mode = self.mode.replace("t", "").replace("b", "") + "b"
101
--> 102 f = self.fs.open(self.path, mode=mode)
103
104 self.fobjects = [f]
/opt/conda/lib/python3.7/site-packages/fsspec/spec.py in open(self, path, mode, block_size, cache_options, **kwargs)
934 autocommit=ac,
935 cache_options=cache_options,
--> 936 **kwargs
937 )
938 if not ac:
/opt/conda/lib/python3.7/site-packages/fsspec/implementations/local.py in _open(self, path, mode, block_size, **kwargs)
115 if self.auto_mkdir and "w" in mode:
116 self.makedirs(self._parent(path), exist_ok=True)
--> 117 return LocalFileOpener(path, mode, fs=self, **kwargs)
118
119 def touch(self, path, **kwargs):
/opt/conda/lib/python3.7/site-packages/fsspec/implementations/local.py in __init__(self, path, mode, autocommit, fs, **kwargs)
197 self.autocommit = autocommit
198 self.blocksize = io.DEFAULT_BUFFER_SIZE
--> 199 self._open()
200
201 def _open(self):
/opt/conda/lib/python3.7/site-packages/fsspec/implementations/local.py in _open(self)
202 if self.f is None or self.f.closed:
203 if self.autocommit or "w" not in self.mode:
--> 204 self.f = open(self.path, mode=self.mode)
205 else:
206 # TODO: check if path is writable?
FileNotFoundError: [Errno 2] No such file or directory: '/opt/conda/lib/python3.7/site-packages/hvplot/../examples/datasets.yaml'
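The traceback shows intake failing to open the datasets.yaml catalog bundled with hvplot. A minimal diagnostic sketch (not a fix) to confirm whether that file is actually present in your install:

import os
import hvplot

cat = os.path.join(os.path.dirname(hvplot.__file__), '..', 'examples', 'datasets.yaml')
print(os.path.normpath(cat), os.path.exists(cat))  # False means the example data files were not shipped with this install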
I want to train a TensorFlow image segmentation model on COCO, and thought I would leverage the dataset builder already included. The download seems to complete, but it crashes while extracting the zip files.
I'm running TF 2.0.0 in a Jupyter notebook under a conda environment on 64-bit Windows 10. The Oxford Pet III dataset used in the official image segmentation tutorial works fine.
Below is the error message (my local user name replaced with %user%).
---------------------------------------------------------------------------
OutOfRangeError Traceback (most recent call last)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
88 try:
---> 89 for path, handle in iter_archive(from_path, method):
90 path = tf.compat.as_text(path)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in iter_zip(arch_f)
176 with _open_or_pass(arch_f) as fobj:
--> 177 z = zipfile.ZipFile(fobj)
178 for member in z.infolist():
~\.conda\envs\tf-tutorial\lib\zipfile.py in __init__(self, file, mode, compression, allowZip64)
1130 if mode == 'r':
-> 1131 self._RealGetContents()
1132 elif mode in ('w', 'x'):
~\.conda\envs\tf-tutorial\lib\zipfile.py in _RealGetContents(self)
1193 try:
-> 1194 endrec = _EndRecData(fp)
1195 except OSError:
~\.conda\envs\tf-tutorial\lib\zipfile.py in _EndRecData(fpin)
263 # Determine file size
--> 264 fpin.seek(0, 2)
265 filesize = fpin.tell()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
506 instructions)
--> 507 return func(*args, **kwargs)
508
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in seek(self, offset, whence, position)
166 elif whence == 2:
--> 167 offset += self.size()
168 else:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in size(self)
101 """Returns the size of the file."""
--> 102 return stat(self.__name).length
103
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat(filename)
726 """
--> 727 return stat_v2(filename)
728
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat_v2(path)
743 file_statistics = pywrap_tensorflow.FileStatistics()
--> 744 pywrap_tensorflow.Stat(compat.as_bytes(path), file_statistics)
745 return file_statistics
OutOfRangeError: C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
During handling of the above exception, another exception occurred:
ExtractError Traceback (most recent call last)
<ipython-input-27-887fa0198611> in <module>
1 cocoBuilder = tfds.builder('coco')
2 info = cocoBuilder.info
----> 3 cocoBuilder.download_and_prepare()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
50 _check_no_positional(fn, args, ismethod, allowed=allowed)
51 _check_required(fn, kwargs)
---> 52 return fn(*args, **kwargs)
53
54 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in download_and_prepare(self, download_dir, download_config)
285 self._download_and_prepare(
286 dl_manager=dl_manager,
--> 287 download_config=download_config)
288
289 # NOTE: If modifying the lines below to put additional information in
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, download_config)
946 super(GeneratorBasedBuilder, self)._download_and_prepare(
947 dl_manager=dl_manager,
--> 948 max_examples_per_split=download_config.max_examples_per_split,
949 )
950
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, **prepare_split_kwargs)
802 # Generating data for all splits
803 split_dict = splits_lib.SplitDict()
--> 804 for split_generator in self._split_generators(dl_manager):
805 if splits_lib.Split.ALL == split_generator.split_info.name:
806 raise ValueError(
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\image\coco.py in _split_generators(self, dl_manager)
237 root_url = 'http://images.cocodataset.org/'
238 extracted_paths = dl_manager.download_and_extract({
--> 239 key: root_url + url for key, url in urls.items()
240 })
241
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in download_and_extract(self, url_or_urls)
357 with self._downloader.tqdm():
358 with self._extractor.tqdm():
--> 359 return _map_promise(self._download_extract, url_or_urls)
360
361 @property
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _map_promise(map_fn, all_inputs)
393 """Map the function into each element and resolve the promise."""
394 all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
--> 395 res = utils.map_nested(_wait_on_promise, all_promises)
396 return res
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in <dictcomp>(.0)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
141 return tuple(mapped)
142 # Singleton
--> 143 return function(data_struct)
144
145
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _wait_on_promise(p)
377
378 def _wait_on_promise(p):
--> 379 return p.get()
380
381 else:
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in get(self, timeout)
508 target = self._target()
509 self._wait(timeout or DEFAULT_TIMEOUT)
--> 510 return self._target_settled_value(_raise=True)
511
512 def _target_settled_value(self, _raise=False):
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _target_settled_value(self, _raise)
512 def _target_settled_value(self, _raise=False):
513 # type: (bool) -> Any
--> 514 return self._target()._settled_value(_raise)
515
516 _value = _reason = _target_settled_value
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _settled_value(self, _raise)
222 if _raise:
223 raise_val = self._fulfillment_handler0
--> 224 reraise(type(raise_val), raise_val, self._traceback)
225 return self._fulfillment_handler0
226
~\.conda\envs\tf-tutorial\lib\site-packages\six.py in reraise(tp, value, tb)
694 if value.__traceback__ is not tb:
695 raise value.with_traceback(tb)
--> 696 raise value
697 finally:
698 value = None
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in handle_future_result(future)
840 # type: (Any) -> None
841 try:
--> 842 resolve(future.result())
843 except Exception as e:
844 tb = exc_info()[2]
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~\.conda\envs\tf-tutorial\lib\concurrent\futures\thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
92 except BaseException as err:
93 msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
---> 94 raise ExtractError(msg)
95 # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
96 # directories, so delete destination first, if it already exists.
ExtractError: Error while extracting C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip to C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip : C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
The message seems cryptic to me. The folder it is trying to extract to does not exist when the notebook is started; it is created by TensorFlow, and only at that command. I obviously tried deleting it completely and running again, to no effect.
The code that leads to the error is (everything runs fine until the last line):
from __future__ import absolute_import, division, print_function, unicode_literals  # __future__ imports must come first

import tensorflow as tf
from tensorflow_examples.models.pix2pix import pix2pix
import tensorflow_datasets as tfds
from IPython.display import clear_output
import matplotlib.pyplot as plt

dataset, info = tfds.load('coco', with_info=True)
I also tried breaking the last command down into assigning the tfds.builder object and then calling download_and_prepare(), and again got the same error.
There is enough space on disk: after the download, 50+ GB are still free, while the dataset is supposed to be 37 GB in its largest (2014) version.
I had a similar problem with Windows 10 & COCO 2017. My solution is simple: extract the ZIP file manually according to the folder path in the error message, e.g. as sketched below.
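A minimal sketch of that manual extraction with Python's zipfile, using the exact paths from the error message (%user% stands in for the local user name, as in the question):

import os
import zipfile

src = r'C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip'
dst = r'C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip'

os.makedirs(dst, exist_ok=True)  # the destination directory from the error message
with zipfile.ZipFile(src) as z:
    z.extractall(dst)

After extracting, re-running download_and_prepare() should detect the already-extracted directory and continue.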
I'm trying to download only the most recent .csv files from my S3 bucket and am running into an error that says "TypeError: expected string or bytes-like object."
I currently have working code that identifies the last modified S3 objects, sorts these objects, and puts them into a list named latest_files.
session = boto3.Session()
s3_resource = boto3.resource('s3')
my_bucket = s3_resource.Bucket('chansbucket')
get_last_modified = lambda obj: int(obj.last_modified.strftime('%s'))
unsorted = []
# filters through the bucket and appends objects to the unsorted list
for file in my_bucket.objects.filter():
    unsorted.append(file)
# sort by last-modified time (newest first) and keep the five most recent keys
latest_files = [obj.key for obj in sorted(unsorted, key=get_last_modified, reverse=True)][0:5]
Now I want to loop through latest_files and download only those that end with .csv.
for file in latest_files:
    if file.endswith('.csv'):
        s3_resource.meta.client.download_file(my_bucket, file, '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + file)
Here's where I get the error TypeError: expected string or bytes-like object
Here's the traceback:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-27-ca90c5ad9c53> in <module>()
1 for file in latest_files:
2 if file.endswith('.csv'):
----> 3 s3_resource.meta.client.download_file(my_bucket, str(file), '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + str(file))
4
5
~/anaconda/lib/python3.6/site-packages/boto3/s3/inject.py in download_file(self, Bucket, Key, Filename, ExtraArgs, Callback, Config)
170 return transfer.download_file(
171 bucket=Bucket, key=Key, filename=Filename,
--> 172 extra_args=ExtraArgs, callback=Callback)
173
174
~/anaconda/lib/python3.6/site-packages/boto3/s3/transfer.py in download_file(self, bucket, key, filename, extra_args, callback)
305 bucket, key, filename, extra_args, subscribers)
306 try:
--> 307 future.result()
308 # This is for backwards compatibility where when retries are
309 # exceeded we need to throw the same error from boto3 instead of
~/anaconda/lib/python3.6/site-packages/s3transfer/futures.py in result(self)
71 # however if a KeyboardInterrupt is raised we want want to exit
72 # out of this and propogate the exception.
---> 73 return self._coordinator.result()
74 except KeyboardInterrupt as e:
75 self.cancel()
~/anaconda/lib/python3.6/site-packages/s3transfer/futures.py in result(self)
231 # final result.
232 if self._exception:
--> 233 raise self._exception
234 return self._result
235
~/anaconda/lib/python3.6/site-packages/s3transfer/tasks.py in _main(self, transfer_future, **kwargs)
253 # Call the submit method to start submitting tasks to execute the
254 # transfer.
--> 255 self._submit(transfer_future=transfer_future, **kwargs)
256 except BaseException as e:
257 # If there was an exception raised during the submission of task
~/anaconda/lib/python3.6/site-packages/s3transfer/download.py in _submit(self, client, config, osutil, request_executor, io_executor, transfer_future, bandwidth_limiter)
351 Bucket=transfer_future.meta.call_args.bucket,
352 Key=transfer_future.meta.call_args.key,
--> 353 **transfer_future.meta.call_args.extra_args
354 )
355 transfer_future.meta.provide_transfer_size(
~/.local/lib/python3.6/site-packages/botocore/client.py in _api_call(self, *args, **kwargs)
318 "%s() only accepts keyword arguments." % py_operation_name)
319 # The "self" in this scope is referring to the BaseClient.
--> 320 return self._make_api_call(operation_name, kwargs)
321
322 _api_call.__name__ = str(py_operation_name)
~/.local/lib/python3.6/site-packages/botocore/client.py in _make_api_call(self, operation_name, api_params)
594 }
595 request_dict = self._convert_to_request_dict(
--> 596 api_params, operation_model, context=request_context)
597
598 service_id = self._service_model.service_id.hyphenize()
~/.local/lib/python3.6/site-packages/botocore/client.py in _convert_to_request_dict(self, api_params, operation_model, context)
628 context=None):
629 api_params = self._emit_api_params(
--> 630 api_params, operation_model, context)
631 request_dict = self._serializer.serialize_to_request(
632 api_params, operation_model)
~/.local/lib/python3.6/site-packages/botocore/client.py in _emit_api_params(self, api_params, operation_model, context)
658 service_id=service_id,
659 operation_name=operation_name),
--> 660 params=api_params, model=operation_model, context=context)
661 return api_params
662
~/.local/lib/python3.6/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
354 def emit(self, event_name, **kwargs):
355 aliased_event_name = self._alias_event_name(event_name)
--> 356 return self._emitter.emit(aliased_event_name, **kwargs)
357
358 def emit_until_response(self, event_name, **kwargs):
~/.local/lib/python3.6/site-packages/botocore/hooks.py in emit(self, event_name, **kwargs)
226 handlers.
227 """
--> 228 return self._emit(event_name, kwargs)
229
230 def emit_until_response(self, event_name, **kwargs):
~/.local/lib/python3.6/site-packages/botocore/hooks.py in _emit(self, event_name, kwargs, stop_on_response)
209 for handler in handlers_to_call:
210 logger.debug('Event %s: calling handler %s', event_name, handler)
--> 211 response = handler(**kwargs)
212 responses.append((handler, response))
213 if stop_on_response and response is not None:
~/.local/lib/python3.6/site-packages/botocore/handlers.py in validate_bucket_name(params, **kwargs)
216 return
217 bucket = params['Bucket']
--> 218 if VALID_BUCKET.search(bucket) is None:
219 error_msg = (
220 'Invalid bucket name "%s": Bucket name must match '
TypeError: expected string or bytes-like object
Can you help? I feel like it's something pretty simple, but I'm a total noob and have been pounding my head against my desk forever on this. Any help is appreciated. Thanks!
The issue with this line:
s3_resource.meta.client.download_file(my_bucket, file, '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + file)
is that
my_bucket = s3_resource.Bucket('chansbucket')
is returning a Bucket object while download_file() just wants a bucket name as a string, such as:
s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')
Also, I think that the latest_files =... line should not be indented.
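Putting that together, a sketch of the corrected loop (same local path as in the question; my_bucket.name yields the bucket name string):

for file in latest_files:
    if file.endswith('.csv'):
        s3_resource.meta.client.download_file(
            my_bucket.name,  # the bucket name as a string, not the Bucket object
            file,
            '/Users/mikechan/projects/TT_product_analyses/raw_csv_files/' + file)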
I am trying to run the example for MaskedAutoregressiveFlow at https://www.tensorflow.org/api_docs/python/tf/contrib/distributions/bijectors/MaskedAutoregressiveFlow. It's a plain copy from the docs, but I receive the following error. I've tried event_shape=[dims, 1], but that doesn't seem to help (different error). I'm not sure what to make of it.
Has anyone seen this as well?
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from tensorflow.contrib.distributions import bijectors as tfb
dims = 5
# A common choice for a normalizing flow is to use a Gaussian for the base
# distribution. (However, any continuous distribution would work.) E.g.,
maf = tfd.TransformedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    bijector=tfb.MaskedAutoregressiveFlow(
        shift_and_log_scale_fn=tfb.masked_autoregressive_default_template(
            hidden_layers=[512, 512])),
    event_shape=[dims])
x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching.
maf.log_prob(x) # Almost free; uses Bijector caching.
maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching.
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-3b2fcb2af309> in <module>()
11
12
---> 13 x = maf.sample() # Expensive; uses `tf.while_loop`, no Bijector caching.
14 maf.log_prob(x) # Almost free; uses Bijector caching.
15 maf.log_prob(0.) # Cheap; no `tf.while_loop` despite no Bijector caching.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/distribution.py in sample(self, sample_shape, seed, name)
687 samples: a `Tensor` with prepended dimensions `sample_shape`.
688 """
--> 689 return self._call_sample_n(sample_shape, seed, name)
690
691 def _log_prob(self, value):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/transformed_distribution.py in _call_sample_n(self, sample_shape, seed, name, **kwargs)
411 # work, it is imperative that this is the last modification to the
412 # returned result.
--> 413 y = self.bijector.forward(x, **kwargs)
414 y = self._set_sample_static_shape(y, sample_shape)
415
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/bijector_impl.py in forward(self, x, name)
618 NotImplementedError: if `_forward` is not implemented.
619 """
--> 620 return self._call_forward(x, name)
621
622 def _inverse(self, y):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/distributions/bijector_impl.py in _call_forward(self, x, name, **kwargs)
599 if mapping.y is not None:
600 return mapping.y
--> 601 mapping = mapping.merge(y=self._forward(x, **kwargs))
602 self._cache(mapping)
603 return mapping.y
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py in _forward(self, x)
245 y0 = array_ops.zeros_like(x, name="y0")
246 # call the template once to ensure creation
--> 247 _ = self._shift_and_log_scale_fn(y0)
248 def _loop_body(index, y0):
249 """While-loop body for autoregression calculation."""
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/template.py in __call__(self, *args, **kwargs)
358 custom_getter=self._custom_getter) as vs:
359 self._variable_scope = vs
--> 360 result = self._call_func(args, kwargs)
361 return result
362
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/template.py in _call_func(self, args, kwargs)
300 trainable_at_start = len(
301 ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
--> 302 result = self._func(*args, **kwargs)
303
304 if self._variables_created:
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py in _fn(x)
478 activation=activation,
479 *args,
--> 480 **kwargs)
481 x = masked_dense(
482 inputs=x,
/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py in masked_dense(inputs, units, num_blocks, exclusive, kernel_initializer, reuse, name, *args, **kwargs)
386 *args,
387 **kwargs)
--> 388 return layer.apply(inputs)
389
390
/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py in apply(self, inputs, *args, **kwargs)
807 Output tensor(s).
808 """
--> 809 return self.__call__(inputs, *args, **kwargs)
810
811 def _add_inbound_node(self,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py in __call__(self, inputs, *args, **kwargs)
671
672 # Check input assumptions set before layer building, e.g. input rank.
--> 673 self._assert_input_compatibility(inputs)
674 if input_list and self._dtype is None:
675 try:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py in _assert_input_compatibility(self, inputs)
1195 ', found ndim=' + str(ndim) +
1196 '. Full shape received: ' +
-> 1197 str(x.get_shape().as_list()))
1198 # Check dtype.
1199 if spec.dtype is not None:
ValueError: Input 0 of layer dense_1 is incompatible with the layer: : expected min_ndim=2, found ndim=1. Full shape received: [5]
originally defined at:
File "<ipython-input-2-3b2fcb2af309>", line 9, in <module>
hidden_layers=[512, 512])),
File "/usr/local/lib/python3.6/dist-packages/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py", line 499, in masked_autoregressive_default_template
"masked_autoregressive_default_template", _fn)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/template.py", line 152, in make_template
**kwargs)
I am working on a Jupyter notebook server remotely and when I create a file using:
file = open("test.txt","w")
file.write("test")
file.close()
Everything works as expected and the file test.txt is written to the working directory. My problem arises when trying to use the pandas to_hdf command:
data.to_hdf('raw_data.h5','raw_data_santodomingo',mode='w',format='f',data_columns=True)
I get the following error:
Opening raw_data.h5 in read-only mode
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in open(self, mode, **kwargs)
586 try:
--> 587 self._handle = tables.open_file(self._path, self._mode, **kwargs)
588 except (IOError) as e: # pragma: no cover
/opt/conda/lib/python3.6/site-packages/tables/file.py in open_file(filename, mode, title, root_uep, filters, **kwargs)
319 # Finally, create the File instance, and return it
--> 320 return File(filename, mode, title, root_uep, filters, **kwargs)
321
/opt/conda/lib/python3.6/site-packages/tables/file.py in __init__(self, filename, mode, title, root_uep, filters, **kwargs)
783 # Now, it is time to initialize the File extension
--> 784 self._g_new(filename, mode, **params)
785
tables/hdf5extension.pyx in tables.hdf5extension.File._g_new()
/opt/conda/lib/python3.6/site-packages/tables/utils.py in check_file_access(filename, mode)
178 raise IOError("directory ``%s`` exists but it can not be "
--> 179 "written" % (parentname,))
180 elif mode == 'a':
OSError: directory ``.`` exists but it can not be written
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-182-479f2e98ea81> in <module>()
----> 1 pre_clean_data.to_hdf('raw_data.h5','raw_data_santodomingo',mode='w',format='f',data_columns=True)
/opt/conda/lib/python3.6/site-packages/pandas/core/generic.py in to_hdf(self, path_or_buf, key, **kwargs)
1136
1137 from pandas.io import pytables
-> 1138 return pytables.to_hdf(path_or_buf, key, self, **kwargs)
1139
1140 def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in to_hdf(path_or_buf, key, value, mode, complevel, complib, append, **kwargs)
267 if isinstance(path_or_buf, string_types):
268 with HDFStore(path_or_buf, mode=mode, complevel=complevel,
--> 269 complib=complib) as store:
270 f(store)
271 else:
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in __init__(self, path, mode, complevel, complib, fletcher32, **kwargs)
446 self._fletcher32 = fletcher32
447 self._filters = None
--> 448 self.open(mode=mode, **kwargs)
449
450 @property
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in open(self, mode, **kwargs)
589 if 'can not be written' in str(e):
590 print('Opening %s in read-only mode' % self._path)
--> 591 self._handle = tables.open_file(self._path, 'r', **kwargs)
592 else:
593 raise
/opt/conda/lib/python3.6/site-packages/tables/file.py in open_file(filename, mode, title, root_uep, filters, **kwargs)
318
319 # Finally, create the File instance, and return it
--> 320 return File(filename, mode, title, root_uep, filters, **kwargs)
321
322
/opt/conda/lib/python3.6/site-packages/tables/file.py in __init__(self, filename, mode, title, root_uep, filters, **kwargs)
782
783 # Now, it is time to initialize the File extension
--> 784 self._g_new(filename, mode, **params)
785
786 # Check filters and set PyTables format version for new files.
tables/hdf5extension.pyx in tables.hdf5extension.File._g_new()
/opt/conda/lib/python3.6/site-packages/tables/utils.py in check_file_access(filename, mode)
154 # The file should be readable.
155 if not os.access(filename, os.F_OK):
--> 156 raise IOError("``%s`` does not exist" % (filename,))
157 if not os.path.isfile(filename):
158 raise IOError("``%s`` is not a regular file" % (filename,))
OSError: ``raw_data.h5`` does not exist
These lines seem pertinent and make me think write permission is the issue:
/opt/conda/lib/python3.6/site-packages/tables/utils.py in check_file_access(filename, mode)
154 # The file should be readable.
155 if not os.access(filename, os.F_OK):
--> 156 raise IOError("``%s`` does not exist" % (filename,))
157 if not os.path.isfile(filename):
158 raise IOError("``%s`` is not a regular file" % (filename,))
OSError: ``raw_data.h5`` does not exist
However, that confuses me, since I can write text files to the working directory as mentioned above. Any and all assistance is appreciated.
EDIT: If I use the full path '/home/joyvan/work/raw_data.h5' I get a different error readout.
data.to_hdf('/home/joyvan/work/raw_data.h5','raw_data_santodomingo',mode='w',format='f',data_columns=True)
produces
OSError Traceback (most recent call last)
<ipython-input-185-de493145e6a7> in <module>()
----> 1 pre_clean_data.to_hdf('/home/joyvan/work/raw_data.h5','raw_data_santodomingo',mode='w',format='f',data_columns=True)
/opt/conda/lib/python3.6/site-packages/pandas/core/generic.py in to_hdf(self, path_or_buf, key, **kwargs)
1136
1137 from pandas.io import pytables
-> 1138 return pytables.to_hdf(path_or_buf, key, self, **kwargs)
1139
1140 def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in to_hdf(path_or_buf, key, value, mode, complevel, complib, append, **kwargs)
267 if isinstance(path_or_buf, string_types):
268 with HDFStore(path_or_buf, mode=mode, complevel=complevel,
--> 269 complib=complib) as store:
270 f(store)
271 else:
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in __init__(self, path, mode, complevel, complib, fletcher32, **kwargs)
446 self._fletcher32 = fletcher32
447 self._filters = None
--> 448 self.open(mode=mode, **kwargs)
449
450 @property
/opt/conda/lib/python3.6/site-packages/pandas/io/pytables.py in open(self, mode, **kwargs)
585
586 try:
--> 587 self._handle = tables.open_file(self._path, self._mode, **kwargs)
588 except (IOError) as e: # pragma: no cover
589 if 'can not be written' in str(e):
/opt/conda/lib/python3.6/site-packages/tables/file.py in open_file(filename, mode, title, root_uep, filters, **kwargs)
318
319 # Finally, create the File instance, and return it
--> 320 return File(filename, mode, title, root_uep, filters, **kwargs)
321
322
/opt/conda/lib/python3.6/site-packages/tables/file.py in __init__(self, filename, mode, title, root_uep, filters, **kwargs)
782
783 # Now, it is time to initialize the File extension
--> 784 self._g_new(filename, mode, **params)
785
786 # Check filters and set PyTables format version for new files.
tables/hdf5extension.pyx in tables.hdf5extension.File._g_new()
/opt/conda/lib/python3.6/site-packages/tables/utils.py in check_file_access(filename, mode)
172 parentname = '.'
173 if not os.access(parentname, os.F_OK):
--> 174 raise IOError("``%s`` does not exist" % (parentname,))
175 if not os.path.isdir(parentname):
176 raise IOError("``%s`` is not a directory" % (parentname,))
OSError: ``/home/joyvan/work`` does not exist
I ran into a similar problem. It turned out that the user I was running the script as did not have permission to write to the directory. I ran the same script as root and it worked.
Note: this is late and not a direct answer to the OP's question, but it was a similar situation for me, so I'm writing up the solution that worked.
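If running as root is not an option, here is a minimal diagnostic sketch for the two conditions the tracebacks test (paths as in the question):

import os

print(os.access('.', os.W_OK))                  # can the working directory be written?
print(os.path.isdir('/home/joyvan/work'))       # does the absolute target directory exist?
print(os.access('/home/joyvan/work', os.W_OK))  # and is it writable?

As an aside, the default user on Jupyter Docker images is jovyan, not joyvan, which may be why ``/home/joyvan/work`` does not exist.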