Sage hangs when calling Mathematica - python

I recently installed Sage 6.3 on my Fedora 21 machine. Version 6.3 is slightly outdated, but it is the most recent version available in yum's repositories. I also installed Mathematica on the same computer in the hope of being able to call it from within Sage.
Mathematica's terminal interface using the math command works, which according to this reference page should be all that I need. However, when I tell Sage to use mathematica either from the Sage command line or the notebook, Sage hangs. Here's a sample of my interaction with the terminal (the traceback is horrible, as is frequently the case in Sage):
sage: mathematica('hello world')
^CInterrupting Mathematica...
^C---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-1-9208bb19d841> in <module>()
----> 1 mathematica('hello world')
/usr/lib64/python2.7/site-packages/sage/interfaces/interface.pyc in __call__(self, x, name)
197
198 if isinstance(x, basestring):
--> 199 return cls(self, x, name=name)
200 try:
201 return self._coerce_from_special_method(x)
/usr/lib64/python2.7/site-packages/sage/interfaces/expect.pyc in __init__(self, parent, value, is_name, name)
1310 else:
1311 try:
-> 1312 self._name = parent._create(value, name=name)
1313 # Convert ValueError and RuntimeError to TypeError for
1314 # coercion to work properly.
/usr/lib64/python2.7/site-packages/sage/interfaces/interface.pyc in _create(self, value, name)
387 def _create(self, value, name=None):
388 name = self._next_var_name() if name is None else name
--> 389 self.set(name, value)
390 return name
391
/usr/lib64/python2.7/site-packages/sage/interfaces/mathematica.pyc in set(self, var, value)
508 cmd = '%s=%s;'%(var,value)
509 #out = self.eval(cmd)
--> 510 out = self._eval_line(cmd, allow_use_file=True)
511 if len(out) > 8:
512 raise TypeError("Error executing code in Mathematica\nCODE:\n\t%s\nMathematica ERROR:\n\t%s"%(cmd, out))
/usr/lib64/python2.7/site-packages/sage/interfaces/mathematica.pyc in _eval_line(self, line, allow_use_file, wait_for_prompt, restart_if_needed)
535 def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=False):
536 s = Expect._eval_line(self, line,
--> 537 allow_use_file=allow_use_file, wait_for_prompt=wait_for_prompt)
538 return str(s).strip('\n')
539
/usr/lib64/python2.7/site-packages/sage/interfaces/expect.pyc in _eval_line(self, line, allow_use_file, wait_for_prompt, restart_if_needed)
890 out = ''
891 except KeyboardInterrupt:
--> 892 self._keyboard_interrupt()
893 raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
894 if self._terminal_echo:
/usr/lib64/python2.7/site-packages/sage/interfaces/mathematica.pyc in _keyboard_interrupt(self)
420 e = self._expect
421 e.sendline(chr(3)) # send ctrl-c
--> 422 e.expect('Interrupt> ')
423 e.sendline("a") # a -- abort
424 e.expect(self._prompt)
/usr/lib64/sagemath/site-packages/pexpect.pyc in expect(self, pattern, timeout, searchwindowsize)
914 """
915 compiled_pattern_list = self.compile_pattern_list(pattern)
--> 916 return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
917
918 def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
/usr/lib64/sagemath/site-packages/pexpect.pyc in expect_list(self, pattern_list, timeout, searchwindowsize)
965 raise TIMEOUT ('Timeout exceeded in expect_list().')
966 # Still have time left, so read more data
--> 967 c = self.read_nonblocking (self.maxread, timeout)
968 incoming = incoming + c
969 if timeout is not None:
/usr/lib64/sagemath/site-packages/pexpect.pyc in read_nonblocking(self, size, timeout)
546 raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
547
--> 548 r, w, e = select.select([self.child_fd], [], [], timeout)
549 if not r:
550 if not self.isalive():
/usr/lib64/python2.7/site-packages/sage/ext/c_lib.so in sage.ext.c_lib.sage_python_check_interrupt (build/cythonized/sage/ext/c_lib.c:1683)()
/usr/lib64/python2.7/site-packages/sage/ext/c_lib.so in sage.ext.c_lib.sig_raise_exception (build/cythonized/sage/ext/c_lib.c:769)()
KeyboardInterrupt:
sage:
The equivalent in Mathematica's own interface works just fine:
Mathematica 10.1.0 for Linux x86 (64-bit)
Copyright 1988-2015 Wolfram Research, Inc.
In[1]:= hello world
Out[1]= hello world
In[2]:=
Am I doing something wrong, or is this a bug in Sage? Would downloading and building the latest version (6.7) manually fix the problem?
EDIT:
I am using Mathematica 10.1.0. Could that be my problem, i.e. does the older version of Sage not know how to handle the newer version of Mathematica?

According to Trac 16703, this problem should be fixed in the latest Sage. I don't personally have a copy of Mathematica to test this on.
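If you do build the latest Sage from source, a quick smoke test of the interface (a sketch only, assuming the math binary is still on your PATH as in the question) would be:
sage: mathematica('2 + 2')                 # should come back immediately instead of hanging
sage: mathematica.eval('Factor[x^2 - 1]')  # round-trips a string through the Mathematica kernel
Both calls go through the same pexpect channel that hangs in the traceback above, so a prompt answer is a good sign that the fix from Trac 16703 is in effect.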

Related

OSError: [Errno 5] Input/output error when using Google Colaboratory

I was working just fine with Google Colaboratory and suddenly this error started to pop up each time I try to load any type of file. It first started when I was trying to read an HDF file; now nothing will open.
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-8-65c4e8d1c435> in <module>()
----> 1 eo=EOPatch.load('./20.Clean_Textural_features/eopatch_0')
2 eo
9 frames
/usr/local/lib/python3.6/dist-packages/eolearn/core/eodata.py in load(path, features, lazy_loading, filesystem)
530 path = '/'
531
--> 532 return load_eopatch(EOPatch(), filesystem, path, features=features, lazy_loading=lazy_loading)
533
534 def time_series(self, ref_date=None, scale_time=1):
/usr/local/lib/python3.6/dist-packages/eolearn/core/eodata_io.py in load_eopatch(eopatch, filesystem, patch_location, features, lazy_loading)
76 loading_data = executor.map(lambda loader: loader.load(), loading_data)
77
---> 78 for (ftype, fname, _), value in zip(features, loading_data):
79 eopatch[(ftype, fname)] = value
80
/usr/lib/python3.6/concurrent/futures/_base.py in result_iterator()
584 # Careful not to keep a reference to the popped future
585 if timeout is None:
--> 586 yield fs.pop().result()
587 else:
588 yield fs.pop().result(end_time - time.monotonic())
/usr/lib/python3.6/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
/usr/lib/python3.6/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
/usr/lib/python3.6/concurrent/futures/thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
/usr/local/lib/python3.6/dist-packages/eolearn/core/eodata_io.py in <lambda>(loader)
74 if not lazy_loading:
75 with concurrent.futures.ThreadPoolExecutor() as executor:
---> 76 loading_data = executor.map(lambda loader: loader.load(), loading_data)
77
78 for (ftype, fname, _), value in zip(features, loading_data):
/usr/local/lib/python3.6/dist-packages/eolearn/core/eodata_io.py in load(self)
217 return self._decode(gzip_fp, self.path)
218
--> 219 return self._decode(file_handle, self.path)
220
221 def save(self, data, file_format, compress_level=0):
/usr/local/lib/python3.6/dist-packages/eolearn/core/eodata_io.py in _decode(file, path)
268
269 if FileFormat.NPY.extension() in path:
--> 270 return np.load(file)
271
272 raise ValueError('Unsupported data type.')
/usr/local/lib/python3.6/dist-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
434 _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
435 N = len(format.MAGIC_PREFIX)
--> 436 magic = fid.read(N)
437 # If the file size is less than N, we need to make sure not
438 # to seek past the beginning of the file
OSError: [Errno 5] Input/output error
Also, some notebooks won't open and show an error instead.
I looked around at similar posts here but couldn't make sense of them, so any help would be highly appreciated.
PS: my files are in subfolders and not directly contained in 'My Drive'. I have also disabled all the ad blockers and the problem persists...
I think the file/link has been used for downloading beyond its weekly limit. This answer may help you.
There is some discussion about Google's policy on hosting data on Drive.
The solution is to wait a couple of hours/days and try again.
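If waiting does not help, forcing a fresh mount of Drive and reading one file directly can at least tell you whether the failure is at the Drive level or inside eo-learn. A minimal sketch (the eopatch path is the one from the question, relative to the notebook's working directory):
from google.colab import drive
import os

drive.mount('/content/drive', force_remount=True)   # remount in case the old mount went stale

folder = './20.Clean_Textural_features/eopatch_0'
for root, _, files in os.walk(folder):
    for name in files:
        with open(os.path.join(root, name), 'rb') as fh:
            fh.read(16)   # if this also raises OSError [Errno 5], Drive itself is the problem, not eo-learn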

Error when using pandas dataframe in R cell, in rpy2, Jupyter Notebook

I want to use ggplot2 within Jupyter Notebook. However, when I try to make an R magic cell and introduce a variable, I get an error.
Here is the code (one paragraph indicates one cell):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import rpy2
%matplotlib inline
from rpy2.robjects import pandas2ri
pandas2ri.activate()
%load_ext rpy2.ipython

%%R
library(ggplot2)

data = pd.read_csv('train_titanic.csv')

%%R -i data -w 900 -h 480 -u px
With this last cell, I get the following error (including traceback):
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/pandas2ri.py in py2rpy_pandasdataframe(obj)
54 try:
---> 55 od[name] = conversion.py2rpy(values)
56 except Exception as e:
~/anaconda3/envs/catenv/lib/python3.7/functools.py in wrapper(*args, **kw)
839
--> 840 return dispatch(args[0].__class__)(*args, **kw)
841
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/pandas2ri.py in py2rpy_pandasseries(obj)
125 if type(x) is not homogeneous_type:
--> 126 raise ValueError('Series can only be of one type, or None.')
127 # TODO: Could this be merged with obj.type.name == 'O' case above ?
ValueError: Series can only be of one type, or None.
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in from_object(cls, obj)
367 try:
--> 368 mv = memoryview(obj)
369 res = cls.from_memoryview(mv)
TypeError: memoryview: a bytes-like object is required, not 'Series'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-14-75e210679e4a> in <module>
----> 1 get_ipython().run_cell_magic('R', '-i data -w 900 -h 480 -u px', '\n\n')
~/anaconda3/envs/catenv/lib/python3.7/site-packages/IPython/core/interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2360 with self.builtin_trap:
2361 args = (magic_arg_s, cell)
-> 2362 result = fn(*args, **kwargs)
2363 return result
2364
</home/morgan/anaconda3/envs/catenv/lib/python3.7/site-packages/decorator.py:decorator-gen-130> in R(self, line, cell, local_ns)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
185 # but it's overkill for just that one bit of state.
186 def magic_deco(arg):
--> 187 call = lambda f, *a, **k: f(*a, **k)
188
189 if callable(arg):
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/ipython/rmagic.py in R(self, line, cell, local_ns)
721 raise NameError("name '%s' is not defined" % input)
722 with localconverter(converter) as cv:
--> 723 ro.r.assign(input, val)
724
725 tmpd = self.setup_graphics(args)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/functions.py in __call__(self, *args, **kwargs)
190 kwargs[r_k] = v
191 return (super(SignatureTranslatedFunction, self)
--> 192 .__call__(*args, **kwargs))
193
194
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/functions.py in __call__(self, *args, **kwargs)
111
112 def __call__(self, *args, **kwargs):
--> 113 new_args = [conversion.py2rpy(a) for a in args]
114 new_kwargs = {}
115 for k, v in kwargs.items():
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/functions.py in <listcomp>(.0)
111
112 def __call__(self, *args, **kwargs):
--> 113 new_args = [conversion.py2rpy(a) for a in args]
114 new_kwargs = {}
115 for k, v in kwargs.items():
~/anaconda3/envs/catenv/lib/python3.7/functools.py in wrapper(*args, **kw)
838 '1 positional argument')
839
--> 840 return dispatch(args[0].__class__)(*args, **kw)
841
842 funcname = getattr(func, '__name__', 'singledispatch function')
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/pandas2ri.py in py2rpy_pandasdataframe(obj)
59 'The error is: %s'
60 % (name, str(e)))
---> 61 od[name] = StrVector(values)
62
63 return DataFrame(od)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/robjects/vectors.py in __init__(self, obj)
382
383 def __init__(self, obj):
--> 384 super().__init__(obj)
385 self._add_rops()
386
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in __init__(self, obj)
286 super().__init__(obj)
287 elif isinstance(obj, collections.abc.Sized):
--> 288 super().__init__(type(self).from_object(obj).__sexp__)
289 else:
290 raise TypeError('The constructor must be called '
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in from_object(cls, obj)
370 except (TypeError, ValueError):
371 try:
--> 372 res = cls.from_iterable(obj)
373 except ValueError:
374 msg = ('The class methods from_memoryview() and '
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/conversion.py in _(*args, **kwargs)
26 def _cdata_res_to_rinterface(function):
27 def _(*args, **kwargs):
---> 28 cdata = function(*args, **kwargs)
29 # TODO: test cdata is of the expected CType
30 return _cdata_to_rinterface(cdata)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in from_iterable(cls, iterable, populate_func)
317 if populate_func is None:
318 cls._populate_r_vector(iterable,
--> 319 r_vector)
320 else:
321 populate_func(iterable, r_vector)
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in _populate_r_vector(cls, iterable, r_vector)
300 r_vector,
301 cls._R_SET_VECTOR_ELT,
--> 302 cls._CAST_IN)
303
304 #classmethod
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in _populate_r_vector(iterable, r_vector, set_elt, cast_value)
237 def _populate_r_vector(iterable, r_vector, set_elt, cast_value):
238 for i, v in enumerate(iterable):
--> 239 set_elt(r_vector, i, cast_value(v))
240
241
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/sexp.py in _as_charsxp_cdata(x)
430 return x.__sexp__._cdata
431 else:
--> 432 return conversion._str_to_charsxp(x)
433
434
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/conversion.py in _str_to_charsxp(val)
118 s = rlib.R_NaString
119 else:
--> 120 cchar = _str_to_cchar(val)
121 s = rlib.Rf_mkCharCE(cchar, _CE_UTF8)
122 return s
~/anaconda3/envs/catenv/lib/python3.7/site-packages/rpy2/rinterface_lib/conversion.py in _str_to_cchar(s, encoding)
97 def _str_to_cchar(s, encoding: str = 'utf-8'):
98 # TODO: use isStrinb and installTrChar
---> 99 b = s.encode(encoding)
100 return ffi.new('char[]', b)
101
AttributeError: 'float' object has no attribute 'encode'
So I find that it is not possible to even start an R magic cell while importing my pandas dataframe object. However, I have tried creating R vectors inside the cell, and find I can plot these using ggplot2 with no issues.
I am using Python 3.7.6, rpy2 3.1.0, and jupyter-notebook 6.0.3 on Ubuntu 18.04.2 LTS under Windows Subsystem for Linux.
The problem is most likely that one (or more) of the columns holds more than one type, which makes it impossible to transfer the data into an R vector (an R vector can hold only one data type). The traceback may be overwhelming, but here is the relevant part:
ValueError: Series can only be of one type, or None.
Which column is it? That is difficult to say without looking at the dataset you load, but my general approach is to check the types present in each column:
types = data.applymap(type).apply(set)   # the set of Python types seen in each column
types[types.apply(len) > 1]              # columns containing more than one type
Anything returned by the snippet above would be a candidate culprit. There are many different ways of dealing with the problem, depending on the exact nature of the data. Workarounds that I frequently use include:
calling data = data.infer_objects() - helps if pandas did not catch up with a dtype change and still stores the data as (suboptimal) Python objects
filling NaN with an empty string or a string constant if you have missing values in a string column (e.g. str_columns = str_columns.fillna(''))
dates.apply(pd.to_datetime, axis=1) if you have datetime objects but the dtype is object
using df.applymap(lambda x: datetime.combine(x, datetime.min.time()) if not isinstance(x, datetime) else x) if you have a mixture of date and datetime objects
In some very rare cases pandas stores the data differently than rpy2 expects (following certain manipulations); then writing the dataframe to a CSV file and reading it back from disk helps. That is likely not what you are facing here, though, since you start from a freshly read dataframe.
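For illustration, here is a tiny made-up frame that reproduces the situation (the column names are hypothetical, not taken from your Titanic file), together with the type check and one of the fixes above:
import numpy as np
import pandas as pd

# A string column with missing values: NaN is a float, so the column mixes str and float.
data = pd.DataFrame({'age': [22.0, 38.0, 26.0],
                     'cabin': ['C85', np.nan, 'E46']})

types = data.applymap(type).apply(set)
print(types[types.apply(len) > 1])        # flags 'cabin' as holding both str and float

data['cabin'] = data['cabin'].fillna('')  # now every value in the column is a str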
I just noticed there might be an even simpler reason for the problem. For some reason, pandas2ri requires you to call pandas2ri.activate() after importing it. This solved the problem for me.

"Error while extracting" from tensorflow datasets

I want to train a tensorflow image segmentation model on COCO, and thought I would leverage the dataset builder already included. Download seems to be completed but it crashes on extracting the zip files.
I'm running TF 2.0.0 in a Jupyter Notebook under a conda environment; the computer is 64-bit Windows 10. The Oxford-IIIT Pet dataset used in the official image segmentation tutorial works fine.
Below is the error message (my local user name replaced with %user%).
---------------------------------------------------------------------------
OutOfRangeError Traceback (most recent call last)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
88 try:
---> 89 for path, handle in iter_archive(from_path, method):
90 path = tf.compat.as_text(path)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in iter_zip(arch_f)
176 with _open_or_pass(arch_f) as fobj:
--> 177 z = zipfile.ZipFile(fobj)
178 for member in z.infolist():
~\.conda\envs\tf-tutorial\lib\zipfile.py in __init__(self, file, mode, compression, allowZip64)
1130 if mode == 'r':
-> 1131 self._RealGetContents()
1132 elif mode in ('w', 'x'):
~\.conda\envs\tf-tutorial\lib\zipfile.py in _RealGetContents(self)
1193 try:
-> 1194 endrec = _EndRecData(fp)
1195 except OSError:
~\.conda\envs\tf-tutorial\lib\zipfile.py in _EndRecData(fpin)
263 # Determine file size
--> 264 fpin.seek(0, 2)
265 filesize = fpin.tell()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
506 instructions)
--> 507 return func(*args, **kwargs)
508
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in seek(self, offset, whence, position)
166 elif whence == 2:
--> 167 offset += self.size()
168 else:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in size(self)
101 """Returns the size of the file."""
--> 102 return stat(self.__name).length
103
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat(filename)
726 """
--> 727 return stat_v2(filename)
728
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat_v2(path)
743 file_statistics = pywrap_tensorflow.FileStatistics()
--> 744 pywrap_tensorflow.Stat(compat.as_bytes(path), file_statistics)
745 return file_statistics
OutOfRangeError: C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
During handling of the above exception, another exception occurred:
ExtractError Traceback (most recent call last)
<ipython-input-27-887fa0198611> in <module>
1 cocoBuilder = tfds.builder('coco')
2 info = cocoBuilder.info
----> 3 cocoBuilder.download_and_prepare()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
50 _check_no_positional(fn, args, ismethod, allowed=allowed)
51 _check_required(fn, kwargs)
---> 52 return fn(*args, **kwargs)
53
54 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in download_and_prepare(self, download_dir, download_config)
285 self._download_and_prepare(
286 dl_manager=dl_manager,
--> 287 download_config=download_config)
288
289 # NOTE: If modifying the lines below to put additional information in
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, download_config)
946 super(GeneratorBasedBuilder, self)._download_and_prepare(
947 dl_manager=dl_manager,
--> 948 max_examples_per_split=download_config.max_examples_per_split,
949 )
950
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, **prepare_split_kwargs)
802 # Generating data for all splits
803 split_dict = splits_lib.SplitDict()
--> 804 for split_generator in self._split_generators(dl_manager):
805 if splits_lib.Split.ALL == split_generator.split_info.name:
806 raise ValueError(
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\image\coco.py in _split_generators(self, dl_manager)
237 root_url = 'http://images.cocodataset.org/'
238 extracted_paths = dl_manager.download_and_extract({
--> 239 key: root_url + url for key, url in urls.items()
240 })
241
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in download_and_extract(self, url_or_urls)
357 with self._downloader.tqdm():
358 with self._extractor.tqdm():
--> 359 return _map_promise(self._download_extract, url_or_urls)
360
361 #property
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _map_promise(map_fn, all_inputs)
393 """Map the function into each element and resolve the promise."""
394 all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
--> 395 res = utils.map_nested(_wait_on_promise, all_promises)
396 return res
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in <dictcomp>(.0)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
141 return tuple(mapped)
142 # Singleton
--> 143 return function(data_struct)
144
145
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _wait_on_promise(p)
377
378 def _wait_on_promise(p):
--> 379 return p.get()
380
381 else:
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in get(self, timeout)
508 target = self._target()
509 self._wait(timeout or DEFAULT_TIMEOUT)
--> 510 return self._target_settled_value(_raise=True)
511
512 def _target_settled_value(self, _raise=False):
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _target_settled_value(self, _raise)
512 def _target_settled_value(self, _raise=False):
513 # type: (bool) -> Any
--> 514 return self._target()._settled_value(_raise)
515
516 _value = _reason = _target_settled_value
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _settled_value(self, _raise)
222 if _raise:
223 raise_val = self._fulfillment_handler0
--> 224 reraise(type(raise_val), raise_val, self._traceback)
225 return self._fulfillment_handler0
226
~\.conda\envs\tf-tutorial\lib\site-packages\six.py in reraise(tp, value, tb)
694 if value.__traceback__ is not tb:
695 raise value.with_traceback(tb)
--> 696 raise value
697 finally:
698 value = None
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in handle_future_result(future)
840 # type: (Any) -> None
841 try:
--> 842 resolve(future.result())
843 except Exception as e:
844 tb = exc_info()[2]
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~\.conda\envs\tf-tutorial\lib\concurrent\futures\thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
92 except BaseException as err:
93 msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
---> 94 raise ExtractError(msg)
95 # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
96 # directories, so delete destination first, if it already exists.
ExtractError: Error while extracting C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip to C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip : C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
The message seems cryptic to me. The folder it is trying to extract to does not exist when the notebook is started; it is created by TensorFlow at that very command. I obviously tried deleting it completely and running it again, to no effect.
The code that leads to the error is (everything runs fine until the last line):
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from tensorflow_examples.models.pix2pix import pix2pix
import tensorflow_datasets as tfds
from IPython.display import clear_output
import matplotlib.pyplot as plt
dataset, info = tfds.load('coco', with_info=True)
I also tried breaking the last command down into assigning the tfds.builder object and then running download_and_prepare, and again got the same error.
There is enough space on disk: after the download there is still 50+ GB available, while the dataset is supposed to be 37 GB in its largest version (2014).
I had a similar problem with Windows 10 & COCO 2017. My solution was simple: extract the ZIP file manually, following the folder path given in the error message.
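If you prefer to do the manual extraction from Python rather than with an archive tool, here is a sketch using the two paths reported in the error message (replace %user% with your account name):
import os
import zipfile

src = r'C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip'
dst = r'C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip'

os.makedirs(dst, exist_ok=True)      # despite the .zip suffix, the 'extracted' target is a directory
with zipfile.ZipFile(src) as zf:
    zf.extractall(dst)               # then rerun download_and_prepare()
If zipfile itself cannot open the archive, the download is probably incomplete or corrupt; delete it and let tfds download it again.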

Python multiprocessing Pool, OSError: Errno 2 No such file or directory

I'm trying to use this GitHub repo to do some birdsong analysis. I've come across a problem at the stage where I collect all the samples into one array ('Collect Samples'). I'm getting an error that looks to be something to do with my system, and I'm not sure where to start on fixing it (have a look at the linked GitHub repo for more depth). Thanks for having a look!
def job(fn):
    return load_sample(fn, sr=sr,
                       max_length=max_length, fixed_length=fixed_length)

pool = Pool()
%time results = pool.map(job, files[:limit])
print 'Processed', len(results), 'samples'
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-8-5d12f8de2a12> in <module>()
3 max_length=max_length, fixed_length=fixed_length)
4 pool = Pool()
----> 5 get_ipython().magic(u'time results = pool.map(job, files[:limit])')
6 print 'Processed', len(results), 'samples'
/home/notebook/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in magic(self, arg_s)
2156 magic_name, _, magic_arg_s = arg_s.partition(' ')
2157 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2158 return self.run_line_magic(magic_name, magic_arg_s)
2159
2160 #-------------------------------------------------------------------------
/home/notebook/anaconda2/lib/python2.7/site-packages/IPython/core/interactiveshell.pyc in run_line_magic(self, magic_name, line)
2077 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
2078 with self.builtin_trap:
-> 2079 result = fn(*args,**kwargs)
2080 return result
2081
<decorator-gen-59> in time(self, line, cell, local_ns)
/home/notebook/anaconda2/lib/python2.7/site-packages/IPython/core/magic.pyc in <lambda>(f, *a, **k)
186 # but it's overkill for just that one bit of state.
187 def magic_deco(arg):
--> 188 call = lambda f, *a, **k: f(*a, **k)
189
190 if callable(arg):
/home/notebook/anaconda2/lib/python2.7/site-packages/IPython/core/magics/execution.pyc in time(self, line, cell, local_ns)
1183 else:
1184 st = clock2()
-> 1185 exec(code, glob, local_ns)
1186 end = clock2()
1187 out = None
<timed exec> in <module>()
/home/notebook/anaconda2/lib/python2.7/multiprocessing/pool.pyc in map(self, func, iterable, chunksize)
249 '''
250 assert self._state == RUN
--> 251 return self.map_async(func, iterable, chunksize).get()
252
253 def imap(self, func, iterable, chunksize=1):
/home/notebook/anaconda2/lib/python2.7/multiprocessing/pool.pyc in get(self, timeout)
565 return self._value
566 else:
--> 567 raise self._value
568
569 def _set(self, i, obj):
OSError: [Errno 2] No such file or directory
Also, in case it helps, I'm running Python 2.7.13 | Anaconda 4.4.0.
The line that causes the error is results = pool.map(job, files[:limit]).
Thanks a lot in advance.
Following up on the comment I posted, I tried installing ffmpeg using
sudo apt-get install libav-tools
from this link.
I'm not sure how that installs ffmpeg, but it fixed the problem!
If I should delete this post please tell me, but I think it is useful for other people with a similar problem, i.e. there is no need to learn about mappers and pools if you hit the same error.
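For anyone hitting the same symptom: the OSError raised inside Pool.map apparently came from a missing external binary (ffmpeg/avconv used by the sample loader), so a quick pre-flight check (Python 2, to match the question) can save some confusion:
import distutils.spawn

# Prints the resolved path of each tool, or None if it is not installed.
for exe in ('ffmpeg', 'avconv'):
    print exe, '->', distutils.spawn.find_executable(exe)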

Python 3.5 Jupyter Notebook ggplot "name error"

I am getting NameError: name 'unicode' is not defined when using yhat's ggplot library for Python (see the full error below) in a Jupyter Notebook running Python 3.5.2, with the following import statements:
%matplotlib inline
from ggplot import *
The graph still renders, but I would like to eliminate the error, or hide it if it's not causing a major conflict.
The full error is below. Many thanks in advance :)
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
//anaconda/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
697 type_pprinters=self.type_printers,
698 deferred_pprinters=self.deferred_printers)
--> 699 printer.pretty(obj)
700 printer.flush()
701 return stream.getvalue()
//anaconda/lib/python3.5/site-packages/IPython/lib/pretty.py in pretty(self, obj)
381 if callable(meth):
382 return meth(obj, self, cycle)
--> 383 return _default_pprint(obj, self, cycle)
384 finally:
385 self.end_group()
//anaconda/lib/python3.5/site-packages/IPython/lib/pretty.py in _default_pprint(obj, p, cycle)
501 if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
502 # A user-provided repr. Find newlines and replace them with p.break_()
--> 503 _repr_pprint(obj, p, cycle)
504 return
505 p.begin_group(1, '<')
//anaconda/lib/python3.5/site-packages/IPython/lib/pretty.py in _repr_pprint(obj, p, cycle)
692 """A pprint that just redirects to the normal repr function."""
693 # Find newlines and replace them with p.break_()
--> 694 output = repr(obj)
695 for idx,output_line in enumerate(output.splitlines()):
696 if idx:
//anaconda/lib/python3.5/site-packages/ggplot/ggplot.py in __repr__(self)
113 Evaluates patsy expressions within the aesthetics. For example, 'x + 1'
114 , 'factor(x)', or 'pd.cut(price, bins=10)')
--> 115 """
116 for key, item in self._aes.items():
117 if item not in self.data:
//anaconda/lib/python3.5/site-packages/ggplot/ggplot.py in make(self)
//anaconda/lib/python3.5/site-packages/ggplot/ggplot.py in apply_axis_labels(self)
269 i, j = self.subplots.shape
270 i, j = int((i - 1) / 2), int(j - 1)
--> 271 ax = self.subplots[i][j]
272 make_legend(ax, legend)
273 elif self.facets.rowvar:
NameError: name 'unicode' is not defined
If you have Anaconda installed, try installing the conda-forge version. I was able to fix this same problem by switching to that version for Python 3.5:
conda install -c conda-forge ggplot
Hope that helps
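A quick way to confirm the conda-forge build works is to render one of the sample datasets that ship with ggplot (a sketch, assuming the diamonds dataset is available as in the yhat releases):
from ggplot import ggplot, aes, geom_point, diamonds

p = ggplot(aes(x='carat', y='price'), data=diamonds) + geom_point()
print(p)   # rendering the plot is what previously raised the NameError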
