PyInstaller subprocess exception when running in windowed mode - python

I'm using PyInstaller to create my application. In my Django app, I'm using subprocess. When I build with PyInstaller in noconsole mode, it throws errors.
Error log:
Traceback (most recent call last):
File "site-packages\django\core\handlers\base.py", line 149, in get_response
File "site-packages\django\core\handlers\base.py", line 147, in get_response
File "crumbs_tableau\views.py", line 1603, in parser
File "site-packages\django\shortcuts.py", line 67, in render
File "site-packages\django\template\loader.py", line 96, in render_to_string
File "site-packages\django\template\loader.py", line 43, in get_template
django.template.exceptions.TemplateDoesNotExist: helpers/error.html
Internal Server Error: /
Traceback (most recent call last):
File "crumbs_tableau\views.py", line 286, in parser
File "crumbs_tableau\views.py", line 248, in mac_list
File "subprocess.py", line 336, in check_output
File "subprocess.py", line 403, in run
File "subprocess.py", line 667, in __init__
File "subprocess.py", line 905, in _get_handles
File "subprocess.py", line 955, in _make_inheritable
OSError: [WinError 6] The handle is invalid
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "site-packages\django\core\handlers\base.py", line 149, in get_response
File "site-packages\django\core\handlers\base.py", line 147, in get_response
File "crumbs_tableau\views.py", line 1603, in parser
File "site-packages\django\shortcuts.py", line 67, in render
File "site-packages\django\template\loader.py", line 96, in render_to_string
File "site-packages\django\template\loader.py", line 43, in get_template
django.template.exceptions.TemplateDoesNotExist: helpers/error.html
Not Found: /static/parser/bootstrap.css.map

You need to define stdin and stderr explicitly when calling a subprocess in windowed mode.
Example:
p = subprocess.Popen(["netstat", "-a", "-n", "-o"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL)
This is because Windows tries to duplicate these handles when they are not set explicitly, and a windowed (noconsole) process has no valid console handles to duplicate.
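The same idea applies to subprocess.check_output, which is what the traceback above goes through; a minimal sketch (reusing the netstat arguments from the example above):

import subprocess

# stdout is managed by check_output itself; stdin and stderr must still be
# pointed away from the console handles that don't exist under --noconsole.
output = subprocess.check_output(
    ["netstat", "-a", "-n", "-o"],
    stdin=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
)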

Related

Error during execution: docker-compose run --rm api python3 manage.py migrate

I am trying to install Saleor on Linux Mint according to the instructions at
https://docs.saleor.io/docs/3.0/developer/installation
When executing the command
docker-compose run --rm api python3 manage.py migrate
I get an error. The stack trace is below. How do I resolve this issue?
$ docker-compose run --rm api python3 manage.py migrate
Starting saleor-platform_db_1 ...
Starting saleor-platform_jaeger_1 ... done
Starting saleor-platform_redis_1 ... done
ERROR: for saleor-platform_db_1 a bytes-like object is required, not 'str'
ERROR: for db a bytes-like object is required, not 'str'
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 261, in _raise_for_status
response.raise_for_status()
File "/usr/lib/python3/dist-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.22/containers/c015b9d2a6e0ba06c8cc393147db2a4eb1a0fc72d1ae2805e177b409bb8212db/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/compose/service.py", line 625, in start_container
container.start()
File "/usr/lib/python3/dist-packages/compose/container.py", line 241, in start
return self.client.start(self.id, **options)
File "/usr/lib/python3/dist-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/usr/lib/python3/dist-packages/docker/api/container.py", line 1095, in start
self._raise_for_status(res)
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 263, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/usr/lib/python3/dist-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'driver failed programming external connectivity on endpoint saleor-platform_db_1 (1b57cb27e18e4e18fad1fde3f6bebb573260974514be140c7e4e0d74d663b7b0): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use'")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 72, in main
command()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 128, in perform_command
handler(command, command_options)
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 896, in run
run_one_off_container(
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 1343, in run_one_off_container
project.up(
File "/usr/lib/python3/dist-packages/compose/project.py", line 565, in up
results, errors = parallel.parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/project.py", line 548, in do
return service.execute_convergence_plan(
File "/usr/lib/python3/dist-packages/compose/service.py", line 567, in execute_convergence_plan
return self._execute_convergence_start(
File "/usr/lib/python3/dist-packages/compose/service.py", line 506, in _execute_convergence_start
_, errors = parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/service.py", line 508, in <lambda>
lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
File "/usr/lib/python3/dist-packages/compose/service.py", line 620, in start_container_if_stopped
return self.start_container(container)
File "/usr/lib/python3/dist-packages/compose/service.py", line 627, in start_container
if "driver failed programming external connectivity" in ex.explanation:
TypeError: a bytes-like object is required, not 'str'
Error in sys.excepthook:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/apport_python_hook.py", line 153, in apport_excepthook
with os.fdopen(os.open(pr_filename,
FileNotFoundError: [Errno 2] No such file or directory: '/var/crash/_usr_bin_docker-compose.1000.crash'
Original exception was:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 261, in _raise_for_status
response.raise_for_status()
File "/usr/lib/python3/dist-packages/requests/models.py", line 940, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 500 Server Error: Internal Server Error for url: http+docker://localhost/v1.22/containers/c015b9d2a6e0ba06c8cc393147db2a4eb1a0fc72d1ae2805e177b409bb8212db/start
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/compose/service.py", line 625, in start_container
container.start()
File "/usr/lib/python3/dist-packages/compose/container.py", line 241, in start
return self.client.start(self.id, **options)
File "/usr/lib/python3/dist-packages/docker/utils/decorators.py", line 19, in wrapped
return f(self, resource_id, *args, **kwargs)
File "/usr/lib/python3/dist-packages/docker/api/container.py", line 1095, in start
self._raise_for_status(res)
File "/usr/lib/python3/dist-packages/docker/api/client.py", line 263, in _raise_for_status
raise create_api_error_from_http_exception(e)
File "/usr/lib/python3/dist-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 500 Server Error: Internal Server Error ("b'driver failed programming external connectivity on endpoint saleor-platform_db_1 (1b57cb27e18e4e18fad1fde3f6bebb573260974514be140c7e4e0d74d663b7b0): Error starting userland proxy: listen tcp4 0.0.0.0:5432: bind: address already in use'")
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/docker-compose", line 11, in <module>
load_entry_point('docker-compose==1.25.0', 'console_scripts', 'docker-compose')()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 72, in main
command()
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 128, in perform_command
handler(command, command_options)
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 896, in run
run_one_off_container(
File "/usr/lib/python3/dist-packages/compose/cli/main.py", line 1343, in run_one_off_container
project.up(
File "/usr/lib/python3/dist-packages/compose/project.py", line 565, in up
results, errors = parallel.parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/project.py", line 548, in do
return service.execute_convergence_plan(
File "/usr/lib/python3/dist-packages/compose/service.py", line 567, in execute_convergence_plan
return self._execute_convergence_start(
File "/usr/lib/python3/dist-packages/compose/service.py", line 506, in _execute_convergence_start
_, errors = parallel_execute(
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 112, in parallel_execute
raise error_to_reraise
File "/usr/lib/python3/dist-packages/compose/parallel.py", line 210, in producer
result = func(obj)
File "/usr/lib/python3/dist-packages/compose/service.py", line 508, in <lambda>
lambda c: self.start_container_if_stopped(c, attach_logs=not detached, quiet=True),
File "/usr/lib/python3/dist-packages/compose/service.py", line 620, in start_container_if_stopped
return self.start_container(container)
File "/usr/lib/python3/dist-packages/compose/service.py", line 627, in start_container
if "driver failed programming external connectivity" in ex.explanation:
TypeError: a bytes-like object is required, not 'str'
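Two separate problems are visible in this trace. The root cause is buried in the middle: listen tcp4 0.0.0.0:5432: bind: address already in use, meaning something on the host (typically a locally running PostgreSQL) already occupies port 5432, so the db container cannot bind it. The TypeError that actually crashes docker-compose is secondary: service.py tests a str against ex.explanation, which is bytes here. A minimal sketch of that str/bytes mismatch:

# The containment test from compose/service.py line 627, reduced to its essence.
explanation = b"driver failed programming external connectivity on endpoint"

try:
    "driver failed" in explanation  # str needle, bytes haystack
except TypeError as err:
    print(err)  # a bytes-like object is required, not 'str'

print("driver failed" in explanation.decode())  # True once both sides are str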

TypeError from pkg_resources when using static assets in Pyramid

Trying to set up static assets on a Pyramid app. I used the following call:
config.add_static_view(name='static', path='toolsofknowledge:static')
The file main.css is stored under toolsofknowledge/static/main.css, as expected. The toolsofknowledge package is installed locally as an editable install, using pip3 install -e ..
Requesting http://localhost:8080/static/main.css with curl causes this exception on the server:
amoe@cslp019129 $ PYRAMID_RELOAD_TEMPLATES=1 pserve3 --reload development.ini
Starting monitor for PID 3796.
Starting server in PID 3796.
Serving on http://localhost:8080
Serving on http://localhost:8080
ERROR:waitress:Exception when serving /static/main.css
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/pyramid/tweens.py", line 13, in _error_handler
response = request.invoke_exception_view(exc_info)
File "/usr/lib/python3/dist-packages/pyramid/view.py", line 769, in invoke_exception_view
raise HTTPNotFound
pyramid.httpexceptions.HTTPNotFound: The resource could not be found.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/waitress/channel.py", line 336, in service
task.service()
File "/usr/lib/python3/dist-packages/waitress/task.py", line 175, in service
self.execute()
File "/usr/lib/python3/dist-packages/waitress/task.py", line 452, in execute
app_iter = self.channel.server.application(env, start_response)
File "/usr/lib/python3/dist-packages/pyramid/router.py", line 270, in __call__
response = self.execution_policy(environ, self)
File "/usr/lib/python3/dist-packages/pyramid/router.py", line 279, in default_execution_policy
return request.invoke_exception_view(reraise=True)
File "/usr/lib/python3/dist-packages/pyramid/view.py", line 768, in invoke_exception_view
reraise_(*exc_info)
File "/usr/lib/python3/dist-packages/pyramid/compat.py", line 179, in reraise
raise value
File "/usr/lib/python3/dist-packages/pyramid/router.py", line 277, in default_execution_policy
return router.invoke_request(request)
File "/usr/lib/python3/dist-packages/pyramid/router.py", line 249, in invoke_request
response = handle_request(request)
File "/usr/lib/python3/dist-packages/pyramid/tweens.py", line 43, in excview_tween
response = _error_handler(request, exc)
File "/usr/lib/python3/dist-packages/pyramid/tweens.py", line 17, in _error_handler
reraise(*exc_info)
File "/usr/lib/python3/dist-packages/pyramid/compat.py", line 179, in reraise
raise value
File "/usr/lib/python3/dist-packages/pyramid/tweens.py", line 41, in excview_tween
response = handler(request)
File "/usr/lib/python3/dist-packages/pyramid/router.py", line 148, in handle_request
registry, request, context, context_iface, view_name
File "/usr/lib/python3/dist-packages/pyramid/view.py", line 657, in _call_view
response = view_callable(context, request)
File "/usr/lib/python3/dist-packages/pyramid/viewderivers.py", line 401, in viewresult_to_response
result = view(context, request)
File "/usr/lib/python3/dist-packages/pyramid/static.py", line 102, in __call__
if resource_isdir(self.package_name, resource_path):
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 1138, in resource_isdir
return get_provider(package_or_requirement).resource_isdir(
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 364, in get_provider
return _find_adapter(_provider_factories, loader)(module)
File "/usr/lib/python3/dist-packages/pkg_resources/__init__.py", line 1392, in __init__
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
File "/usr/lib/python3.7/posixpath.py", line 156, in dirname
p = os.fspath(p)
TypeError: expected str, bytes or os.PathLike object, not NoneType
Python 3.7, Pyramid 1.10.2.
Solution: add __init__.py to the toolsofknowledge directory.
The problem is quite simple -- the toolsofknowledge package was not a real package, despite being installed, because it was missing an __init__.py. This causes toolsofknowledge.__loader__ to be an instance of NamespaceLoader when it should be SourceFileLoader, and pkg_resources cannot resolve resources relative to something that is not a real package. Arguably it should handle this case more gracefully, though.
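A quick diagnostic for this situation (a sketch, not from the original post; the package name is the one used above): a namespace package has no file-backed loader, and its __file__ is None, which is exactly the value that reached os.path.dirname in the traceback.

import toolsofknowledge

print(toolsofknowledge.__file__)           # None for a namespace package
print(type(toolsofknowledge.__loader__))   # expect SourceFileLoader once __init__.py exists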

PyCharm debugger throws Bad file descriptor error when using dask distributed

I am using the most lightweight/simple dask multiprocessing setup, which is the non-cluster local Client:
from distributed import Client
client = Client()
Even so, the first invocation of dask.bag.compute() results in the following:
Connected to pydev debugger (build 191.7141.48)
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydevd_bundle/pydevd_comm.py", line 383, in _on_run
r = self.sock.recv(1024)
OSError: [Errno 9] Bad file descriptor
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydevd_bundle/pydevd_comm.py", line 383, in _on_run
r = self.sock.recv(1024)
OSError: [Errno 9] Bad file descriptor
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydevd_bundle/pydevd_comm.py", line 383, in _on_run
r = self.sock.recv(1024)
OSError: [Errno 9] Bad file descriptor
The result is that you can more or less flip a coin on whether the program will proceed or error out with a communication exception. Here is what happens when the flip comes up "tails":
Connected to pydev debugger (build 191.7141.48)
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydevd_bundle/pydevd_comm.py", line 383, in _on_run
r = self.sock.recv(1024)
OSError: [Errno 9] Bad file descriptor
Process ForkServerProcess-3:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/distributed/core.py", line 178, in __init__
from .counter import Digest
ImportError: cannot import name 'Digest' from 'distributed.counter' (/usr/local/lib/python3.7/site-packages/distributed/counter.py)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/multiprocessing/process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.7/site-packages/distributed/process.py", line 181, in _run
target(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/distributed/nanny.py", line 587, in _run
worker = Worker(*worker_args, **worker_kwargs)
File "/usr/local/lib/python3.7/site-packages/distributed/worker.py", line 552, in __init__
**kwargs
File "/usr/local/lib/python3.7/site-packages/distributed/node.py", line 76, in __init__
io_loop=self.io_loop,
File "/usr/local/lib/python3.7/site-packages/distributed/core.py", line 180, in __init__
self.digests = defaultdict(partial(Digest, loop=self.io_loop))
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/local/lib/python3.7/site-packages/distributed/utils.py", line 179, in ignoring
yield
SystemError: error return without exception set
distributed.nanny - WARNING - Worker process 20417 exited with status 1
Traceback (most recent call last):
File "_pydevd_frame_eval/pydevd_frame_evaluator_darwin_37_64.pyx", line 95, in _pydevd_frame_eval.pydevd_frame_evaluator_darwin_37_64.get_bytecode_while_frame_eval
KeyError: '/usr/local/lib/python3.7/site-packages/distributed/bokeh/__init__.py'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/helpers/pydev/pydevd.py", line 1758, in <module>
main()
File "/Applications/PyCharm.app/Contents/helpers/pydev/pydevd.py", line 1752, in main
globals = debugger.run(setup['file'], None, None, is_module)
File "/Applications/PyCharm.app/Contents/helpers/pydev/pydevd.py", line 1147, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/git/huddl/python/hamspam/enron.py", line 205, in <module>
client = Client()
File "/usr/local/lib/python3.7/site-packages/distributed/client.py", line 712, in __init__
self.start(timeout=timeout)
File "/usr/local/lib/python3.7/site-packages/distributed/client.py", line 858, in start
sync(self.loop, self._start, **kwargs)
File "/usr/local/lib/python3.7/site-packages/distributed/utils.py", line 331, in sync
six.reraise(*error[0])
File "/usr/local/lib/python3.7/site-packages/six.py", line 693, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/distributed/utils.py", line 316, in f
result[0] = yield future
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 729, in run
value = future.result()
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 736, in run
yielded = self.gen.throw(*exc_info) # type: ignore
File "/usr/local/lib/python3.7/site-packages/distributed/client.py", line 928, in _start
yield self.cluster
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 729, in run
value = future.result()
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/asyncio/tasks.py", line 603, in _wrap_awaitable
return (yield from awaitable.__await__())
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 736, in run
yielded = self.gen.throw(*exc_info) # type: ignore
File "/usr/local/lib/python3.7/site-packages/distributed/deploy/local.py", line 284, in _start
yield [self._start_worker(**self.worker_kwargs) for i in range(n_workers)]
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 729, in run
value = future.result()
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 501, in callback
result_list.append(f.result())
File "/usr/local/lib/python3.7/site-packages/tornado/gen.py", line 742, in run
yielded = self.gen.send(value)
File "/usr/local/lib/python3.7/site-packages/distributed/deploy/local.py", line 316, in _start_worker
raise gen.TimeoutError("Worker failed to start")
tornado.util.TimeoutError: Worker failed to start
Any advice on this?
There will be even more issues/complications when trying to use a LocalCluster mode, but that will be saved for a different question.
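One workaround sketch for the debugging case specifically (an assumption, not a confirmed fix from this thread): keep the scheduler and workers in-process with processes=False, so the PyCharm debugger has no forked children whose inherited sockets can go stale, and guard the entry point as the multiprocessing docs require.

from distributed import Client

if __name__ == "__main__":
    # Threads instead of forked worker processes; trades process-level
    # parallelism for a debugger-friendly single process.
    client = Client(processes=False)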

RuntimeError: unable to open shared memory object, OSError: [Errno 24] Too many open files

I am having trouble with loading indexes of documents.
I am testing my code, so I set
batch_size = 4
number_of_sentences_in_document = 84
number_of_words_in_sentence = 80
which sums up to one mini-batch with 80 * 84 * 4 document indexes.
The problem is that when I turn that index dataset into a DataLoader as below
and try to loop over trainloader, it produces many error messages.
DataManager = DS.NewsDataset(data_examples_gen, Vocab)
trainloader = torch.utils.data.DataLoader(DataManager, batch_size=Args.args.batch_size, shuffle=True, num_workers=32)
The error messages are below.
Traceback (most recent call last):
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 61, in _worker_loop
    data_queue.put((idx, samples))
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py", line 341, in put
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py", line 51, in dumps
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py", line 125, in reduce_storage
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py", line 191, in DupFd
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/resource_sharer.py", line 48, in __init__
OSError: [Errno 24] Too many open files
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py", line 262, in _run_finalizers
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py", line 186, in __call__
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py", line 476, in rmtree
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py", line 474, in rmtree
OSError: [Errno 24] Too many open files: '/tmp/pymp-be4nmgxw'
Process Process-2:
Traceback (most recent call last):
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 61, in _worker_loop
    data_queue.put((idx, samples))
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py", line 341, in put
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py", line 51, in dumps
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py", line 125, in reduce_storage
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py", line 191, in DupFd
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/resource_sharer.py", line 48, in __init__
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 61, in _worker_loop
    data_queue.put((idx, samples))
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py", line 341, in put
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py", line 51, in dumps
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py", line 121, in reduce_storage
RuntimeError: unable to open shared memory object </torch_54415_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py", line 262, in _run_finalizers
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/util.py", line 186, in __call__
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py", line 476, in rmtree
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/shutil.py", line 474, in rmtree
OSError: [Errno 24] Too many open files: '/tmp/pymp-abguy87b'
Process Process-1:
Traceback (most recent call last):
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 258, in _bootstrap
    self.run()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/process.py", line 93, in run
    self._target(*self._args, **self._kwargs)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 61, in _worker_loop
    data_queue.put((idx, samples))
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py", line 341, in put
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/reduction.py", line 51, in dumps
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/multiprocessing/reductions.py", line 121, in reduce_storage
RuntimeError: unable to open shared memory object </torch_54415_3383444026> in read-write mode at /opt/conda/conda-bld/pytorch_1525909934016/work/aten/src/TH/THAllocator.c:342
Traceback (most recent call last):
  File "/home/nlpgpu3/LinoHong/FakeNewsByTitle/main.py", line 26, in <module>
    for mini_batch in trainloader :
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 280, in __next__
    idx, batch = self._get_batch()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 259, in _get_batch
    return self.data_queue.get()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/queues.py", line 335, in get
    res = self._reader.recv_bytes()
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py", line 216, in recv_bytes
    buf = self._recv_bytes(maxlength)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py", line 407, in _recv_bytes
    buf = self._recv(4)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/multiprocessing/connection.py", line 379, in _recv
    chunk = read(handle, remaining)
  File "/home/nlpgpu3/anaconda3/envs/linohong3/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 178, in handler
    _error_if_any_worker_fails()
RuntimeError: DataLoader worker (pid 54416) exited unexpectedly with exit code 1.
Process finished with exit code 1
I thought this was some kind of memory problem, so I tried the same thing
with only two sentences per document, and it worked.
However, I am expecting this to get much larger, with
batch_size up to 32 or 64,
the number of sentences per document up to 84,
the number of words per sentence up to 84.
I tried
$ ulimit -n 10000
but that didn't work.
How can I manage this problem? Any ideas?
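One commonly suggested mitigation, offered here as an assumption rather than a confirmed fix: with num_workers=32, every worker-to-main tensor transfer pins an open file descriptor under PyTorch's default file_descriptor sharing strategy, so either lower num_workers or switch to the file_system strategy before creating the DataLoader:

import torch.multiprocessing

# Share tensors through files on disk instead of per-tensor file
# descriptors, so transfers stop counting against the open-file limit.
torch.multiprocessing.set_sharing_strategy('file_system')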

Robot Framework: Error while running .robot file in Python interpreter using robot.run (Windows environment)

I am trying to execute Run_Keyword.robot, created with RIDE, which is in the directory 'Robot', from the Python interpreter, and I am getting the following error. I am able to run this .py file from cmd without any error, and it gives the expected results. What can be the reason for this? I am a newbie to Robot Framework. Please help with this.
from robot import run
run('C:\\Users\\uvijayac\\Desktop\\Robot')
The error I am getting is as follows:
Traceback (most recent call last):
File "C:\Users\uvijayac\Desktop\Robot\rf.py", line 27, in <module>
run_tests()
File "C:\Users\uvijayac\Desktop\Robot\rf.py", line 23, in run_tests
report=report_file)
File "C:\Python27\lib\site-packages\robot\run.py", line 471, in run
return RobotFramework().execute(*datasources, **options)
File "C:\Python27\lib\site-packages\robot\utils\application.py", line 83, in execute
return self._execute(list(arguments), options)
File "C:\Python27\lib\site-packages\robot\utils\application.py", line 89, in _execute
return self._report_error(unicode(err), help=True)
File "C:\Python27\lib\site-packages\robot\utils\application.py", line 110, in _report_error
self._logger.error(message)
File "C:\Python27\lib\site-packages\robot\output\loggerhelper.py", line 59, in error
self.write(msg, 'ERROR')
File "C:\Python27\lib\site-packages\robot\output\loggerhelper.py", line 62, in write
self.message(Message(message, level, html))
File "C:\Python27\lib\site-packages\robot\output\logger.py", line 109, in message
logger.message(msg)
File "C:\Python27\lib\site-packages\robot\output\monitor.py", line 66, in message
self._writer.error(msg.message, msg.level, clear=self._running_test)
File "C:\Python27\lib\site-packages\robot\output\monitor.py", line 142, in error
self._highlight('[ ', level, ' ] ' + message, error=True)
File "C:\Python27\lib\site-packages\robot\output\monitor.py", line 158, in _highlight
self._write(before, newline=False, error=error)
File "C:\Python27\lib\site-packages\robot\output\monitor.py", line 154, in _write
stream.flush()
IOError: [Errno 9] Bad file descriptor
>>>
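A workaround sketch (an assumption based on the final frame, where stream.flush() fails on an invalid console descriptor): robot.run accepts stdout and stderr options, so handing it real file objects avoids writing to the interpreter's missing console.

from robot import run

# Hypothetical redirection: any open, writable file objects will do.
with open('console.log', 'w') as out:
    run('C:\\Users\\uvijayac\\Desktop\\Robot', stdout=out, stderr=out)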
