Problems using compile and multiprocessing at the same time - Python

I work in VSCode, and when I run this file:
from multiprocessing import Process

def mp_setup_and_run(processes_num, *args):
    processes = {}
    for i in range(processes_num):
        processes[i] = Process(
            target=function_example,
            args=args,
            daemon=True,)
        processes[i].start()
    for i in range(processes_num):
        processes[i].join()

def function_example(*data):
    print(data)

if __name__ == "__main__":
    compiled = compile("z**2 + c", "<string>", "eval")
    mp_setup_and_run(3, compiled)
I get these exceptions:
PS C:\Python\projects\mondebrot_painter> cd 'c:\Python\projects\mondebrot_painter'; & 'C:\Program Files\Python38\python.exe' 'c:\Users\ASUS\.vscode\extensions\ms-python.python-2020.5.80290\pythonFiles\lib\python\debugpy\no_wheels\debugpy\launcher' '51560' '--' 'c:\Python\projects\mondebrot_painter\test.py'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Program Files\Python38\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "c:\Users\ASUS\.vscode\extensions\ms-python.python-2020.5.80290\pythonFiles\lib\python\debugpy\no_wheels\debugpy\__main__.py", line 45, in <module>
cli.main()
File "c:\Users\ASUS\.vscode\extensions\ms-python.python-2020.5.80290\pythonFiles\lib\python\debugpy\no_wheels\debugpy/..\debugpy\server\cli.py", line 430, in main
run()
File "c:\Users\ASUS\.vscode\extensions\ms-python.python-2020.5.80290\pythonFiles\lib\python\debugpy\no_wheels\debugpy/..\debugpy\server\cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "C:\Program Files\Python38\lib\runpy.py", line 263, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\Program Files\Python38\lib\runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\Program Files\Python38\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "c:\Python\projects\mondebrot_painter\test.py", line 45, in <module>
result = mp_setup_and_run(3, compiled)
File "c:\Python\projects\mondebrot_painter\test.py", line 19, in mp_setup_and_run
processes[i].start()
File "C:\Program Files\Python38\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 326, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python38\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'code' object
and the debugger redirects me to the <string> file:
LOAD_CONST(0), LOAD_CONST(None), IMPORT_NAME(sys), STORE_NAME(sys), LOAD_NAME(sys.path), LOAD_METHOD(insert), LOAD_CONST(0), LOAD_CONST('c:\\Users\\ASUS\\.vscode\\extensions\\ms-python.python-2020.5.80290\\pythonFiles\\lib\\python\\debugpy\\no_wheels\\debugpy\\_vendored\\pydevd'), CALL_METHOD{2}, POP_TOP, LOAD_CONST(0), LOAD_CONST(None), IMPORT_NAME(pydevd), STORE_NAME(pydevd), LOAD_CONST('http_json'), LOAD_NAME(pydevd.PydevdCustomization), STORE_ATTR(DEFAULT_PROTOCOL), LOAD_NAME(pydevd.settrace), LOAD_CONST('127.0.0.1'), LOAD_CONST(51592), LOAD_CONST(False), LOAD_CONST(False), LOAD_CONST(True), LOAD_CONST(None), LOAD_CONST('92e8bb604eeece436b2401def85a7ab95455e6c26fd9d660cb8175e691d71bd0'), LOAD_CONST('127.0.0.1'), LOAD_CONST('92e8bb604eeece436b2401def85a7ab95455e6c26fd9d660cb8175e691d71bd0'), LOAD_CONST(True), LOAD_CONST(True), LOAD_CONST(51592), LOAD_CONST(9040), LOAD_CONST(False), LOAD_CONST(('client', 'client-access-token', 'json-dap-http', 'multiprocess', 'port', 'ppid', 'server')), BUILD_CONST_KEY_MAP{7}, LOAD_CONST(('host', 'port', 'suspend', 'trace_only_current_thread', 'patch_multiprocessing', 'access_token', 'client_access_token', '__setup_holder__')), CALL_FUNCTION_KW{8}, POP_TOP, LOAD_CONST(0), LOAD_CONST(('spawn_main',)), IMPORT_NAME(multiprocessing.spawn), IMPORT_FROM(spawn_main), STORE_NAME(spawn_main), POP_TOP, LOAD_NAME(spawn_main), LOAD_CONST(9040), LOAD_CONST(892), LOAD_CONST(('parent_pid', 'pipe_handle')), CALL_FUNCTION_KW{2}, POP_TOP, return None
If I run the program from the console, I get this message:
C:\Python\projects\mondebrot_painter>python set_generator.py
Traceback (most recent call last):
File "set_generator.py", line 121, in <module>
set_ = mp_setup_and_run(senter, length, quality, processes_num, max_iter, compiled, mode)
File "set_generator.py", line 86, in mp_setup_and_run
processes[i].start()
File "C:\Program Files\Python38\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\context.py", line 326, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python38\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python38\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'code' object
C:\Python\projects\mondebrot_painter>Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Program Files\Python38\lib\multiprocessing\spawn.py", line 107, in spawn_main
new_handle = reduction.duplicate(pipe_handle,
File "C:\Program Files\Python38\lib\multiprocessing\reduction.py", line 79, in duplicate
return _winapi.DuplicateHandle(
PermissionError: [WinError 5] Access Denied
I am somewhat lost and don’t understand what is happening and why I can't pass compiled.

If you simplify away the multiprocessing code and just use this from the console, you'll see the TypeError you are getting:
$ python
...
>>> compiled = compile("z**2 + c", "<string>", "eval")
>>> import pickle
>>> pickle.dumps(compiled)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: can't pickle code objects
This makes sense because the documentation tells us that pickle can handle:
None, True, and False
integers, floating point numbers, complex numbers
strings, bytes, bytearrays
tuples, lists, sets, and dictionaries containing only picklable objects
functions defined at the top level of a module (using def, not lambda)
built-in functions defined at the top level of a module
classes that are defined at the top level of a module
instances of such classes whose __dict__ or the result of calling __getstate__() is picklable (see section Pickling Class Instances for details).
and compiled is not one of these.1
What's not said here, but is crucial to know, is that the multiprocessing module must be able to use pickle to serialize objects, so as to send them from one Python process to another. Since your compiled expression is not serializable, it cannot be sent from one Python process to another.
The trick is to serialize the expression, not the compiled expression. That is, instead of:
mp_setup_and_run(3, compiled)
use:
mp_setup_and_run(3, "z**2 + c")
Then have mp_setup_and_run pass the expression string through to the function, and have each worker make its own call to compile. You'll do three separate compiles, one in each of your three processes, but that's OK.
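Putting that together, here is a minimal sketch of the fixed program, assuming the same function names as above; the concrete values of z and c inside the worker are invented purely to show the compiled expression being evaluated:
from multiprocessing import Process

def function_example(expression):
    # each worker compiles its own copy of the expression
    compiled = compile(expression, "<string>", "eval")
    z, c = 2, 3                  # hypothetical values, only to demonstrate the evaluation
    print(eval(compiled))        # prints 7 in each of the three processes

def mp_setup_and_run(processes_num, *args):
    processes = {}
    for i in range(processes_num):
        processes[i] = Process(target=function_example, args=args, daemon=True)
        processes[i].start()
    for i in range(processes_num):
        processes[i].join()

if __name__ == "__main__":
    mp_setup_and_run(3, "z**2 + c")  # a plain string pickles fine; a code object does not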
1Of course, the documentation also says:
Attempts to pickle unpicklable objects will raise the PicklingError exception
when you and I both got TypeError instead. But this is the reason for the TypeError.

Related

ModuleNotFoundError: No module named 'tornado' on ubuntu

When I try to run my application, I get the following error:
Traceback (most recent call last):
File "./app.py", line 32, in <module>
from pushservices.bootstrap import init_messaging_agents
File "/home/airnotifier/airnotifier/pushservices/bootstrap.py", line 2, in <module>
from .apns import *
File "/home/airnotifier/airnotifier/pushservices/apns.py", line 5, in <module>
from util import json_encode
File "/home/airnotifier/airnotifier/util.py", line 37, in <module>
import tornado
ModuleNotFoundError: No module named 'tornado'
I tried to run this command to install tornado:
python3 -m pip install tornado
Then I got the following message:
Requirement already satisfied: tornado in /usr/local/lib/python3.8/dist-packages (6.0.3)
followed by this error log:
--- Logging error ---
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/utils/logging.py", line 177, in emit
self.console.print(renderable, overflow="ignore", crop=False, style=style)
File "/usr/local/lib/python3.8/dist-packages/pip/_vendor/rich/console.py", line 1752, in print
extend(render(renderable, render_options))
File "/usr/local/lib/python3.8/dist-packages/pip/_vendor/rich/console.py", line 1390, in render
for render_output in iter_render:
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/utils/logging.py", line 134, in __rich_console__
for line in lines:
File "/usr/local/lib/python3.8/dist-packages/pip/_vendor/rich/segment.py", line 245, in split_lines
for segment in segments:
File "/usr/local/lib/python3.8/dist-packages/pip/_vendor/rich/console.py", line 1368, in render
renderable = rich_cast(renderable)
File "/usr/local/lib/python3.8/dist-packages/pip/_vendor/rich/protocol.py", line 36, in rich_cast
renderable = cast_method()
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/self_outdated_check.py", line 130, in __rich__
pip_cmd = get_best_invocation_for_this_pip()
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/utils/entrypoints.py", line 58, in get_best_invocation_for_this_pip
if found_executable and os.path.samefile(
File "/usr/lib/python3.8/genericpath.py", line 101, in samefile
s2 = os.stat(f2)
FileNotFoundError: [Errno 2] No such file or directory: '/usr/bin/pip3.8'
Call stack:
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/dist-packages/pip/__main__.py", line 31, in <module>
sys.exit(_main())
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/cli/main.py", line 70, in main
return command.main(cmd_args)
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/cli/base_command.py", line 101, in main
return self._main(args)
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/cli/base_command.py", line 223, in _main
self.handle_pip_version_check(options)
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/cli/req_command.py", line 148, in handle_pip_version_check
pip_self_version_check(session, options)
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/self_outdated_check.py", line 237, in pip_self_version_check
logger.info("[present-rich] %s", upgrade_prompt)
File "/usr/lib/python3.8/logging/__init__.py", line 1446, in info
self._log(INFO, msg, args, **kwargs)
File "/usr/lib/python3.8/logging/__init__.py", line 1589, in _log
self.handle(record)
File "/usr/lib/python3.8/logging/__init__.py", line 1599, in handle
self.callHandlers(record)
File "/usr/lib/python3.8/logging/__init__.py", line 1661, in callHandlers
hdlr.handle(record)
File "/usr/lib/python3.8/logging/__init__.py", line 954, in handle
self.emit(record)
File "/usr/local/lib/python3.8/dist-packages/pip/_internal/utils/logging.py", line 179, in emit
self.handleError(record)
Message: '[present-rich] %s'
Arguments: (UpgradePrompt(old='22.1.2', new='22.2.2'),)
Can anyone please tell me what the problem is and what I should do?
Here's a possible scenario that could cause this:
you executed pip install tornado as one user, but you run the tornado project as another user, so the package is installed for an environment your application never sees.
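If that's the case, a quick way to confirm the mismatch is to check which interpreter and user the failing application actually runs with, and then install tornado for exactly that interpreter. A small sketch (the pip command in the comment is only an illustration of the idea):
# Run this with the same interpreter and user that start the application:
import sys
import getpass

print(sys.executable)     # the interpreter that cannot import tornado
print(getpass.getuser())  # the user the application runs as
print(sys.path)           # where this interpreter looks for installed packages

# Then install tornado for exactly that interpreter, for example:
#   sudo -u <app-user> /path/to/that/python -m pip install tornado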

why multiprocess works differently in ubuntu vs macOS?

I have reduced things to this simple code.
from multiprocessing import Process

def abc():
    print("HI")
    print(a)

a = 4
p = Process(target = abc)
p.start()
It works perfectly fine on Ubuntu (Python 3.8.5) and prints:
HI
4
However, it fails in Spyder (Python 3.9.5) with AttributeError: Can't get attribute 'abc' on <module '__main__' (built-in)>, and on macOS (Python 3.8.10; I tried other versions as well and they failed too) from the command line with a RuntimeError.
Spyder error:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "multiprocessing/spawn.pyc", line 116, in spawn_main
File "multiprocessing/spawn.pyc", line 126, in _main
AttributeError: Can't get attribute 'abc' on <module '__main__' (built-in)>
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "multiprocessing/spawn.pyc", line 116, in spawn_main
File "multiprocessing/spawn.pyc", line 126, in _main
AttributeError: Can't get attribute 'abc' on <module '__main__' (built-in)>
MacOS-BigSur error:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/spawn.py", line 125, in _main
prepare(preparation_data)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "/Users/asavas/opt/anaconda3/lib/python3.8/runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "/Users/asavas/opt/anaconda3/lib/python3.8/runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/Users/asavas/opt/anaconda3/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/asavas/delete.py", line 8, in <module>
p.start()
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/popen_spawn_posix.py", line 42, in _launch
prep_data = spawn.get_preparation_data(process_obj._name)
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/spawn.py", line 154, in get_preparation_data
_check_not_importing_main()
File "/Users/asavas/opt/anaconda3/lib/python3.8/multiprocessing/spawn.py", line 134, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
I'm trying to understand why this happens and how I can resolve it.
Thanks
It appears that this version of macOS has switched over to using the spawn rather than the fork method of creating new processes. This means that when a new process is created, a new, empty address space is created, a new Python interpreter is launched, and the source file is re-executed from the top. Consequently, any code that is at global scope and not within a block that begins with if __name__ == '__main__': will get executed. That is why any code that creates processes must be inside such a block, or you will get into a recursive, process-creating loop that generates the errors you see. You simply need:
from multiprocessing import Process

def abc():
    print("HI")
    print(a)

# this will get re-executed in the new subprocess:
a = 4

if __name__ == '__main__':
    p = Process(target=abc)
    p.start()
    p.join()  # explicitly wait for process to complete
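If you want to see (or experiment with) the start method your platform uses, multiprocessing lets you query and select it explicitly. A small sketch; note that the if __name__ == '__main__': guard is still the portable fix, since forcing fork on macOS can misbehave with some system frameworks:
import multiprocessing as mp

def abc():
    print("HI")

if __name__ == '__main__':
    print(mp.get_start_method())   # 'fork' on Linux, 'spawn' on macOS (3.8+) and Windows

    # An explicit context makes the choice visible instead of relying on the default:
    ctx = mp.get_context('spawn')
    p = ctx.Process(target=abc)
    p.start()
    p.join()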

Pandas Dedupe not working. Multiprocessing and Permission error

I was trying to clean up duplicates in an Excel file using dedupe.
The code worked fine at first, and the code itself is simple. But whenever I run it now I get the error below. It only works again if I delete all the temp files, restart PyCharm, or restart my computer, and even then it won't run a second time.
The data file is a CSV with a list of random, similar names in column A, with the header 'Name'. Please help me resolve this. Thank you.
Code
import pandas as pd
import pandas_dedupe
#loading data
df = pd.read_csv('duplicate.csv')
#deduplication process
df_final = pandas_dedupe.dedupe_dataframe(df,['Name'])
#save to csv
df_final.to_csv('cleansed_output.csv')
I'm getting the error below:
C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\pandas_dedupe\utility_functions.py:17: FutureWarning: The default value of regex will change from True to False in a future version.
df[i] = df[i].str.replace('[^\w\s\.\-\(\)\,\:\/\\\\]','')
Reading from dedupe_dataframe_learned_settings
Clustering...
Traceback (most recent call last):
File "C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\dedupe\api.py", line 103, in score
matches = core.scoreDuplicates(pairs,
File "C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\dedupe\core.py", line 244, in scoreDuplicates
process.start()
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\popen_spawn_win32.py", line 45, in __init__
prep_data = spawn.get_preparation_data(process_obj._name)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 154, in get_preparation_data
_check_not_importing_main()
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main
raise RuntimeError('''
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 125, in _main
prepare(preparation_data)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\Username\PycharmProjects\Duplicate\main.py", line 10, in <module>
df_final = pandas_dedupe.dedupe_dataframe(df,['Name'])
File "C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\pandas_dedupe\dedupe_dataframe.py", line 249, in dedupe_dataframe
clustered_df = _cluster(deduper, data_d, threshold, canonicalize)
File "C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\pandas_dedupe\dedupe_dataframe.py", line 143, in _cluster
clustered_dupes = deduper.partition(data, threshold)
File "C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\dedupe\api.py", line 170, in partition
pair_scores = self.score(pairs)
File "C:\Users\Username\AppData\Roaming\Python\Python38\site-packages\dedupe\api.py", line 108, in score
raise RuntimeError('''
RuntimeError:
You need to either turn off multiprocessing or protect
the calls to the Dedupe methods with a
`if __name__ == '__main__'` in your main module, see
https://docs.python.org/3/library/multiprocessing.html#the-spawn-and-forkserver-start-methods
Traceback (most recent call last):
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\shutil.py", line 616, in _rmtree_unsafe
os.unlink(fullname)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\USERNAME~1.KAB\\AppData\\Local\\Temp\\tmpp9123_pc\\blocks.db'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\tempfile.py", line 802, in onerror
_os.unlink(path)
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\USERNAME~1.KAB\\AppData\\Local\\Temp\\tmpp9123_pc\\blocks.db'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\weakref.py", line 642, in _exitfunc
f()
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\weakref.py", line 566, in __call__
return info.func(*info.args, **(info.kwargs or {}))
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\tempfile.py", line 817, in _cleanup
cls._rmtree(name)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\tempfile.py", line 813, in _rmtree
_shutil.rmtree(name, onerror=onerror)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\shutil.py", line 740, in rmtree
return _rmtree_unsafe(path, onerror)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\shutil.py", line 618, in _rmtree_unsafe
onerror(os.unlink, fullname, sys.exc_info())
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\tempfile.py", line 805, in onerror
cls._rmtree(path)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\tempfile.py", line 813, in _rmtree
_shutil.rmtree(name, onerror=onerror)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\shutil.py", line 740, in rmtree
return _rmtree_unsafe(path, onerror)
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\shutil.py", line 599, in _rmtree_unsafe
onerror(os.scandir, path, sys.exc_info())
File "C:\Users\Username\AppData\Local\Programs\Python\Python38\lib\shutil.py", line 596, in _rmtree_unsafe
with os.scandir(path) as scandir_it:
NotADirectoryError: [WinError 267] The directory name is invalid: 'C:\\Users\\USERNAME~1.KAB\\AppData\\Local\\Temp\\tmpp9123_pc\\blocks.db'
Process finished with exit code -1
The answer is in the error:
You need to either turn off multiprocessing or protect the calls to the Dedupe methods with a if __name__ == '__main__' in your main module
Change your code to the following, and try again:
import pandas as pd
import pandas_dedupe

if __name__ == "__main__":
    # loading data
    df = pd.read_csv('duplicate.csv')

    # deduplication process
    df_final = pandas_dedupe.dedupe_dataframe(df, ['Name'])

    # save to csv
    df_final.to_csv('cleansed_output.csv')

Multiprocessing with Process and Error: TypeError: can't pickle _thread.lock objects

I just can't get any further with this problem and I don't understand why. I use multiprocessing, which works great in a mini example, but when I build it into the "big" code it doesn't work anymore. Could someone explain the mistake to me in simpler terms, or tell me what I am doing wrong?
Blabla Errorlog...
...
File "C:\Users\XXX\eclipse-workspace\ADAScarfs_v1.4\fem\_ModelGenerator.py", line 333, in _generateNodeObj
nodesAdh.start()
File "C:\Anaconda3\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Anaconda3\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Anaconda3\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
The code where the error occurs:
nodesAdh = multiprocessing.Process(target=myScarfMeshR3D.generateAdhesiveNodesObject, args=(_sectionAdh,))
nodesAdh.start()
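The cause here is the same rule discussed in the compile() question above: with the Windows spawn start method, everything passed in args must be picklable, and _sectionAdh evidently carries (directly or indirectly) a _thread.lock. The usual workaround is to pass only plain, picklable data and rebuild any lock-holding objects inside the child process. Below is a self-contained sketch of that pattern; the Section class is invented purely for illustration and is not from the code above:
import threading
from multiprocessing import Process

class Section:
    """Holds a lock, so instances of this class cannot be pickled."""
    def __init__(self, name, thickness):
        self.name = name
        self.thickness = thickness
        self.lock = threading.Lock()   # this attribute is what breaks pickling

def worker(name, thickness):
    # rebuild the lock-holding object inside the child instead of shipping it over
    section = Section(name, thickness)
    print("child got:", section.name, section.thickness)

if __name__ == '__main__':
    section = Section("adhesive", 0.2)
    # Process(target=worker, args=(section,)) would raise the same TypeError;
    # passing only the plain fields works:
    p = Process(target=worker, args=(section.name, section.thickness))
    p.start()
    p.join()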

Python inputs library - 'NoneType' object has no attribute 'terminate'

I am trying to use the inputs library to get user input from mice, gamepads, and keyboards.
I tried the following code which is supposed to read events from all devices:
import inputs

while True:
    for device in inputs.devices:
        for event in device.read():
            print(event)
There is a problem when I run the code - I get the following error: AttributeError: 'NoneType' object has no attribute 'terminate'
I have also tried to read a single event:
import inputs

while True:
    for device in inputs.devices:
        event = device.read()
        print(event)
This gives me the same error.
I am using Python 3.6 and inputs==0.4 from pip.
Does anyone know how to fix this error?
Full traceback:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\python36\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\python36\lib\multiprocessing\spawn.py", line 114, in _main
prepare(preparation_data)
File "C:\python36\lib\multiprocessing\spawn.py", line 225, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\python36\lib\multiprocessing\spawn.py", line 277, in _fixup_main_from_path
run_name="__mp_main__")
File "C:\python36\lib\runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "C:\python36\lib\runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "C:\python36\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\David\Documents\GitHub\Bubbles\testing.py", line 5, in <module>
event = device.read()
File "C:\python36\lib\site-packages\inputs.py", line 2313, in read
return next(iter(self))
File "C:\python36\lib\site-packages\inputs.py", line 2273, in __iter__
event = self._do_iter()
File "C:\python36\lib\site-packages\inputs.py", line 2292, in _do_iter
data = self._get_data(read_size)
File "C:\python36\lib\site-packages\inputs.py", line 2365, in _get_data
return self._pipe.recv_bytes()
File "C:\python36\lib\site-packages\inputs.py", line 2330, in _pipe
self._listener.start()
File "C:\python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\python36\lib\multiprocessing\popen_spawn_win32.py", line 33, in __init__
prep_data = spawn.get_preparation_data(process_obj._name)
File "C:\python36\lib\multiprocessing\spawn.py", line 143, in get_preparation_data
_check_not_importing_main()
File "C:\python36\lib\multiprocessing\spawn.py", line 136, in _check_not_importing_main
is not going to be frozen to produce an executable.''')
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
Exception ignored in: <bound method InputDevice.__del__ of inputs.Keyboard("/dev/input/by-id/usb-A_Nice_Keyboard-event-kbd")>
Traceback (most recent call last):
File "C:\python36\lib\site-packages\inputs.py", line 2337, in __del__
File "C:\python36\lib\multiprocessing\process.py", line 116, in terminate
AttributeError: 'NoneType' object has no attribute 'terminate'
Define your steps inside a function, like:
import inputs

def func():
    while True:
        for device in inputs.devices:
            event = device.read()
            print(event)
And then call your function using:
if __name__ == '__main__':
    func()
