I am trying to mock data1 and data2 and provide a return value for each.
I have the following code:
test1.py:
import pandas

def main():
    data1 = pandas.read_excel('path1')
    data2 = pandas.read_excel('path2')

if __name__ == '__main__':
    main()
And here is my test file:
import test1
from unittest.mock import patch
import pandas

class Testdata(unittest.TestCase):
    @patch('test1.main.data1')
    @patch('test1.main.data2')
    def test_main(self, mock_data1, mock_data2):
        mock_data1.return_value = pandas.DataFrame([some dataframe])
        mock_data2.return_value = pandas.DataFrame([some dataframe])
        test.main()
        data1.assert_called_once()
        data2.assert_called_once()

if __name__ == '__main__':
    unittest.main()
I am getting the following error:
Error
Traceback (most recent call last):
File "C:\apps\python\3.6.2\lib\unittest\case.py", line 59, in testPartExecutor
yield
File "C:\apps\python\3.6.2\lib\unittest\case.py", line 605, in run
testMethod()
File "C:\apps\python\3.6.2\lib\unittest\mock.py", line 1171, in patched
arg = patching.__enter__()
File "C:\apps\python\3.6.2\lib\unittest\mock.py", line 1227, in __enter__
self.target = self.getter()
File "C:\apps\python\3.6.2\lib\unittest\mock.py", line 1397, in <lambda>
getter = lambda: _importer(target)
File "C:\apps\python\3.6.2\lib\unittest\mock.py", line 1080, in _importer
thing = __import__(import_path)
ModuleNotFoundError: No module named 'main'
How do I resolve this error, and how can I mock data1 and data2 and provide return values for them?
I can't say much without seeing the full code, but I think adding import unittest at the top should do the job.
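Not a definitive fix, but a minimal sketch of how the test might look once unittest is imported and the patch target points at pandas.read_excel as it is used inside test1 (the DataFrame contents are placeholder values, and the target string is an assumption about how test1 is laid out):
import unittest
from unittest.mock import patch
import pandas
import test1

class TestData(unittest.TestCase):
    # Patch read_excel on the pandas module that test1 imported, so both
    # data1 and data2 receive the stubbed DataFrame.
    @patch('test1.pandas.read_excel')
    def test_main(self, mock_read_excel):
        mock_read_excel.return_value = pandas.DataFrame({'col': [1, 2]})
        test1.main()
        self.assertEqual(mock_read_excel.call_count, 2)  # once per file path

if __name__ == '__main__':
    unittest.main()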
Related
I am currently writing unit tests and I keep running into this problem:
I want to mock a function from an external class. When I run foo.py directly, everything works fine, but I can't explain why the mocking isn't working.
Can someone please tell me what I am doing wrong?
test.py
import unittest
import mock
from parentfolder.foo import Foo

class testFoo(unittest.TestCase):
    @mock.patch('test.Handler.get_value', return_value='client')
    def test_verify_client(self, mock1):
        foo = Foo()
        result = foo.verify_client()
        self.assertTrue(result)

    @mock.patch('handler.Handler.get_out', return_value='client')
    def test_return_out0(self, mock1):
        foo = Foo()
        result = foo.return_out()
        self.assertEqual(result, "This is function ouput")

if __name__ == "__main__":
    unittest.main()
Error
Ran 2 tests in 0.006s
FAILED (errors=2)
Error
Traceback (most recent call last):
File "D:\dev\test_unittest\lib\site-packages\mock\mock.py", line 1343, in patched
with self.decoration_helper(patched,
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.9_3.9.3568.0_x64__qbz5n2kfra8p0\lib\contextlib.py", line 119, in __enter__
return next(self.gen)
File "D:\dev\test_unittest\lib\site-packages\mock\mock.py", line 1325, in decoration_helper
arg = exit_stack.enter_context(patching)
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.9_3.9.3568.0_x64__qbz5n2kfra8p0\lib\contextlib.py", line 448, in enter_context
result = _cm_type.__enter__(cm)
File "D:\dev\test_unittest\lib\site-packages\mock\mock.py", line 1398, in __enter__
self.target = self.getter()
File "D:\dev\test_unittest\lib\site-packages\mock\mock.py", line 1573, in <lambda>
getter = lambda: _importer(target)
File "D:\dev\test_unittest\lib\site-packages\mock\mock.py", line 1245, in _importer
thing = __import__(import_path)
ModuleNotFoundError: No module named 'Handler'
foo.py
from parentfolder.handler import Handler

class Foo:
    def __init__(self):
        self.handler = Handler()

    def verify_client(self):
        client = self.handler.get_value('client')
        return client == 'client'

    def return_out(self):
        c = self.handler.get_out()
        print(c)
        return c

if __name__ == '__main__':
    foo = Foo()
    re = foo.verify_client()
    print(re)
    re2 = foo.return_out()
    print(re2)
handler.py
class Handler:
    def get_value(self, value):
        return value

    def get_out(self):
        return "This is function ouput"
Folder Structure
Code:
from aiohttp import web
from aiortc.mediastreams import MediaStreamTrack
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
import asyncio
import json
import os
from multiprocessing import Process, freeze_support
from queue import Queue
import sys
import threading
from time import sleep
import fractions
import time

class RadioServer(Process):

    def __init__(self, q):
        super().__init__()
        self.q = q
        self.ROOT = os.path.dirname(__file__)
        self.pcs = []
        self.channels = []
        self.stream_offers = []
        self.requests = []

    def run(self):
        self.app = web.Application()
        self.app.on_shutdown.append(self.on_shutdown)
        self.app.router.add_get("/", self.index)
        self.app.router.add_get("/radio.js", self.javascript)
        self.app.router.add_get("/jquery-3.5.1.min.js", self.jquery)
        self.app.router.add_post("/offer", self.offer)
        threading.Thread(target=self.fill_the_queues).start()
        web.run_app(self.app, access_log=None, host="192.168.1.20", port="8080", ssl_context=None)

    def fill_the_queues(self):
        while(True):
            frame = self.q.get()
            for stream_offer in self.stream_offers:
                stream_offer.q.put(frame)

    async def index(self, request):
        content = open(os.path.join(self.ROOT, "index.html"), encoding="utf8").read()
        return web.Response(content_type="text/html", text=content)

    async def javascript(self, request):
        content = open(os.path.join(self.ROOT, "radio.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def jquery(self, request):
        content = open(os.path.join(self.ROOT, "jquery-3.5.1.min.js"), encoding="utf8").read()
        return web.Response(content_type="application/javascript", text=content)

    async def offer(self, request):
        params = await request.json()
        offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
        pc = RTCPeerConnection()
        self.pcs.append(pc)
        self.requests.append(request)

        # prepare epalxeis media
        self.stream_offers.append(CustomRadioStream())
        pc.addTrack(self.stream_offers[-1])

        @pc.on("iceconnectionstatechange")
        async def on_iceconnectionstatechange():
            if pc.iceConnectionState == "failed":
                self.pcs.remove(pc)
                self.requests.remove(request)
                print(str(request.remote)+" disconnected from radio server")
                print("Current peer connections:"+str(len(self.pcs)))

        # handle offer
        await pc.setRemoteDescription(offer)

        # send answer
        answer = await pc.createAnswer()
        await pc.setLocalDescription(answer)

        return web.Response(content_type="application/json", text=json.dumps({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}))

    async def on_shutdown(self, app):
        # close peer connections
        if self.pcs:
            coros = [pc.close() for pc in self.pcs]
            await asyncio.gather(*coros)
        self.pcs = []
        self.channels = []
        self.stream_offers = []

"""
some other classes here such as CustomRadioStream and RadioOutputStream
"""

if __name__ == "__main__":
    freeze_support()
    q = Queue()
    custom_server_child_process = RadioServer(q)
    custom_server_child_process.start()
Error
Traceback (most recent call last):
File "123.py", line 106, in <module>
custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
What am I doing wrong?
If I call the run function directly (instead of start), there is no problem, but I want to use a separate process for this class.
Edit: OK, with multiprocessing.Queue it works fine, but now similar code produces this error:
$ python "Papinhio_player.py"
Traceback (most recent call last):
File "Papinhio_player.py", line 3078, in <module>
program = PapinhioPlayerCode()
File "Papinhio_player.py", line 250, in __init__
self.manage_decks_instance = Manage_Decks(self)
File "C:\python\scripts\Papinhio player\src\main\python_files/manage_decks.py", line 356, in __init__
self.custom_server_child_process.start()
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/process.py", line 121, in start
self._popen = self._Popen(self)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/context.py", line 327, in _Popen
return Popen(process_obj)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
File "stringsource", line 2, in av.audio.codeccontext.AudioCodecContext.__reduce_cython__
TypeError: self.parser,self.ptr cannot be converted to a Python object for pickling
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:/msys64/mingw64/lib/python3.8/multiprocessing/spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
Some objects cannot be serialized and then deserialized.
The stack trace you posted mentions:
TypeError: cannot pickle '_thread.lock' object
A lock, which holds state in memory and guarantees that no other process can own the same lock at the same moment, is typically a very bad candidate for this operation -- what should be created when you deserialize it?
To fix this: choose a way to select the relevant fields of the object you want to serialize, and pickle/unpickle only that part.
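A minimal sketch of that idea using __getstate__/__setstate__ (the class and attribute names here are illustrative, not taken from the RadioServer code above):
import pickle
import threading

class Worker:
    def __init__(self, name):
        self.name = name                # plain data: safe to pickle
        self._lock = threading.Lock()   # wraps a _thread.lock: not picklable

    def __getstate__(self):
        # Serialize only the relevant fields and drop the lock.
        state = self.__dict__.copy()
        del state['_lock']
        return state

    def __setstate__(self, state):
        # Recreate the lock when the object is rebuilt on the other side.
        self.__dict__.update(state)
        self._lock = threading.Lock()

w = Worker("radio")
restored = pickle.loads(pickle.dumps(w))  # no longer raises TypeError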
I'm trying to write a program that takes a screen capture using a global hotkey. Below is the corresponding code:
from datetime import datetime
import os
from pynput import keyboard
import pyautogui
import pathlib

def on_activate():
    today = datetime.now()
    d = today.strftime("%Y-%m-%d-%H-%M-%S")
    myScreenshot = pyautogui.screenshot(region=(200,200, 1720, 800))
    time_stamp = '{:%y%d%m}'.format(today)
    fpath = pathlib.Path("C:","Users","Desktop","TestScreenCap", time_stamp)
    if not os.path.exists(fpath):
        os.makedirs(fpath)
    myScreenshot.save(pathlib.Path(fpath,d,".jpg"))

def for_canonical(f):
    return lambda k: f(l.canonical(k))

hotkey = keyboard.HotKey(
    keyboard.HotKey.parse('<ctrl>+<alt>+h'),
    on_activate)

with keyboard.Listener(
        on_press=for_canonical(hotkey.press),
        on_release=for_canonical(hotkey.release)) as l:
    l.join()
Error Message:
Unhandled exception in listener callback
Traceback (most recent call last):
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\PIL\Image.py", line 2138, in save format = EXTENSION[ext]
KeyError: ''
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\_util\__init__.py", line 211, in inner
return f(self, *args, **kwargs)
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\keyboard\_win32.py", line 280, in _process self.on_press(key)
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\_util\__init__.py", line 127, in inner
if f(*args) is False:
File "C:/Users/PycharmProjects/untitled1/123.py", line 18, in <lambda>
return lambda k: f(l.canonical(k))
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\keyboard\__init__.py", line 182, in press
self._on_activate()
File "C:/Users/PycharmProjects/untitled1/123.py", line 15, in on_activate
myScreenshot.save(pathlib.Path(fpath,d,".jpg"))
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\PIL\Image.py", line 2140, in save
raise ValueError("unknown file extension: {}".format(ext)) from e
ValueError: unknown file extension:
Traceback (most recent call last):
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\PIL\Image.py", line 2138, in save
format = EXTENSION[ext]
KeyError: ''
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/PycharmProjects/untitled1/123.py", line 26, in <module>
l.join()
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\_util\__init__.py", line 259, in join
six.reraise(exc_type, exc_value, exc_traceback)
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\six.py", line 702, in reraise
raise value.with_traceback(tb)
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\_util\__init__.py", line 211, in inner
return f(self, *args, **kwargs)
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\keyboard\_win32.py", line 280, in _process
self.on_press(key)
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\_util\__init__.py", line 127, in inner
if f(*args) is False:
File "C:/Users/PycharmProjects/untitled1/123.py", line 18, in <lambda>
return lambda k: f(l.canonical(k))
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\pynput\keyboard\__init__.py", line 182, in press
self._on_activate()
File "C:/Users/PycharmProjects/untitled1/123.py", line 15, in on_activate
myScreenshot.save(pathlib.Path(fpath,d,".jpg"))
File "C:\Users\PycharmProjects\untitled1\venv\lib\site-packages\PIL\Image.py", line 2140, in save
raise ValueError("unknown file extension: {}".format(ext)) from e
ValueError: unknown file extension:
I do not know how to save the image to the folder. I suspect the problem is this line:
myScreenshot.save(pathlib.Path(fpath,d,".jpg"))
If I change the on_activate function to a simple action such as:
print("Hello")
the hotkey script works perfectly.
I am new to programming, so any help is appreciated!
Here is the fixed code:
from datetime import datetime
import os
from pynput import keyboard
import pyautogui
import pathlib

def on_activate():
    today = datetime.now()
    d = today.strftime("%Y-%m-%d-%H-%M-%S")
    myScreenshot = pyautogui.screenshot(region=(200,200, 1720, 800))
    time_stamp = '{:%y%d%m}'.format(today)
    fpath = pathlib.Path("C:\\","Users","Desktop","TestScreenCap", time_stamp)
    if not os.path.exists(fpath):
        os.makedirs(fpath)
    myScreenshot.save(pathlib.Path(fpath, d + ".jpg"))

def for_canonical(f):
    return lambda k: f(l.canonical(k))

hotkey = keyboard.HotKey(
    keyboard.HotKey.parse('<ctrl>+<alt>+h'),
    on_activate)

with keyboard.Listener(
        on_press=for_canonical(hotkey.press),
        on_release=for_canonical(hotkey.release)) as l:
    l.join()
The problem was that you were using:
myScreenshot.save(pathlib.Path(fpath,d,".jpg"))
The call pathlib.Path(fpath, d, ".jpg") treats each argument as a separate path component and joins them with \, so the path where you are trying to save the file is:
C:Users\Desktop\TestScreenCap\200309\2020-09-03-16-50-20\.jpg
As you can see, the filename is just the extension, which is what throws the exception.
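To make the joining behaviour concrete, here is a quick sketch (using PureWindowsPath so it can be run anywhere; the timestamp values are just examples):
import pathlib

wrong = pathlib.PureWindowsPath("C:", "Users", "Desktop", "TestScreenCap",
                                "200309", "2020-09-03-16-50-20", ".jpg")
print(wrong)         # C:Users\Desktop\TestScreenCap\200309\2020-09-03-16-50-20\.jpg
print(wrong.suffix)  # '' -> PIL cannot determine the format, hence the error

fixed = pathlib.PureWindowsPath("C:\\", "Users", "Desktop", "TestScreenCap",
                                "200309", "2020-09-03-16-50-20" + ".jpg")
print(fixed.suffix)  # '.jpg'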
Just replace it with: myScreenshot.save(pathlib.Path(fpath, d + ".jpg"))
Another problem with the specified path is that C: is interpreted as a drive letter without a root, so the file is saved relative to the directory where your program is currently running, not at the intended absolute path. To fix this part, just replace C: with C:\\.
Reading your code, I think you want to save the screenshot to the current user's Desktop. If so, replace the line fpath = pathlib.Path("C:\\","Users","Desktop","TestScreenCap", time_stamp) with fpath = pathlib.Path(os.path.join(os.environ['USERPROFILE'], 'Desktop'), time_stamp) and add import os at the top. This way you don't need to worry about building the path by hand.
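Putting those suggestions together, a minimal sketch of on_activate could look like this (the folder name TestScreenCap and the capture region are taken from the question; the pathlib / operator is used here instead of os.path.join, which does the same thing):
from datetime import datetime
import os
import pathlib
import pyautogui

def on_activate():
    today = datetime.now()
    d = today.strftime("%Y-%m-%d-%H-%M-%S")
    time_stamp = '{:%y%d%m}'.format(today)
    # Build the folder under the current user's Desktop (Windows only, since
    # it relies on the USERPROFILE environment variable).
    fpath = pathlib.Path(os.environ['USERPROFILE']) / 'Desktop' / 'TestScreenCap' / time_stamp
    fpath.mkdir(parents=True, exist_ok=True)
    shot = pyautogui.screenshot(region=(200, 200, 1720, 800))
    shot.save(fpath / (d + ".jpg"))   # explicit .jpg suffix so PIL knows the format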
I am trying out AWS Neptune for the first time using Chalice.
This is the entire error:
Traceback (most recent call last):
File "/var/task/chalice/app.py", line 1104, in _get_view_function_response
response = view_function(**function_args)
File "/var/task/app.py", line 44, in getPosts
raise e
File "/var/task/app.py", line 37, in getPosts
result = g.V().has('name', 'test1').toList()
File "/var/task/gremlin_python/process/traversal.py", line 58, in toList
return list(iter(self))
File "/var/task/gremlin_python/process/traversal.py", line 48, in __next__
self.traversal_strategies.apply_strategies(self)
File "/var/task/gremlin_python/process/traversal.py", line 573, in apply_strategies
traversal_strategy.apply(traversal)
File "/var/task/gremlin_python/driver/remote_connection.py", line 149, in apply
remote_traversal = self.remote_connection.submit(traversal.bytecode)
File "/var/task/gremlin_python/driver/driver_remote_connection.py", line 55, in submit
result_set = self._client.submit(bytecode)
File "/var/task/gremlin_python/driver/client.py", line 111, in submit
return self.submitAsync(message, bindings=bindings).result()
File "/var/task/gremlin_python/driver/client.py", line 127, in submitAsync
return conn.write(message)
File "/var/task/gremlin_python/driver/connection.py", line 55, in write
self.connect()
File "/var/task/gremlin_python/driver/connection.py", line 45, in connect
self._transport.connect(self._url, self._headers)
File "/var/task/gremlin_python/driver/tornado/transport.py", line 36, in connect
lambda: websocket.websocket_connect(url))
File "/var/task/tornado/ioloop.py", line 576, in run_sync
return future_cell[0].result()
tornado.simple_httpclient.HTTPStreamClosedError: Stream closed
And here is my code:
import logging
from chalice import Chalice, BadRequestError, NotFoundError
from gremlin_python import statics
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __
from gremlin_python.process.strategies import *
from gremlin_python.process.traversal import T, P, Operator
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from datetime import datetime

app = Chalice(app_name='chalice-neptune')
app.debug = True

logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)

def setup_graph():
    try:
        graph = Graph()
        connstring = 'ws://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
        g = graph.traversal().withRemote(DriverRemoteConnection(connstring, 'g'))
        logging.info('Connected to Neptune')
    except Exception as e:
        logging.error(e, exc_info = True)
        raise BadRequestError("Could not connect to Neptune")
    return g

@app.route('/getPosts')
def getPosts():
    g = setup_graph()
    try:
        result = g.V().has('name', 'test1').toList()
        response = {
            'status_code': 200,
            'data': result
        }
    except Exception as e:
        raise e
    return response
Has anyone tried this?
I have followed the example found in this bucket: gremlin-python-example.
I don't think I missed anything from the example, but it is still throwing a stream closed error.
Apparently the only thing I changed was my connection string and it is now working fine.
connstring = 'wss://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
I changed it from ws to wss.
As for the difference between the two, you can refer to this answer: Difference between ws and wss?
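For reference, this is the connection helper from the question with that single change applied (wss:// instead of ws://, which is what fixed the stream closed error here); everything else, including the endpoint placeholder, is unchanged:
import logging
from chalice import BadRequestError
from gremlin_python.structure.graph import Graph
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection

def setup_graph():
    try:
        graph = Graph()
        # Secure WebSocket endpoint rather than plain ws.
        connstring = 'wss://NEPTUNE-ENDPOINT-HERE:8182/gremlin'
        g = graph.traversal().withRemote(DriverRemoteConnection(connstring, 'g'))
        logging.info('Connected to Neptune')
    except Exception as e:
        logging.error(e, exc_info=True)
        raise BadRequestError("Could not connect to Neptune")
    return g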
I have a Python object, a list of dictionaries, and I want to fill each of those dicts with key-value pairs, in parallel across multiple processors, using Python's multiprocessing module. For that purpose I am using a Manager to store the object. Here is the code:
from pylab import *
from numpy.random import *
import multiprocessing
import threading
import random

def tasks_start(id, global_lists):
    counter_lock = threading.Lock()
    with counter_lock:
        num = int(10*random.random())
        global_lists[num] = {'1':'Random'}
        print("Id: ", id)
        print(global_lists[0])

if __name__ == '__main__':
    numProcessors = 6
    pool = multiprocessing.Pool(numProcessors)
    global_list = multiprocessing.Manager().list(range(100))
    for idx in range(100):
        global_list[idx] = multiprocessing.Manager().dict()
    tasks = []
    for id in range(10):
        tasks.append((id, global_list))
    pool.starmap(tasks_start, tasks)
    pool.close()
    pool.join()
So what I am doing here is creating a list of dictionaries stored as global_list and then calling the tasks_start() method 10 times using Python's starmap() (just so that I can later extend it to multiple arguments) to fill the list of dictionaries. As a simple test case, I just use the random generator to pick one dictionary from the list each time and fill it with some value. When I run the program, the following error occurs:
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/usr/lib/python3.4/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/usr/lib/python3.4/multiprocessing/pool.py", line 47, in starmapstar
return list(itertools.starmap(args[0], args[1]))
File "/home/cysis/inhibition_soum/motif_temporal_patterns/code_versions/2016/09/09_08/parallel_test/test_error_manager.py", line 14, in tasks_start
print(global_lists[0])
File "<string>", line 2, in __getitem__
File "/usr/lib/python3.4/multiprocessing/managers.py", line 732, in _callmethod
kind, result = conn.recv()
File "/usr/lib/python3.4/multiprocessing/connection.py", line 251, in recv
return ForkingPickler.loads(buf.getbuffer())
File "/usr/lib/python3.4/multiprocessing/managers.py", line 852, in RebuildProxy
return func(token, serializer, incref=incref, **kwds)
File "/usr/lib/python3.4/multiprocessing/managers.py", line 706, in __init__
self._incref()
File "/usr/lib/python3.4/multiprocessing/managers.py", line 756, in _incref
conn = self._Client(self._token.address, authkey=self._authkey)
File "/usr/lib/python3.4/multiprocessing/connection.py", line 495, in Client
c = SocketClient(address)
File "/usr/lib/python3.4/multiprocessing/connection.py", line 624, in SocketClient
s.connect(address)
FileNotFoundError: [Errno 2] No such file or directory
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/cysis/inhibition_soum/motif_temporal_patterns/code_versions/2016/09/09_08/parallel_test/test_error_manager.py", line 29, in <module>
pool.starmap(tasks_start, tasks)
File "/usr/lib/python3.4/multiprocessing/pool.py", line 268, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "/usr/lib/python3.4/multiprocessing/pool.py", line 599, in get
raise self._value
FileNotFoundError: [Errno 2] No such file or directory
In my opinion, before the last print(global_lists[0]) is executed, the Manager exits and is therefore not able to find global_lists[0]. Can anybody shed some light on this?
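One detail worth checking, if that hypothesis is on the right track: each multiprocessing.Manager() call starts its own manager process, and the throwaway managers created inside the loop have no reference kept to them, so they may shut down before the workers use the proxies. A sketch of the same test with a single, long-lived Manager (an assumption on my part, not a verified fix for the code above):
import multiprocessing
import random

def tasks_start(id, global_lists):
    num = int(10 * random.random())
    global_lists[num] = {'1': 'Random'}
    print("Id: ", id)
    print(global_lists[0])

if __name__ == '__main__':
    manager = multiprocessing.Manager()   # one manager kept alive for the whole run
    global_list = manager.list([manager.dict() for _ in range(100)])
    with multiprocessing.Pool(6) as pool:
        pool.starmap(tasks_start, [(i, global_list) for i in range(10)])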