I have this function:
def function_to_test(..):
    # some stuff
    response = requests.post("some.url", data={'dome': 'data'})
    # some stuff with response
I want to write a test, but mock requests.post("some.url", data={'dome': 'data'}) because I know it works. I need something like:
def my_test():
    patch('requests.post', Mock(return_value={'some': 'values'})).start()
    # test for *function_to_test*
Is that possible? If so, how?
Edit:
I found these ways:
class MockObject(object):
    status_code = 200
    content = {'some': 'data'}
# First way
patch('requests.post', Mock(return_value=MockObject())).start()
# Second way
import requests
requests.post = Mock(return_value=MockObject())
Are these approaches good? Which one is better? Is there another one?
You can use HTTPretty (https://github.com/gabrielfalcao/HTTPretty), a library made just for this kind of mocking.
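For example, a minimal sketch of how a test could look for the function above (the URL and payload are assumptions carried over from the question; HTTPretty needs a full URL including the scheme):

import httpretty
import requests

@httpretty.activate
def test_function_to_test():
    # register a canned response for the POSTed URL
    httpretty.register_uri(httpretty.POST, "http://some.url/",
                           body='{"some": "values"}',
                           content_type="application/json")
    response = requests.post("http://some.url/", data={'dome': 'data'})
    assert response.json() == {'some': 'values'}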
You can use flexmock for that; see the example below.
Say you have a function in the file ./myfunc.py:
# ./myfunc.py
import requests

def function_to_test():
    # some stuff
    response = requests.post("www.google.com", data={'dome': 'data'})
    return response
Then the test case is in ./test_myfunc.py:
# ./test_myfunc.py
from flexmock import flexmock
import myfunc

def test_myfunc():
    (flexmock(myfunc.requests)
        .should_receive("post")
        .with_args("www.google.com", data={"dome": "data"})
        .and_return({'some': 'values'}))
    resp = myfunc.function_to_test()
    assert resp["some"] == "values"
Try this and see if it works, or let me know if it needs more enhancement/improvement.
After continued testing, I prefer to use this to mock requests. Hope it helps others.
from unittest.mock import Mock, patch  # on Python 2: from mock import Mock, patch

class MockObject(object):
    status_code = 200
    content = {'some': 'data'}

patch('requests.post', Mock(return_value=MockObject())).start()
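One caveat worth adding (my note, not part of the original answer): a patch started with .start() stays active until it is stopped, which can leak into other tests. A minimal sketch of the safer pattern:

from unittest.mock import Mock, patch

patcher = patch('requests.post', Mock(return_value=MockObject()))
patcher.start()
try:
    # test for function_to_test
    pass
finally:
    patcher.stop()  # or call patch.stopall() in a tearDown/fixture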
How can I mock or unit test a function/method that uses urllib.request.urlretrieve to save a file?
This is the part of the code I'm trying to test:
from urllib import request, error
from config import cvs_site, proxy

class Collector(object):
    """Class Collector"""
    ...

    def __init__(self, code_num=""):
        self.code_num = code_num.upper()
        self.csv_file = "csv_files/code_num.csv"

        # load proxy if it is configured
        if proxy:
            proxies = {"http": proxy, "https": proxy, "ftp": proxy}
            proxy_connect = request.ProxyHandler(proxies)
            opener = request.build_opener(proxy_connect)
            request.install_opener(opener)

        def _collect_data():
            try:
                print("\nAccessing to retrieve CVS informations.")
                request.urlretrieve(cvs_site, self.csv_file)
            except error.URLError as e:
                exit("\033[1;31m[ERROR]\033[1;00m {0}\n".format(e))
        ...

    def some_function(self):
        _collect_data()
        ...
Should I test all internal functions (the _functions())?
How do I mock this?
To solve it I made some modifications to my code and then created the test with mock.
REMARK: I'm still learning unit tests and mock, so any comments here are welcome because I'm not confident that I went the correct way :)
The function _collect_data() did not need to be inside __init__(), so I moved it out.
_collect_data() is a function with a specific action, saving the file, but it needs to return something so that the mock can work with it.
The argument was moved from the class to the function.
The new code looks like this:
from config import proxy
from config import cvs_site
from urllib import request, error

class Collector(object):
    """Class Collector"""

    def __init__(self):
        self.csv_file = "csv_files/code_num.csv"

        # load proxy if it is configured
        if proxy:
            proxies = {"http": proxy, "https": proxy, "ftp": proxy}
            proxy_connect = request.ProxyHandler(proxies)
            opener = request.build_opener(proxy_connect)
            request.install_opener(opener)

    def _collect_data(self):
        try:
            print("\nAccessing to retrieve CVS informations.")
            return request.urlretrieve(cvs_site, self.csv_file)
        except error.URLError as e:
            return "\033[1;31m[ERROR]\033[1;00m {0}\n".format(e)

    ...

    def some_function(self, code_num=""):
        code_num = code_num.upper()
        self._collect_data()
        ...
To test this code I created this:
import unittest
import mock

from saassist.datacollector import Collector

class TestCollector(unittest.TestCase):

    def setUp(self):
        self.apar_test = Collector()

    @mock.patch("saassist.datacollector.request")
    def test_collect_data(self, mock_collect_data):
        mock_collect_data.urlretrieve.return_value = "File Collected OK"
        self.assertEqual("File Collected OK", self.apar_test._collect_data())
Well, I don't know what more could be tested for it, but as a start I think it is good :)
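Beyond checking the return value, one could also verify that the mocked request module was actually used. A small sketch of an extra test method for the TestCollector class above (my addition, not from the original answer):

@mock.patch("saassist.datacollector.request")
def test_collect_data_calls_urlretrieve(self, mock_request):
    mock_request.urlretrieve.return_value = "File Collected OK"
    self.apar_test._collect_data()
    # the mock records every call, so we can check urlretrieve was used exactly once
    self.assertEqual(mock_request.urlretrieve.call_count, 1)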
I am trying to GET a URL using Python and the response is JSON. However, when I run
import urllib2
response = urllib2.urlopen('https://api.instagram.com/v1/tags/pizza/media/XXXXXX')
html=response.read()
print html
The html is of type str, and I am expecting JSON. Is there any way I can capture the response as JSON or a Python dictionary instead of a str?
If the URL is returning valid JSON-encoded data, use the json library to decode that:
import urllib2
import json
response = urllib2.urlopen('https://api.instagram.com/v1/tags/pizza/media/XXXXXX')
data = json.load(response)
print data
import json
import urllib.request

url = 'http://example.com/file.json'
r = urllib.request.urlopen(url)
data = json.loads(r.read().decode(r.info().get_param('charset') or 'utf-8'))
print(data)
References: urllib (for Python 3.4) and HTTPMessage, as returned by r.info().
"""
Return JSON to webpage
Adding to wonderful answer by #Sanal
For Django 3.4
Adding a working url that returns a json (Source: http://www.jsontest.com/#echo)
"""
import json
import urllib
url = 'http://echo.jsontest.com/insert-key-here/insert-value-here/key/value'
respons = urllib.request.urlopen(url)
data = json.loads(respons.read().decode(respons.info().get_param('charset') or 'utf-8'))
return HttpResponse(json.dumps(data), content_type="application/json")
Be careful about validation and so on, but the straightforward solution is this:
import json
the_dict = json.load(response)
resource_url = 'http://localhost:8080/service/'
response = json.loads(urllib2.urlopen(resource_url).read())
Python 3 standard library one-liner:
load(urlopen(url))
# imports (place these above the code before running it)
from json import load
from urllib.request import urlopen
url = 'https://jsonplaceholder.typicode.com/todos/1'
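For completeness, a quick usage sketch (the 'title' key is an assumption about this particular placeholder endpoint):

data = load(urlopen(url))
print(data['title'])  # the todos/1 resource carries a 'title' field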
You can also get JSON by using requests, as below:
import requests
r = requests.get('http://yoursite.com/your-json-pfile.json')
json_response = r.json()
Though I guess it has already been answered, I would like to add my little bit to this:
import json
import urllib2

class Website(object):

    def __init__(self, name):
        self.name = name

    def dump(self):
        self.data = urllib2.urlopen(self.name)
        return self.data

    def convJSON(self):
        data = json.load(self.dump())
        print data

domain = Website("https://example.com")
domain.convJSON()
Note: the object passed to json.load() should support .read(); therefore urllib2.urlopen(self.name).read() would not work, since .read() returns a str.
The domain passed should be provided with the protocol, http or https in this case.
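To illustrate that note (my sketch, not part of the original answer; the URL is hypothetical): json.load() wants a file-like object, while json.loads() takes the string that .read() returns:

import json
import urllib2

response = urllib2.urlopen("https://example.com/data.json")  # hypothetical JSON endpoint
data = json.loads(response.read())  # .read() gives a str, so use loads(), not load()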
This is another, simpler solution to your question, using pandas:
import pandas as pd
df = pd.read_json(json_data)
where json_data is the str output from the following code:
from urllib.request import urlopen
response = urlopen("https://data.nasa.gov/resource/y77d-th95.json")
json_data = response.read().decode('utf-8', 'replace')
None of the provided examples here worked for me. They were either for Python 2 (urllib2), or those for Python 3 returned the error "ImportError: No module named request". I googled the error message, and it apparently requires me to install the module, which is obviously unacceptable for such a simple task.
This code worked for me:
import json
import urllib

data = urllib.urlopen("https://api.github.com/users?since=0").read()
d = json.loads(data)
print(d)
I want to test my web service (built on Tornado) using tornado.testing.AsyncHTTPTestCase. It says here that a POST with AsyncHTTPClient should look like the following.
import unittest

import app  # the application under test
from tornado.testing import AsyncHTTPTestCase
from urllib import urlencode

class ApplicationTestCase(AsyncHTTPTestCase):

    def get_app(self):
        return app.Application()

    def test_file_uploading(self):
        url = '/'
        filepath = 'uploading_file.zip'  # Binary file
        data = ???????  # Read from "filepath" and put the generated something into "data"
        self.http_client.fetch(self.get_url(url),
                               self.stop,
                               method="POST",
                               data=urlencode(data))
        response = self.wait()
        self.assertEqual(response.code, 302)  # Do assertion

if __name__ == '__main__':
    unittest.main()
The problem is that I've no idea what to write at ???????. Are there any utility functions built into Tornado, or is it better to use an alternative library like Requests?
P.S.
Actually, I've tried using Requests, but my test stopped working, probably because I didn't handle the asynchronous tasking correctly:
def test_file_uploading(self):
    url = '/'
    filepath = 'uploading_file.zip'  # Binary file
    files = {'file': open(filepath, 'rb')}
    r = requests.post(self.get_url(url), files=files)  # Freezes here
    self.assertEqual(r.status_code, 302)  # Do assertion
You need to construct a multipart/form-data request body. This is officially defined in the HTML spec. Tornado does not currently have any helper functions for generating a multipart body. However, you can use the MultipartEncoder class from the requests_toolbelt package. Just use the to_string() method instead of passing the encoder object directly to fetch().
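A minimal sketch of what that could look like in the test above (assuming the requests_toolbelt package is installed; note that Tornado's fetch() takes the encoded bytes via body=, plus the matching Content-Type header from the encoder):

from requests_toolbelt import MultipartEncoder

def test_file_uploading(self):
    encoder = MultipartEncoder(fields={
        'file': ('uploading_file.zip', open('uploading_file.zip', 'rb'), 'application/zip'),
    })
    self.http_client.fetch(self.get_url('/'),
                           self.stop,
                           method="POST",
                           headers={'Content-Type': encoder.content_type},
                           body=encoder.to_string())
    response = self.wait()
    self.assertEqual(response.code, 302)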
Is there a cleaner way to modify some parts of a URL in Python 2?
For example
http://foo/bar -> http://foo/yah
At present, I'm doing this:
import urlparse
url = 'http://foo/bar'
# Modify path component of URL from 'bar' to 'yah'
# Use nasty convert-to-list hack due to urlparse.ParseResult being immutable
parts = list(urlparse.urlparse(url))
parts[2] = 'yah'
url = urlparse.urlunparse(parts)
Is there a cleaner solution?
Unfortunately, the documentation is out of date; the results produced by urlparse.urlparse() (and urlparse.urlsplit()) use a collections.namedtuple()-produced class as a base.
Don't turn this namedtuple into a list, but make use of the utility method provided for just this task:
parts = urlparse.urlparse(url)
parts = parts._replace(path='yah')
url = parts.geturl()
The namedtuple._replace() method lets you create a new copy with specific elements replaced. The ParseResult.geturl() method then re-joins the parts into a url for you.
Demo:
>>> import urlparse
>>> url = 'http://foo/bar'
>>> parts = urlparse.urlparse(url)
>>> parts = parts._replace(path='yah')
>>> parts.geturl()
'http://foo/yah'
mgilson filed a bug report (with patch) to address the documentation issue.
I guess the proper way to do it is this way, since using _replace or other underscore-prefixed methods/variables is usually not suggested.
from urlparse import urlparse, urlunparse
res = urlparse('http://www.goog.com:80/this/is/path/;param=paramval?q=val&foo=bar#hash')
l_res = list(res)
# this will have ['http', 'www.goog.com:80', '/this/is/path/', 'param=paramval', 'q=val&foo=bar', 'hash']
l_res[2] = '/new/path'
urlunparse(l_res)
# outputs 'http://www.goog.com:80/new/path;param=paramval?q=val&foo=bar#hash'
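For reference (my addition, not from the original answers): in Python 3 the same module lives at urllib.parse, and both approaches carry over unchanged:

from urllib.parse import urlparse

parts = urlparse('http://foo/bar')
print(parts._replace(path='yah').geturl())  # 'http://foo/yah'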