SSL Error occurs on one computer but not the other? - python

I can't figure out why all of a sudden the below code that uses Asana's API generates the below SSL error. Something must have changed on my laptop, since it runs perfectly on my other computer.
from asana import asana
class Login(object):
def __init__(self):
api = 'API'
self.asana_api = asana.AsanaAPI(api, debug=False)
self.user_id = 7359085011308L
class Test(Login):
def Test(self):
Id = 2467584555313L
print self.asana_api.list_tasks(Id,self.user_id)
Traceback (most recent call last):
File "/Users/Chris/Dropbox/AsanaPullPush.py", line 75, in <module>
if __name__ == "__main__": main()
File "/Users/Chris/Dropbox/AsanaPullPush.py", line 72, in main
print Test().Test()
File "/Users/Chris/Dropbox/AsanaPullPush.py", line 15, in Test
print self.asana_api.list_tasks(Id,self.user_id)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/asana/asana.py", line 174, in list_tasks
return self._asana(target)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/asana/asana.py", line 74, in _asana
r = requests.get(target, auth=(self.apikey, ""))
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/api.py", line 55, in get
return request('get', url, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/api.py", line 44, in request
return session.request(method=method, url=url, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/sessions.py", line 383, in request
resp = self.send(prep, **send_kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/sessions.py", line 486, in send
r = adapter.send(request, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/requests/adapters.py", line 389, in send
raise SSLError(e)
requests.exceptions.SSLError: [Errno 1] _ssl.c:507: error:0D0890A1:asn1 encoding routines:ASN1_verify:unknown message digest algorithm

We recently changed our SSL key in response to the Heartbleed bug you may have heard about. http://blog.asana.com/2014/04/heartbleed/
It looks like your laptop may not have the right SSL. See https://github.com/pypa/pip/issues/829 for discussion of a similar issue.
You should be able to check SSL version on the two machines with python -c "import ssl; print ssl.OPENSSL_VERSION". If indeed the laptop is behind, you'll need to update your python's SSL.

Related

Download with Python from URL gives No Host Supplied error

I am trying to make app that download comics but whenever I try to download an image, it says no host supplied.
I really searched and there was nothing.
This is the code:
import requests, bs4
from urllib.parse import urljoin

url = 'https://www.marvel.com/comics/issue/71314/edge_of_spider-geddon_2018_1'
res = requests.get(url, stream=True)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text)
elem = soup.select('div[class="row-item-image"] img')
# The scraped src is already an absolute URL, so blindly prepending "https:"
# produced "https:https://..." and the InvalidURL ("No host supplied") error.
# urljoin handles absolute, protocol-relative ("//host/...") and relative
# srcs correctly against the page URL.
comicurl = urljoin(url, elem[0].get('src'))
res = requests.get(comicurl, stream=True, allow_redirects=True)
res.raise_for_status()
# Save under the last path component of the image URL.
with open(comicurl[comicurl.rfind('/') + 1:], 'wb') as img_file:
    for chunk in res.iter_content(100000):
        img_file.write(chunk)
I expect it to download the image but it gives me this error:
Traceback (most recent call last):
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\comicdownloader.py", line 10, in <module>
res=requests.get(comicurl,stream=True,allow_redirects=True)
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\api.py", line 75, in get
return request('get', url, params=params, **kwargs)
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\api.py", line 60, in request
return session.request(method=method, url=url, **kwargs)
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\sessions.py", line 519, in request
prep = self.prepare_request(req)
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\sessions.py", line 462, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\models.py", line 313, in prepare
self.prepare_url(url, params)
File "C:\Users\Islam\AppData\Local\Programs\Python\Python36\lib\site-packages\requests\models.py", line 390, in prepare_url
raise InvalidURL("Invalid URL %r: No host supplied" % url)
requests.exceptions.InvalidURL: Invalid URL 'https:https://i.annihil.us/u/prod/marvel/i/mg/6/b0/5b6c5e4154f75/portrait_uncanny.jpg': No host supplied
And it gives it to me whenever I try it on any website.
it looks like elem[0].get('src') evaluates to https://i.annihil.us/u/prod/marvel/i/mg/6/b0/5b6c5e4154f75/portrait_uncanny.jpg.
so on the line comicurl='https:'+elem[0].get('src') you add https: in front of an already well-formed URL, making it invalid
Can't argue with this: Invalid URL 'https:https://i.annihil.us/u/prod -- the URL is really invalid, probably you should get rid of https in the following statement:
comicurl='https:'+elem[0].get('src')

How to resolve proxy error in reading & writing to HDFS using Python?

I have a HDFS to which I want to read and write using a Python script.
import requests
import json
import os
import kerberos
import sys
# Namenode base URLs come from the "namenode" env var, comma-separated.
# NOTE(review): assumes the variable is exported; None would raise here.
node = os.getenv("namenode").split(",")
print(node)

# CLI: local file path, remote HDFS path, and a "read"/"write" mode selector.
local_file_path = sys.argv[1]
remote_file_path = sys.argv[2]
read_or_write = sys.argv[3]
print(local_file_path, remote_file_path)
def check_node_status(node):
    """Probe each namenode URL; return (status, url, host) of the active one.

    NOTE(review): if no node reports "active", the final variables are
    unbound and the return raises — behaviour kept as in the original.
    """
    for candidate in node:
        print(candidate)
        info = requests.get(
            "%s/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus" % candidate,
            verify=False).json()
        status = info["beans"][0]["State"]
        if status == "active":
            host_and_port = info["beans"][0]["HostAndPort"]
            nnaddress = host_and_port.split(":")[0]
            print(nnaddress)
            break
    return status, candidate, nnaddress
def kerberos_auth(nnaddress):
    """Run a GSSAPI (SPNEGO) handshake and return HTTP request headers.

    The kerberos service principal is written "service@host"; the original
    "HTTP#%s" ('#' instead of '@') yields an invalid principal and the
    negotiation fails.
    """
    __, krb_context = kerberos.authGSSClientInit("HTTP@%s" % nnaddress)
    kerberos.authGSSClientStep(krb_context, "")
    negotiate_details = kerberos.authGSSClientResponse(krb_context)
    headers = {"Authorization": "Negotiate " + negotiate_details,
               "Content-Type": "application/binary"}
    return headers
def kerberos_hdfs_upload(status, name, headers):
    """PUT the module-level local_file_path to WebHDFS on the active node.

    `name` is the namenode base URL; `headers` carry the SPNEGO token.
    """
    print("running upload function")
    if status == "active":
        print("if function")
        # Context manager closes the handle even on error; the original
        # open(...).read() leaked the file descriptor.
        with open(local_file_path, 'rb') as fh:
            data = fh.read()
        write_req = requests.put(
            "%s/webhdfs/v1%s?op=CREATE&overwrite=true" % (name, remote_file_path),
            headers=headers,
            verify=False,
            allow_redirects=True,
            data=data)
        print(write_req.text)
def kerberos_hdfs_read(status, name, headers):
    """GET remote_file_path via WebHDFS and save it to local_file_path."""
    if status == "active":
        read = requests.get(
            "%s/webhdfs/v1%s?op=OPEN" % (name, remote_file_path),
            headers=headers,
            verify=False,
            allow_redirects=True)
        if read.status_code == 200:
            # with-statement guarantees the file is closed even if the
            # write raises (the original leaked the handle on failure).
            with open(local_file_path, 'wb') as out:
                out.write(read.content)
        else:
            print(read.content)
# Locate the active namenode, obtain SPNEGO auth headers, then run the
# requested operation.
status, name, nnaddress = check_node_status(node)
headers = kerberos_auth(nnaddress)
if read_or_write == "write":
    kerberos_hdfs_upload(status, name, headers)
elif read_or_write == "read":
    print("fun")
    kerberos_hdfs_read(status, name, headers)
The code works on my own machine which is not behind any proxy. But when running it in the office machine, which is behind a proxy, it is giving the following proxy error:
$ python3 python_hdfs.py ./1.png /user/testuser/2018-02-07_1.png write
['https://<servername>:50470', 'https:// <servername>:50470']
./1.png /user/testuser/2018-02-07_1.png
https://<servername>:50470
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 555, in urlopen
self._prepare_proxy(conn)
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 753, in _prepare_proxy
conn.connect()
File "/usr/lib/python3/dist-packages/urllib3/connection.py", line 230, in connect
self._tunnel()
File "/usr/lib/python3.5/http/client.py", line 832, in _tunnel
message.strip()))
OSError: Tunnel connection failed: 504 Unknown Host
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/requests/adapters.py", line 376, in send
timeout=timeout
File "/usr/lib/python3/dist-packages/urllib3/connectionpool.py", line 610, in urlopen
_stacktrace=sys.exc_info()[2])
File "/usr/lib/python3/dist-packages/urllib3/util/retry.py", line 273, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
requests.packages.urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='<servername>', port=50470): Max retries exceeded with url: /jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus (Caused by ProxyError('Cannot connect to proxy.', OSError('Tunnel connection failed: 504 Unknown Host',)))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "python_hdfs.py", line 68, in <module>
status, name, nnaddress= check_node_status(node)
File "python_hdfs.py", line 23, in check_node_status
verify=False).json()
File "/usr/lib/python3/dist-packages/requests/api.py", line 67, in get
return request('get', url, params=params, **kwargs)
File "/usr/lib/python3/dist-packages/requests/api.py", line 53, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/lib/python3/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python3/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python3/dist-packages/requests/adapters.py", line 437, in send
raise ConnectionError(e, request=request)
requests.exceptions.ConnectionError: HTTPSConnectionPool(host='<server_name>', port=50470): Max retries exceeded with url: /jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus (Caused by ProxyError('Cannot connect to proxy.', OSError('Tunnel connection failed: 504 Unknown Host',)))
I tried giving proxy info in the code, like so:
# Proxy URLs need an explicit scheme and use '@' to separate credentials
# from the host: "http://user:pass@host:port".  The original value began
# with "<proxy_username>:", which urllib3 parsed as an (unknown) scheme —
# hence the ProxySchemeUnknown error in the traceback.
proxies = {
    "http": "http://<proxy_username>:<proxy_password>@<proxy_IP>:<proxy_port>",
    "https": "http://<proxy_username>:<proxy_password>@<proxy_IP>:<proxy_port>",
}

# Namenode URLs and CLI arguments (the original's duplicated assignments
# and duplicate print collapsed into one copy).
node = os.getenv("namenode").split(",")
print(node)
local_file_path = sys.argv[1]
remote_file_path = sys.argv[2]
read_or_write = sys.argv[3]
print(local_file_path, remote_file_path)
def check_node_status(node):
    """Return (status, url, host) for the first namenode reporting "active".

    Same probe as before, but routed through the configured `proxies`.
    """
    for candidate in node:
        print(candidate)
        info = requests.get(
            "%s/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus" % candidate,
            proxies=proxies,
            verify=False).json()
        status = info["beans"][0]["State"]
        if status == "active":
            nnhost = info["beans"][0]["HostAndPort"]
            nnaddress = nnhost.split(":")[0]
            print(nnaddress)
            break
    return status, candidate, nnaddress
### Rest of the code is the same
Now it is giving the following error:
$ python3 python_hdfs.py ./1.png /user/testuser/2018-02-07_1.png write
['https://<servername>:50470', 'https:// <servername>:50470']
./1.png /user/testuser/2018-02-07_1.png
https://<servername>:50470
Traceback (most recent call last):
File "python_hdfs.py", line 73, in <module>
status, name, nnaddress= check_node_status(node)
File "python_hdfs.py", line 28, in check_node_status
verify=False).json()
File "/usr/lib/python3/dist-packages/requests/api.py", line 67, in get
return request('get', url, params=params, **kwargs)
File "/usr/lib/python3/dist-packages/requests/api.py", line 53, in request
return session.request(method=method, url=url, **kwargs)
File "/usr/lib/python3/dist-packages/requests/sessions.py", line 468, in request
resp = self.send(prep, **send_kwargs)
File "/usr/lib/python3/dist-packages/requests/sessions.py", line 576, in send
r = adapter.send(request, **kwargs)
File "/usr/lib/python3/dist-packages/requests/adapters.py", line 343, in send
conn = self.get_connection(request.url, proxies)
File "/usr/lib/python3/dist-packages/requests/adapters.py", line 254, in get_connection
proxy_manager = self.proxy_manager_for(proxy)
File "/usr/lib/python3/dist-packages/requests/adapters.py", line 160, in proxy_manager_for
**proxy_kwargs)
File "/usr/lib/python3/dist-packages/urllib3/poolmanager.py", line 281, in proxy_from_url
return ProxyManager(proxy_url=url, **kw)
File "/usr/lib/python3/dist-packages/urllib3/poolmanager.py", line 232, in __init__
raise ProxySchemeUnknown(proxy.scheme)
requests.packages.urllib3.exceptions.ProxySchemeUnknown: Not supported proxy scheme <proxy_username>
So, my question is, do I need to set up the proxy in kerberos for it to be working? If so, how? I am not too familiar with kerberos. I run kinit before running the python code, in order to enter into the kerberos realm, which runs fine and connects to the appropriate HDFS servers without the proxy. So I don't know why this error occurs when reading or writing to the same HDFS servers. Any help is appreciated.
I also have the proxy set up in /etc/apt/apt.conf like so:
Acquire::http::proxy "http://<proxy_username>:<proxy_password>@<proxy_IP>:<proxy_port>/";
Acquire::https::proxy "https://<proxy_username>:<proxy_password>@<proxy_IP>:<proxy_port>/";
I have also tried the following:
$ export http_proxy="http://<user>:<pass>@<proxy>:<port>"
$ export HTTP_PROXY="http://<user>:<pass>@<proxy>:<port>"
$ export https_proxy="http://<user>:<pass>@<proxy>:<port>"
$ export HTTPS_PROXY="http://<user>:<pass>@<proxy>:<port>"
import os

# requests honours the standard proxy environment variables; the URL must
# be "scheme://user:pass@host:port" — '@' (not '#') separates the
# credentials from the host.
proxy = 'http://<user>:<pass>@<proxy>:<port>'
os.environ['http_proxy'] = proxy
os.environ['HTTP_PROXY'] = proxy
os.environ['https_proxy'] = proxy
os.environ['HTTPS_PROXY'] = proxy
# rest of the code is same
But the error persists.
UPDATE: I have also tried the following.
Somebody suggested that we already have a proxy set up in /etc/apt/apt.conf to connect to the web. But maybe we don't need proxy to connect to the HDFS. So, try commenting the proxies in /etc/apt/apt.conf, and run the python script again. I did that.
$ env | grep proxy
http_proxy=http://hfli:Test6969@192.168.44.217:8080
https_proxy=https://hfli:Test6969@192.168.44.217:8080
$ unset http_proxy
$ unset https_proxy
$ env | grep proxy
$
And ran the python script again - (i) without defining proxies in the python script, and also (ii) with the proxies defined in the python script. I got the same original proxy error in both cases.
I found the following Java program that supposedly gives access to run Java programs on the HDFS:
import com.sun.security.auth.callback.TextCallbackHandler;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
public class HDFS_RW_Secure
{
    public static void main(String[] args) throws Exception
    {
        // JAAS and Kerberos configuration files.
        System.setProperty("java.security.auth.login.config", "/tmp/sc3_temp/hadoop_kdc.txt");
        System.setProperty("java.security.krb5.conf", "/tmp/sc3_temp/hadoop_krb.txt");

        Configuration hadoopConf = new Configuration();

        // Password-prompt login; switch to keytab login for unattended runs.
        LoginContext lc = new LoginContext("JaasSample", new TextCallbackHandler());
        lc.login();
        System.out.println("login");
        Subject subject = lc.getSubject();

        UserGroupInformation.setConfiguration(hadoopConf);
        UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);
        UserGroupInformation.setLoginUser(ugi);

        Path pt = new Path("hdfs://edhcluster" + args[0]);
        FileSystem fs = FileSystem.get(hadoopConf);

        // Write a marker string to the target path.
        FSDataOutputStream fin = fs.create(pt);
        fin.writeUTF("Hello!");
        fin.close();

        // Read it back line by line and echo it.
        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)));
        String line = br.readLine();
        while (line != null)
        {
            System.out.println(line);
            line = br.readLine();
        }
        fs.close();
        System.out.println("This is the end.");
    }
}
We need to take its jar file, HDFS.jar, and run the following shell script to enable Java programs to be run on the HDFS.
nano run.sh
# contents of the run.sh file:
/tmp/sc3_temp/jre1.8.0_161/bin/java -Djavax.net.ssl.trustStore=/tmp/sc3_temp/cacerts -Djavax.net.ssl.trustStorePassword=changeit -jar /tmp/sc3_temp/HDFS.jar $1
So, I can run this shell script with /user/testuser as the argument to give it access to run Java programs in the HDFS:
./run.sh /user/testuser/test2
which gives the following output:
Debug is true storeKey false useTicketCache false useKeyTab false doNotPrompt false ticketCache is null isInitiator true KeyTab is null refreshKrb5Config is false principal is null tryFirstPass is false useFirstPass is false storePass is false clearPass is false
Kerberos username [testuser]: testuser
Kerberos password for testuser:
[Krb5LoginModule] user entered username: testuser
principal is testuser#KRB.REALM
Commit Succeeded
login
2018-02-08 14:09:30,020 WARN [main] util.NativeCodeLoader (NativeCodeLoader.java:<clinit>(62)) - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Hello!
This is the end.
So, that's working I suppose. But how do I write an equivalent shell script to run Python codes?
I found the solution. It turns out, I was looking in the wrong place. It seems the user account was set up wrongly. I tried to do something simpler, like downloading a webpage into the server. And I noticed that it was downloading the page, but had no permissions to fix it. So I explored a bit more and found that when the user account was created, it was not assigned proper ownership. So, once I assigned the proper owner to the user account, the proxy error was gone. (Sigh, so much time wasted.)
I have written about it in more detail here.

for range to send post request by using requests.Session(), it alert 'module' object has no attribute 'kqueue'

macOS 10.12.3 python 2.7.13 requests 2.13.0
I use the requests package to send a POST request. This request needs a login before posting data, so I use requests.Session() and load a saved login cookie.
Then I use this session to send post data in cycle mode.
It is no error that I used to run this code in Windows and Linux.
Simple Code:
# The module is "requests" (the original "request.Session()" raises
# NameError), and the Python 2 cookie-jar module is "cookielib", not
# "cookieslib".
s = requests.Session()
s.cookies = cookielib.LWPCookieJar('cookise')
s.cookies.load(ignore_discard=True)
for user_id in range(100, 200):
    url = 'http://xxxx'
    data = {'user': user_id, 'content': '123'}
    r = s.post(url, data)
    # ... (rest of loop body elided in the question)
But the program frequently (about every interval) crashes; the error is AttributeError: 'module' object has no attribute 'kqueue'
Traceback (most recent call last):
File "/Users/gasxia/Dev/Projects/TgbookSpider/kfz_send_msg.py", line 90, in send_msg
r = requests.post(url, data) # catch error if user isn't exist
File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 535, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 488, in request
resp = self.send(prep, **send_kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/sessions.py", line 609, in send
r = adapter.send(request, **kwargs)
File "/usr/local/lib/python2.7/site-packages/requests/adapters.py", line 423, in send
timeout=timeout
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 588, in urlopen
conn = self._get_conn(timeout=pool_timeout)
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 241, in _get_conn
if conn and is_connection_dropped(conn):
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/util/connection.py", line 27, in is_connection_dropped
return bool(wait_for_read(sock, timeout=0.0))
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/util/wait.py", line 33, in wait_for_read
return _wait_for_io_events(socks, EVENT_READ, timeout)
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/util/wait.py", line 22, in _wait_for_io_events
with DefaultSelector() as selector:
File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/util/selectors.py", line 431, in __init__
self._kqueue = select.kqueue()
AttributeError: 'module' object has no attribute 'kqueue'
This looks like a problem that commonly arises if you're using something like eventlet or gevent, both of which monkeypatch the select module. If you're using those to achieve asynchrony, you will need to ensure that those monkeypatches are applied before importing requests. This is a known bug, being tracked in this issue.

py2neo (Neo4j) : py2neo.packages.httpstream.http.SocketError: Operation not permitted

I am running Neo4j 2.2.1 in ubuntu Amazon EC2 instance. When I am trying to connect through python using py2neo-2.0.7, I am getting following error :
py2neo.packages.httpstream.http.SocketError: Operation not permitted
I am able to access the web-interface through http://52.10.**.***:7474/browser/
CODE :-
from py2neo import Graph, watch, Node, Relationship
url_graph_conn = "https://neo4j:password#52.10.**.***:7474/db/data/"
print url_graph_conn
my_conn = Graph(url_graph_conn)
babynames = my_conn.find("BabyName")
for babyname in babynames:
print 2
Error message :-
https://neo4j:password#52.10.**.***:7474/db/data/
Traceback (most recent call last):
File "C:\Users\rharoon002\eclipse_workspace\peace\peace\core\graphconnection.py", line 39, in <module>
for babyname in babynames:
File "C:\Python27\lib\site-packages\py2neo\core.py", line 770, in find
response = self.cypher.post(statement, parameters)
File "C:\Python27\lib\site-packages\py2neo\core.py", line 667, in cypher
metadata = self.resource.metadata
File "C:\Python27\lib\site-packages\py2neo\core.py", line 213, in metadata
self.get()
File "C:\Python27\lib\site-packages\py2neo\core.py", line 258, in get
response = self.__base.get(headers=headers, redirect_limit=redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 966, in get
return self.__get_or_head("GET", if_modified_since, headers, redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 943, in __get_or_head
return rq.submit(redirect_limit=redirect_limit, **kwargs)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 433, in submit
http, rs = submit(self.method, uri, self.body, self.headers)
File "C:\Python27\lib\site-packages\py2neo\packages\httpstream\http.py", line 362, in submit
raise SocketError(code, description, host_port=uri.host_port)
py2neo.packages.httpstream.http.SocketError: Operation not permitted
You are trying to access neo4j via https on the standard port for http (7474):
url_graph_conn = "https://neo4j:password@52.10.**.***:7474/db/data/"
The standard port for a https connection is 7473. Try:
url_graph_conn = "https://neo4j:password@52.10.**.***:7473/db/data/"
And make sure you can access the web interface via https:
https://52.10.**.***:7473/browser/
You can change/see the port settings in your neo4j-server.properties file.

Mega API using python programming language

I want to be able to access my Mega account using python. I looked to the following link but I have issues concerning the login process.
https://github.com/richardasaurus/mega.py
With the examples given, it looks quite easy
# Create a Mega client and authenticate.
# NOTE(review): `email` and `password` are assumed to be defined earlier
# by the caller — confirm against the surrounding script.
mega = Mega()
m = mega.login(email, password)
But when I do it gives the following error
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "mega\mega.py", line 26, in login
instance.login_user(email, password)
File "mega\mega.py", line 32, in login_user
resp = self.api_request({'a': 'us', 'user': email, 'uh': uh})
File "mega\mega.py", line 86, in api_request
timeout=self.timeout)
File "requests\api.py", line 84, in post
return request('post', url, data=data, **kwargs)
File "requests\api.py", line 39, in request
return s.request(method=method, url=url, **kwargs)
File "requests\sessions.py", line 200, in request
r.send(prefetch=prefetch)
File "requests\models.py", line 489, in send
cert_loc = __import__('certifi').where()
ImportError: No module named certifi
I guess I did not install correctly mega.py but I can't make it work.
Thanks
As seen in comments, the solution is just to install https://pypi.python.org/pypi/certifi

Categories

Resources