Response not defined? Get request to Gnip (Twitter enterprise) - python

I have a list of games in json file all of type string, and my goal is to filter through these games to make many requests to the Gnip API. But I get this error when I make the request:
Traceback (most recent call last):
File "GetRequest.py", line 53, in <module>
the_page = json.load(response)
NameError: name 'response' is not defined
here is my code:
#!/usr/bin/env python
import urllib2
import base64
import json
class RequestWithMethod(urllib2.Request):
def __init__(self, url, method, headers={}):
self._method = method
urllib2.Request.__init__(self, url, headers)
def get_method(self):
if self._method:
return self._method
else:
return urllib2.Request.get_method(self)
if __name__ == "__main__":
data = json.load(open('games.json'))
url = 'url'
UN = 'Username'
PWD = 'Password'
query = data[1].encode("UTF8")
fromDate = '201803010000'
toDate = '201803140000'
queryString = (url + "?query={}" + "&fromDate=" + fromDate + "&toDate=" + toDate).format(query)
base64string = base64.encodestring('%s:%s' % (UN, PWD)).replace('\n', '')
req = RequestWithMethod(queryString, 'GET')
req.add_header("Authorization", "Basic %s" % base64string)
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError as e:
print e.read()
the_page = json.load(response)
print the_page['results'][1]
I have switched out the actual passwords, usernames, and url for security purposes.

Related

How to create paste on rentry.co with python?

How do I create a request to rentry.co in order to create pastes?
I've tried to solve this in Python but I get the following response:
403 reason: Forbidden ...
I tried changing the URL and adding my cookie.
My code looks currently as follows.
import requests
text = "Hello World!"
data = {"text":text}
r = requests.post("https://rentry.co/api", data=data)
print(f"status code: {r.status_code}")
print(f"reason: {r.reason}") ```
try this
#!/usr/bin/env python3
import http.cookiejar
import sys
import urllib.parse
import urllib.request
from http.cookies import SimpleCookie
from json import loads as json_loads
_headers = {"Referer": 'https://rentry.co'}
class UrllibClient:
"""Simple HTTP Session Client, keeps cookies."""
def __init__(self):
self.cookie_jar = http.cookiejar.CookieJar()
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie_jar))
urllib.request.install_opener(self.opener)
def get(self, url, headers={}):
request = urllib.request.Request(url, headers=headers)
return self._request(request)
def post(self, url, data=None, headers={}):
postdata = urllib.parse.urlencode(data).encode()
request = urllib.request.Request(url, postdata, headers)
return self._request(request)
def _request(self, request):
response = self.opener.open(request)
response.status_code = response.getcode()
response.data = response.read().decode('utf-8')
return response
def new(url, edit_code, text):
client, cookie = UrllibClient(), SimpleCookie()
cookie.load(vars(client.get('https://rentry.co'))['headers']['Set-Cookie'])
csrftoken = cookie['csrftoken'].value
payload = {
'csrfmiddlewaretoken': csrftoken,
'url': url,
'edit_code': edit_code,
'text': text
}
return json_loads(client.post('https://rentry.co/api/new', payload, headers=_headers).data)
def get_rentry_link(text):
url, edit_code = '', ''
response = new(url, edit_code, text)
if response['status'] != '200':
print('error: {}'.format(response['content']))
try:
for i in response['errors'].split('.'):
i and print(i)
sys.exit(1)
except:
sys.exit(1)
else:
pastebin_link = response['url']
print('Url: {}\nEdit code: {}'.format(response['url'], response['edit_code']))
return pastebin_link
if __name__ == '__main__':
link_list = ['https://stackoverflow.com/', 'https://www.youtube.com/', 'https://www.google.com/']
pastebin_link = get_rentry_link('\n'.join(map(str, link_list)))

How to Fix AttributeError: module 'requests.sessions' has no attribute 'post'

I want to create account checker with python
These is my code
import requests
from bs4 import BeautifulSoup
class output(object):
def tested(self, email, password):
print(f" Posted: {email} {password}")
# Get CSRF TOKEN
class checker_start(object):
def get_token(self):
data = requests.get("https://www.marlboro.id/auth/login")
soup = BeautifulSoup(data.text, "lxml")
decide_csrf = soup.find("input", {"name": "decide_csrf"})["value"]
return decide_csrf
def post(self, combo):
decide_csrf = self.get_token()
email = combo[0]
password = combo [1]
api = requests.sessions
# Creating POST METHOD #
param = {"_method": "POST", "decide_csrf": decide_csrf, "email": email, "password": password}
source = api.post("https://www.marlboro.id/auth/login", data=param).text
if """<div class="err__msg-big">Opps!</div>""" in source:
output().tested(email, password)
else:
output().tested(email, password)
def start(self):
file = input("Please input your txt file: ")
data = open(file, "r").readlines()
data_s = [lines.replace("\n", " ") for lines in data]
for lines in data_s:
combo = lines.split("|")
self.post(combo)
if __name__ == "__main__":
checker_start().start()
But when I run these code the output is:
Traceback (most recent call last): File
"/Users/erzajullian/PycharmProjects/Checker/marlboro.py", line 39, in
checker_start().start() File "/Users/erzajullian/PycharmProjects/Checker/marlboro.py", line 36, in
start
self.post(combo) File "/Users/erzajullian/PycharmProjects/Checker/marlboro.py", line 23, in
post
source = api.post("https://www.marlboro.id/auth/login", data=param).text AttributeError: module 'requests.sessions' has no
attribute 'post'
How to solve these problem? Thank you
You have
api = requests.sessions
api is not the requests module after this point, so it has no post() method.
If you replace
source = api.post("https://www.marlboro.id/auth/login", data=param).text
with
source = requests.post("https://www.marlboro.id/auth/login", data=param).text
Does that fix things? If so, you can remove the api = requests.sessions line.)
First of all you'll have to change your code a bit. Post request should be handeled differently.
import requests
with requests.Sessions as s:
# Define headers which you will get via post request
Headers = {'user-agent':'YOUR_HEADERS'}
url = 'some_url'
login_data{
'username':'user',
'password':'password'
# Any other login data your site requires}
r = s.post(url, data=login_data, headers=Headers)
# now you can use r.content to parse given html in bs4
hope this helps

Trying to call the AlchemyLanguage API

I have written code for calling the AlchemyLanguage API of Bluemix in Python. I need the keywords and entities, but it is only showing the first keyword and first entity for the text file. Where am I going wrong?
import requests
import urllib
import urllib2
def call_alchemy_api(text, API_KEY):
payload = {'outputMode':'json','extract':'entities,keywords','sentiment':'1','maxRetrieve':'1', 'url':'https://www.ibm.com/us-en/'}
payload['apikey'] = API_KEY
encoded_text = urllib.quote_plus(text)
payload['text'] = text
data = urllib.urlencode(payload)
url = 'https://gateway-a.watsonplatform.net/calls/text/TextGetCombinedData'
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
return response
if __name__ == "__main__":
api_key = 'xxxxxxxxxxxxxxxxxxxxxmyapi'
f = open('in0.txt','r')
text = f.read()
print text
response = call_alchemy_api(text, api_key)
print response.read()
Change the maxRetrieve keyword's value.
Example:
payload = {'outputMode':'json','extract':'entities,keywords','sentiment':'1','maxRetrieve':'3', 'url':'https://www.ibm.com/us-en/'}
API Link:
http://www.ibm.com/watson/developercloud/alchemy-language/api/v1/

Creating a python wrapper for an API using classes

I am trying to make a Python Wrapper for an API. I have been able to create scripts that work fine but don't use classes. I want to make a real wrapper of that API using classes. I am new to OOP in Python.
Following was my attempt but I am stuck at how to convert it to an OO type.
import urllib2
from urllib import urlencode
import json
class apiclient:
def __init__(self,
request_url,
hmh_api_key,
client_id,
grant_type="password",
username="username",
password="password"):
values = {
"client_id": client_id,
"grant_type": grant_type,
"username": username,
"password": password
}
data = urlencode(values)
req = urllib2.Request(request_url, data)
req.add_header("Api-Key", api_key)
response = urllib2.urlopen(req)
response_header = response.info().dict
response_body = response.read()
json_acceptable_string = response_body.replace("'", "\"")
response_body_dict = json.loads(json_acceptable_string)
return response_body_dict ## this is the response
if __name__ == "__main__":
API_KEY = "75b5cc58a5cdc0a583f91301cefedf0c"
CLIENT_ID = "ef5f7a03-58e8-48d7-a38a-abbd2696bdb6.hmhco.com"
REQUEST_URL = "http://some.url"
client = apiclient(request_url=REQUEST_URL,
api_key=API_KEY,
client_id=CLIENT_ID)
print client
Without classes, I get the response JSON as response_body_dict but with classes I get TypeError: __init__() should return None. How should I start designing my program.
I have shown only a part of the whole program, there are a lot many similar scripts that send requests to URLs and get JSON responses.
Thanks!
You should not return something from __init__ function.
EDIT:
If you need that value you should use the response_body_dict as a class member and get him from other method:
import urllib2
from urllib import urlencode
import json
class apiclient:
def __init__(self,
request_url,
api_key,
client_id,
grant_type="password",
username="username",
password="password"):
values = {
"client_id": client_id,
"grant_type": grant_type,
"username": username,
"password": password
}
data = urlencode(values)
req = urllib2.Request(request_url, data)
req.add_header("Api-Key", api_key)
response = urllib2.urlopen(req)
response_header = response.info().dict
response_body = response.read()
json_acceptable_string = response_body.replace("'", "\"")
self.response_body_dict = json.loads(json_acceptable_string)
def get_response_body(self):
return self.response_body_dict
if __name__ == "__main__":
API_KEY = "75b5cc58a5cdc0a583f91301cefedf0c"
CLIENT_ID = "ef5f7a03-58e8-48d7-a38a-abbd2696bdb6.hmhco.com"
REQUEST_URL = "http://some.url"
client = apiclient(request_url=REQUEST_URL,
api_key=API_KEY,
client_id=CLIENT_ID)
response = client.get_response_body()
print client

Streaming API statuses/filter.json?track not delivering JSON Python

I am outputing a Twitter stream ("https://stream.twitter.com/1.1/statuses/filter.json?track=term") to a file using Python. However, the file is outputting an "instance" instead of a "json" (see relevant part in bold):
import oauth2 as oauth
import urllib2 as urllib
import json
# Filled correctly, no authentication problem
api_key = "XXX"
api_secret = "XXX"
access_token_key = "XXX"
access_token_secret = "XXX"
_debug = 0
oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=api_key, secret=api_secret)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
http_method = "GET"
http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)
'''
Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
def twitterreq(url, method, parameters):
req = oauth.Request.from_consumer_and_token(oauth_consumer,
token=oauth_token,
http_method=http_method,
http_url=url,
parameters=parameters)
req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
headers = req.to_header()
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
opener = urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
response = opener.open(url, encoded_post_data)
return response
**def fetchcontinuousstream():
# For streaming of tweets use
url = "https://stream.twitter.com/1.1/statuses/filter.json?track=term"
parameters = []
response = twitterreq(url, "GET", parameters)
print "Type of the response"
print type(response)
for line in response:
print type(line)
if __name__ == '__main__':
fetchcontinuousstream()**
The result is this:
Type of the response
<type 'instance'>
<type 'str'>
So basically the response is an instance, not a json/dict, each line is just a string...
How can I obtain a JSON instead?
Try to parse the response with json.loads
import oauth2 as oauth
import urllib2 as urllib
from json import loads
# Filled correctly, no authentication problem
api_key = "xxxx"
api_secret = "xxxx"
access_token_key = "xxxx"
access_token_secret = "xxxx"
_debug = 0
oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=api_key, secret=api_secret)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
http_method = "GET"
http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)
def twitterreq(url, method, parameters):
'''Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
req = oauth.Request.from_consumer_and_token(oauth_consumer,
token=oauth_token,
http_method=http_method,
http_url=url,
parameters=parameters)
req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
opener = urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
response = opener.open(url, encoded_post_data)
return response
def fetchcontinuousstream():
# For streaming of tweets use
url = "https://stream.twitter.com/1.1/statuses/filter.json?track=term"
parameters = []
response = twitterreq(url, "GET", parameters)
for line in response:
print loads(line)
if __name__ == '__main__':
fetchcontinuousstream()
The json loader was reading an empty line and failing to decode, this code will work
def fetchcontinuousstream():
# For streaming of tweets use
url = "https://stream.twitter.com/1.1/statuses/filter.json?track=term"
parameters = []
response = twitterreq(url, "GET", parameters)
for line in response:
if line != "":
tweet = json.loads(line)
print tweet["text"]

Categories

Resources