I'd like a Python 3 program to send JSON data to a Django web server and print it on a web page. This is what I have in views.py:
import json
from django.http import StreamingHttpResponse

def receive_json(request):
    if request.method == 'POST':
        received_data = json.loads(request.body.decode("utf-8"))
        return StreamingHttpResponse('it was post request: ' + str(received_data))
    return StreamingHttpResponse('it was get request')
And the Python client code:
import requests
import json
url = "http://127.0.0.1:8000/"
data = {"data": [{'key1':'val1'}, {'key2':'val2'}]}
headers = {'content-type':'application/json'}
r = requests.post(url, data=json.dumps(data), headers=headers)
print(r.text)
However, it shows this message:
Forbidden (CSRF cookie not set.): /
[28/May/2021 16:51:31] "POST / HTTP/1.1" 403 2864
import requests
import json
url = "http://127.0.0.1:8000/"
data = {"data": [{'key1':'val1'}, {'key2':'val2'}]}
headers = {'content-type':'application/json','Cookie':'csrftoken=axXsa39e5hq8gqlTjJFHAbUWtg2FQgnSd3cxxkx9khatqgersthtSryDxtF0cVCk'}
r = requests.post(url, data=json.dumps(data), headers=headers)
print(r.text)
I think this may work by adding the Cookie header, but it would be better to use the @csrf_exempt decorator on receive_json, like this:
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def receive_json(request):
    if request.method == 'POST':
        received_data = json.loads(request.body.decode("utf-8"))
        return StreamingHttpResponse('it was post request: ' + str(received_data))
    return StreamingHttpResponse('it was get request')
Essentially you first need to perform a GET request to obtain a csrftoken, then post that together with the data. There are a few ways to do this. Here is one (untested, however):
import requests
import json
url = "http://127.0.0.1:8000/"
s = requests.Session()
s.get(url)
headers = {
    'content-type': 'application/json',
    'X-CSRFToken': s.cookies["csrftoken"],
}
data = {"data": [{'key1':'val1'}, {'key2':'val2'}]}
r = s.post(url, data=json.dumps(data), headers=headers)
print(r.text)
You can find more information in Django's CSRF documentation.
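One caveat worth adding (my note, not part of the original answer): a plain GET only causes Django to set the csrftoken cookie if the view touches the CSRF machinery. If the cookie never shows up, decorating the view with Django's ensure_csrf_cookie forces it to be sent. A minimal sketch of the view from the question:

import json
from django.http import StreamingHttpResponse
from django.views.decorators.csrf import ensure_csrf_cookie

@ensure_csrf_cookie  # guarantees the csrftoken cookie is set on the GET response
def receive_json(request):
    if request.method == 'POST':
        received_data = json.loads(request.body.decode("utf-8"))
        return StreamingHttpResponse('it was post request: ' + str(received_data))
    return StreamingHttpResponse('it was get request')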
I have made an API where I am receiving a request about a successful payment. Now, in the same code, I have to make a POST request to another external application to notify it about the payment, with JSON data in the request body. I was trying to use Python's requests to accomplish this; please help me if there is a better way.
@api_view(['POST'])
def cashfree_request(request):
    if request.method == 'POST':
        data = request.POST.dict()
        print(data)
        payment_gateway_order_identifier = data['orderId']
        amount = data['orderAmount']
        order = Orders.objects.get(payment_gateway_order_identifier=payment_gateway_order_identifier)
        payment = Payments(orders=order, amount=amount,)
        payment.save()
        URL = ""  # external application url where the webhook needs to be sent
        request_data = {
            'order': {
                'id': order.id,
                'payment_collection_status': transaction_status,
                'payment_collection_message': transaction_message
            }
        }
        json_data = json.dumps(request_data)
        response = requests.post(url=URL, data=json_data)
    return Response(status=status.HTTP_200_OK)
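As a side note, requests can serialize the body and set the Content-Type: application/json header for you via the json= keyword, which replaces the manual json.dumps call above. A minimal sketch with placeholder values (the real values come from the view's variables):

import requests

URL = ""  # external application url where the webhook needs to be sent
request_data = {
    'order': {
        'id': 42,                                          # placeholder
        'payment_collection_status': 'SUCCESS',            # placeholder
        'payment_collection_message': 'Payment received',  # placeholder
    }
}
# json= serializes the dict and sets the Content-Type header automatically
response = requests.post(URL, json=request_data)
print(response.status_code)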
Hi, I am trying to log in to an Outlook web application using a Python web crawler, but I am not getting through the login page. From what I noticed, the site redirects upon the GET request and sets a cookie, namely OutlookSession. Then the POST request goes to the same URL carrying this cookie, which is the reason I am using requests.Session().
This is my code:
import requests
URL = "https://mail.guc.edu.eg/owa"
username = "username"
password = "password"
s = requests.Session()
s.get(URL)
login_data = {"username": username, "password": password}
r = s.post("https://mail.guc.edu.eg/owa", data=login_data)
To expand on A Magoon's answer, there happen to be three additional form fields that OWA expects. This is what worked for me:
import requests
owa_login_form_url = 'https://mail.yourdomain.com/owa'
user_name = 'user'
pwd = 'pwd'
flags = '4'
forcedownlevel = '0'
sess = requests.Session()
payload = {'username': user_name, 'password': pwd, 'destination': owa_login_form_url, 'flags': flags, 'forcedownlevel': forcedownlevel }
resp = sess.post(owa_login_form_url + '/auth.owa', data=payload)
It looks like the form posts to https://mail.guc.edu.eg/owa/auth.owa.
import requests
URL = "https://mail.guc.edu.eg/owa"
username = "username"
password = "password"
s = requests.Session()
s.get(URL)
login_data = {"username": username, "password": password}
r = s.post("https://mail.guc.edu.eg/owa/auth.owa", data=login_data)
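Whether the login actually succeeded is easiest to judge from the redirect chain and the session cookies; a small check against the snippet above (requests follows the 302s automatically and records them in r.history):

print(r.status_code)                 # final status after redirects, usually 200
for hop in r.history:                # the intermediate 302s, if any
    print(hop.status_code, hop.headers.get('Location'))
print(s.cookies.get_dict())          # should now contain the OWA session cookie(s)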
I am using the requests and cfscrape libraries to log in to https://kissanime.to/Login
'''Login to website'''
def login(self, usr, pw):
    login_url = 'https://kissanime.to/Login'
    sess = requests.Session()
    # login credentials
    payload = {
        'username': usr,
        'password': pw,
        'redirect': ''
    }
    # Creating cfscrape instance of the session
    scraper_sess = cfscrape.create_scraper(sess)
    a = scraper_sess.post(login_url, data=payload)
    print(a.text)
    print(a.status_code)
a.text gives me the same login page
a.status_code gives me 200
That means my login is not working at all. Am I missing something? According to Chrome's network monitor, I should also get status code 302.
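(Aside: whether the POST really answers with a 302 can be checked by telling requests not to follow redirects; a minimal check against the same scraper session:)

a = scraper_sess.post(login_url, data=payload, allow_redirects=False)
print(a.status_code)                 # 302 on a successful login
print(a.headers.get('Location'))     # where the redirect points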
I solved it using mechanicalsoup
Code:
import mechanicalsoup
import cfscrape
from bs4 import BeautifulSoup

'''Login to website'''
def login(self, usr, pw):
    login_url = 'https://kissanime.to/Login'
    # Creating cfscrape instance
    self.r = cfscrape.create_scraper()
    login_page = self.r.get(login_url)
    # Creating a mechanicalsoup browser instance with
    # response object of cfscrape
    browser = mechanicalsoup.Browser(self.r)
    soup = BeautifulSoup(login_page.text, 'html.parser')
    # grab the login form
    login_form = soup.find('form', {'id': 'formLogin'})
    # find login and password inputs
    login_form.find('input', {'name': 'username'})['value'] = usr
    login_form.find('input', {'name': 'password'})['value'] = pw
    browser.submit(login_form, login_page.url)
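For reference, newer mechanicalsoup releases ship a StatefulBrowser that bundles the fetch/fill/submit steps into one object. A rough, untested equivalent of the code above (form id and field names assumed to match):

import mechanicalsoup

browser = mechanicalsoup.StatefulBrowser()
browser.open('https://kissanime.to/Login')
browser.select_form('#formLogin')      # select the login form by CSS selector
browser['username'] = 'usr'            # fill the inputs by name
browser['password'] = 'pw'
response = browser.submit_selected()   # posts the form; cookies are kept on the browser
print(response.status_code)

Note this sketch drops the cfscrape layer, so it only applies where the Cloudflare check is not in the way.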
This content is from Requests Documentation:
Many web services that require authentication accept HTTP Basic Auth. This is the simplest kind, and Requests supports it straight out of the box.
from requests.auth import HTTPBasicAuth
requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))
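For reference, requests also accepts a plain (user, pass) tuple as a shorthand for HTTPBasicAuth:

import requests

# equivalent to auth=HTTPBasicAuth('user', 'pass')
r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
print(r.status_code)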
You have to send the payload as JSON:
import requests
import json
import cfscrape

'''Login to website'''
def login(self, usr, pw):
    login_url = 'https://kissanime.to/Login'
    sess = requests.Session()
    # login credentials
    payload = {
        'username': usr,
        'password': pw,
        'redirect': ''
    }
    # Creating cfscrape instance of the session
    scraper_sess = cfscrape.create_scraper(sess)
    a = scraper_sess.post(login_url, data=json.dumps(payload))
    print(a.text)
    print(a.status_code)
Reference: http://docs.python-requests.org/en/master/user/authentication/
I am using the requests module.
I have figured out how to submit data to a login form on a website and retrieve the session key, but I can't see an obvious way to use this session key in subsequent requests.
Can someone fill in the ellipsis in the code below or suggest another approach?
>>> import requests
>>> login_data = {'formPosted': '1', 'login_email': 'me@example.com', 'password': 'pw'}
>>> r = requests.post('https://localhost/login.py', login_data)
>>>
>>> r.text
'You are being redirected here'
>>> r.cookies
{'session_id_myapp': '127-0-0-1-825ff22a-6ed1-453b-aebc-5d3cf2987065'}
>>>
>>> r2 = requests.get('https://localhost/profile_data.json', ...)
You can easily create a persistent session using:
s = requests.Session()
After that, continue with your requests as you would:
s.post('https://localhost/login.py', login_data)
# logged in! cookies saved for future requests.
r2 = s.get('https://localhost/profile_data.json', ...)
# cookies sent automatically!
# do whatever, s will keep your cookies intact :)
For more about Sessions: https://requests.readthedocs.io/en/latest/user/advanced/#session-objects
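A small addition: a Session also works as a context manager, which releases the underlying connections when the block exits. Reusing login_data from the question:

import requests

with requests.Session() as s:
    s.post('https://localhost/login.py', login_data)   # cookies saved on s
    r2 = s.get('https://localhost/profile_data.json')  # cookies sent automatically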
The other answers help to understand how to maintain such a session. Additionally, I want to provide a class which keeps the session maintained over different runs of a script (with a cache file). This means a proper "login" is only performed when required (timeout, or no session exists in the cache). It also supports proxy settings on subsequent calls to 'get' or 'post'.
It is tested with Python 3.
Use it as a basis for your own code. The following snippets are released under GPL v3.
import pickle
import datetime
import os
from urllib.parse import urlparse

import requests

class MyLoginSession:
    """
    a class which handles and saves login sessions. It also keeps track of proxy settings.
    It also maintains a cache file for restoring session data from earlier
    script executions.
    """
    def __init__(self,
                 loginUrl,
                 loginData,
                 loginTestUrl,
                 loginTestString,
                 sessionFileAppendix = '_session.dat',
                 maxSessionTimeSeconds = 30 * 60,
                 proxies = None,
                 userAgent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
                 debug = True,
                 forceLogin = False,
                 **kwargs):
        """
        save some information needed to login the session

        you'll have to provide 'loginTestString' which will be looked for in the
        responses html to make sure you've properly been logged in

        'proxies' is of format { 'https' : 'https://user:pass@server:port', 'http' : ... }
        'loginData' will be sent as post data (dictionary of id : value).
        'maxSessionTimeSeconds' will be used to determine when to re-login.
        """
        urlData = urlparse(loginUrl)

        self.proxies = proxies
        self.loginData = loginData
        self.loginUrl = loginUrl
        self.loginTestUrl = loginTestUrl
        self.maxSessionTime = maxSessionTimeSeconds
        self.sessionFile = urlData.netloc + sessionFileAppendix
        self.userAgent = userAgent
        self.loginTestString = loginTestString
        self.debug = debug

        self.login(forceLogin, **kwargs)

    def modification_date(self, filename):
        """
        return last file modification date as a datetime object
        """
        t = os.path.getmtime(filename)
        return datetime.datetime.fromtimestamp(t)

    def login(self, forceLogin = False, **kwargs):
        """
        login to a session. Try to read the last saved session from the cache file.
        If this fails, do a proper login. If the last cache access was too old,
        also perform a proper login. Always updates the session cache file.
        """
        wasReadFromCache = False
        if self.debug:
            print('loading or generating session...')
        if os.path.exists(self.sessionFile) and not forceLogin:
            time = self.modification_date(self.sessionFile)

            # only load if the cache file is younger than maxSessionTime
            lastModification = (datetime.datetime.now() - time).seconds
            if lastModification < self.maxSessionTime:
                with open(self.sessionFile, "rb") as f:
                    self.session = pickle.load(f)
                    wasReadFromCache = True
                    if self.debug:
                        print("loaded session from cache (last access %ds ago) "
                              % lastModification)
        if not wasReadFromCache:
            self.session = requests.Session()
            self.session.headers.update({'user-agent' : self.userAgent})
            res = self.session.post(self.loginUrl, data = self.loginData,
                                    proxies = self.proxies, **kwargs)

            if self.debug:
                print('created new session with login')
            self.saveSessionToCache()

        # test login
        res = self.session.get(self.loginTestUrl)
        if res.text.lower().find(self.loginTestString.lower()) < 0:
            raise Exception("could not log into provided site '%s'"
                            " (did not find successful login string)"
                            % self.loginUrl)

    def saveSessionToCache(self):
        """
        save the session to a cache file
        """
        # always save (to update the timeout)
        with open(self.sessionFile, "wb") as f:
            pickle.dump(self.session, f)
            if self.debug:
                print('updated session cache-file %s' % self.sessionFile)

    def retrieveContent(self, url, method = "get", postData = None, **kwargs):
        """
        return the content of the url with respect to the session.

        If 'method' is not 'get', the url will be called with 'postData'
        as a post request.
        """
        if method == 'get':
            res = self.session.get(url, proxies = self.proxies, **kwargs)
        else:
            res = self.session.post(url, data = postData, proxies = self.proxies, **kwargs)

        # the session has been updated on the server, so also update it in the cache
        self.saveSessionToCache()

        return res
A code snippet for using the above class may look like this:
if __name__ == "__main__":
    # proxies = {'https' : 'https://user:pass@server:port',
    #            'http'  : 'http://user:pass@server:port'}

    loginData = {'user' : 'usr',
                 'password' : 'pwd'}

    loginUrl = 'https://...'
    loginTestUrl = 'https://...'
    successStr = 'Hello Tom'

    s = MyLoginSession(loginUrl, loginData, loginTestUrl, successStr,
                       #proxies = proxies
                       )

    res = s.retrieveContent('https://....')
    print(res.text)

    # if, for instance, login via JSON values is required, try this:
    s = MyLoginSession(loginUrl, None, loginTestUrl, successStr,
                       #proxies = proxies,
                       json = loginData)
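A post through the same session object would look like this (a hedged one-liner based on retrieveContent's signature, with placeholder data):

res = s.retrieveContent('https://....', method = "post", postData = {'key' : 'value'})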
Check out my answer in this similar question:
python: urllib2 how to send cookie with urlopen request
import urllib2
import urllib
from cookielib import CookieJar
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# input-type values from the html form
formdata = { "username" : username, "password": password, "form-id" : "1234" }
data_encoded = urllib.urlencode(formdata)
response = opener.open("https://page.com/login.php", data_encoded)
content = response.read()
EDIT:
I see I've gotten a few downvotes for my answer, but no explanatory comments. I'm guessing it's because I'm referring to the urllib libraries instead of requests. I do that because the OP asked for help with requests, or for someone to suggest another approach.
The documentation says that get takes an optional cookies argument, allowing you to specify cookies to send. From the docs:
>>> url = 'http://httpbin.org/cookies'
>>> cookies = dict(cookies_are='working')
>>> r = requests.get(url, cookies=cookies)
>>> r.text
'{"cookies": {"cookies_are": "working"}}'
http://docs.python-requests.org/en/latest/user/quickstart/#cookies
Upon trying all the answers above, I found that using "RequestsCookieJar" instead of the regular CookieJar for subsequent requests fixed my problem.
import requests
import json
# The Login URL
authUrl = 'https://whatever.com/login'
# The subsequent URL
testUrl = 'https://whatever.com/someEndpoint'
# Logout URL
testlogoutUrl = 'https://whatever.com/logout'
# Whatever you are posting
login_data = {'formPosted': '1',
              'login_email': 'me@example.com',
              'password': 'pw'
              }
# The Authentication token or any other data that we will receive from the Authentication Request.
token = ''
# Post the login Request
loginRequest = requests.post(authUrl, login_data)
print("{}".format(loginRequest.text))
# Save the request content to your variable. In this case I needed a field called token.
token = str(json.loads(loginRequest.content)['token']) # or ['access_token']
print("{}".format(token))
# Verify Successful login
print("{}".format(loginRequest.status_code))
# Create your Requests Cookie Jar for your subsequent requests and add the cookie
jar = requests.cookies.RequestsCookieJar()
jar.set('LWSSO_COOKIE_KEY', token)
# Execute your next request(s) with the Request Cookie Jar set
r = requests.get(testUrl, cookies=jar)
print("R.TEXT: {}".format(r.text))
print("R.STCD: {}".format(r.status_code))
# Execute your logout request(s) with the Request Cookie Jar set
r = requests.delete(testlogoutUrl, cookies=jar)
print("R.TEXT: {}".format(r.text)) # should show "Request Not Authorized"
print("R.STCD: {}".format(r.status_code)) # should show 401
Save only required cookies and reuse them.
import os
import pickle
import requests
from urllib.parse import urljoin, urlparse
login = 'my@email.com'
password = 'secret'
# Assuming two cookies are used for persistent login.
# (Find it by tracing the login process)
persistentCookieNames = ['sessionId', 'profileId']
URL = 'http://example.com'
urlData = urlparse(URL)
cookieFile = urlData.netloc + '.cookie'
signinUrl = urljoin(URL, "/signin")
with requests.Session() as session:
    try:
        with open(cookieFile, 'rb') as f:
            print("Loading cookies...")
            session.cookies.update(pickle.load(f))
    except Exception:
        # If cookies could not be loaded from the file, get new ones by logging in
        print("Logging in...")
        post = session.post(
            signinUrl,
            data={
                'email': login,
                'password': password,
            }
        )
        try:
            with open(cookieFile, 'wb') as f:
                jar = requests.cookies.RequestsCookieJar()
                for cookie in session.cookies:
                    if cookie.name in persistentCookieNames:
                        jar.set_cookie(cookie)
                pickle.dump(jar, f)
        except Exception as e:
            os.remove(cookieFile)
            raise(e)

    MyPage = urljoin(URL, "/mypage")
    page = session.get(MyPage)
A snippet to retrieve JSON data from a password-protected site:
import requests
username = "my_user_name"
password = "my_super_secret"
url = "https://www.my_base_url.com"
the_page_i_want = "/my_json_data_page"
session = requests.Session()
# retrieve cookie value
resp = session.get(url+'/login')
csrf_token = resp.cookies['csrftoken']
# login, add referer
resp = session.post(url + "/login",
                    data={
                        'username': username,
                        'password': password,
                        'csrfmiddlewaretoken': csrf_token,
                        'next': the_page_i_want,
                    },
                    headers=dict(Referer=url + "/login"))
print(resp.json())
This will work for you in Python:
# Call JIRA API with HTTPBasicAuth
import json
import requests
from requests.auth import HTTPBasicAuth
JIRA_EMAIL = "****"
JIRA_TOKEN = "****"
BASE_URL = "https://****.atlassian.net"
API_URL = "/rest/api/3/serverInfo"
API_URL = BASE_URL+API_URL
BASIC_AUTH = HTTPBasicAuth(JIRA_EMAIL, JIRA_TOKEN)
HEADERS = {'Content-Type' : 'application/json;charset=iso-8859-1'}
response = requests.get(
API_URL,
headers=HEADERS,
auth=BASIC_AUTH
)
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": ")))
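A small hardening note (not part of the original snippet): raising on HTTP errors before parsing avoids a confusing JSON decode error when the server answers with an error page:

response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
print(json.dumps(response.json(), sort_keys=True, indent=4, separators=(",", ": ")))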