Python - Print specific cookie value from request - python

Code:
from bs4 import BeautifulSoup
import requests

# Use ONE Session for every request so cookies (JSESSIONID, the CSRF cookie,
# ...) are shared between the token fetch and the add-to-cart POST.
s = requests.Session()
s.get('https://www.clos19.com/en-gb/')

# GET the CSRF token with the SAME session. The original used requests.get()
# here, which creates a separate, empty cookie jar — the token it scraped
# belonged to a different session than the one that later posted it.
r = s.get('https://www.clos19.com/en-gb/login')
soup = BeautifulSoup(r.text, 'html.parser')
CSRFToken = soup.find('input', type='hidden')["value"]
print(CSRFToken)  # reuse the variable instead of re-parsing the document

# ADD TO CART
payload = {
    'productCode': '100559',
    'qty': '1',
    'CSRFToken': CSRFToken,
}
r = s.post('https://www.clos19.com/en-gb/ajax/addToCart', data=payload, allow_redirects=False)
print(r.status_code)
print(s.cookies.get_dict())
Terminal output:
{'JSESSIONID': '448858956754C2F8F2DCF1CC4B803833', 'ROUTEID': '.app1', 'mhPreferredCountry': 'GB', 'mhPreferredLanguage': 'en_GB'}
How do I print only the JSESSIONID? Therefore the desired printed value would be 448858956754C2F8F2DCF1CC4B803833

change the last line to
# Index the session's cookie jar by cookie name to print a single value
# (raises KeyError if the cookie is absent).
print(s.cookies['JSESSIONID'])

Related

Pagination SendinBlue Api Call

I've been trying to get data from the SendinBlue API. The problem is that the API has a limit of 100 records per call and my Python loop is not working properly. This is what I have so far; the single call works fine.
import requests
import pandas as pd
from pandas import json_normalize
import json

results = []
pagination = 0

url = "https://api.sendinblue.com/v3/smtp/statistics/events"
# SendinBlue returns at most 100 events per call; 'offset' pages through them.
querystring = {"limit": "100", "offset": pagination, "days": "15"}
headers = {
    "Accept": "application/json",
    "api-key": "XXXXXXX",  # NOTE: keep real API keys out of source control
}

# API response
response = requests.request("GET", url, headers=headers, params=querystring)
# Parse the JSON body; response.json() is the idiomatic form of
# json.loads(response.text).
data = response.json()
# Flatten the 'events' records into a DataFrame.
base = pd.json_normalize(data, record_path='events')
The data structure is like this:
{'events': [
  {'email': 'chusperu@gmail.com',
   'date': '2020-10-18T17:18:58.000-05:00',
   'subject': 'Diego, ¡Gracias por registrarte! 😉',
   'messageId': '<202010181429.12179607081@smtp-relay.mailin.fr>',
   'event': 'opened',
   'tag': '',
   'from': 'ventas01@grupodymperu.com'},
  {'email': 'cynthiaapurimac@gmail.com',
   'date': '2020-10-18T17:52:56.000-05:00',
   'subject': 'Alvarado, ¡Gracias por registrarte! 😉',
   'messageId': '<202010182252.53640747487@smtp-relay.mailin.fr>',
   'event': 'requests',
   'tag': '',
   'from': 'ventas01@grupodymperu.com'},
  ....
The loop I have tried is this, but it only paginates the first 200 records. What am I doing wrong?
# Collect the first page that was already fetched above.
results = list(data['events'])

# BUG in the original: the `else: break` was attached to the inner for-loop.
# A for/else runs its else suite whenever the for finishes normally, so the
# while exited after a single extra page — hence only ~200 records.
# Keep paging until the API returns no more events (or a non-200 status).
while response.status_code == 200:
    pagination += 100
    querystring['offset'] = pagination
    response = requests.request("GET", url, headers=headers, params=querystring)
    data = response.json()
    events = data.get('events', [])
    if not events:  # empty page -> no more records
        break
    results.extend(events)

print(results)
Finally get it.
import requests
import pandas as pd
from pandas import json_normalize
import json

# Excel = "C:/Users/User/PycharmProjects/Primero/DataSendin.xlsx"

pagination = 0
url = "https://api.sendinblue.com/v3/smtp/statistics/events"
querystring = {"limit": "100", "offset": f"{pagination}", "days": "3"}
headers = {
    "Accept": "application/json",
    "api-key": "Your API key",
}

# First API call
response = requests.request("GET", url, headers=headers, params=querystring)
try:
    results = []
    # Parse the JSON body (response.json() raises ValueError on bad JSON,
    # which the except below handles).
    data = response.json()
    if not data:
        print("no hay data")
    else:
        results.append(data)
        # Page through the remaining results, 100 at a time, until the API
        # returns an empty body or a non-200 status.
        while response.status_code == 200:
            pagination += 100
            querystring['offset'] = pagination
            response = requests.request("GET", url, headers=headers, params=querystring)
            data = response.json()
            if not data:  # stop BEFORE appending an empty page
                break
            results.append(data)
except ValueError:
    # The original had a bare string expression here — a no-op; report instead.
    print("no data")

# Drop any falsy entries, then flatten every page's 'events' records.
final = list(filter(None, results))
base = pd.json_normalize(final, record_path='events')
base

scraping data from a web page with python requests

I am trying to scrape domain search page (where you can enter the keyword, and get some random results) and
I found this api url in network tab https://api.leandomainsearch.com/search?query=computer&count=all (for keyword: computer), but I am getting this error
{'error': True, 'message': 'Invalid API Credentials'}
here is the code
import requests
# This endpoint rejects anonymous calls: without the Authorization and
# Referer headers the API answers {'error': True, 'message': 'Invalid API Credentials'}.
# (The query typo 'cmputer' is preserved from the original post.)
r = requests.get("https://api.leandomainsearch.com/search?query=cmputer&count=all")
print(r.json())
The site requires you to set the Authorization and Referer HTTP headers.
For example:
import re
import json
import requests

kw = 'computer'
url = 'https://leandomainsearch.com/search/'
api_url = 'https://api.leandomainsearch.com/search'

# The public search page embeds its own API key as "apiKey":"..." in the
# HTML; scrape it and send it back as the Authorization header.
match = re.search(r'"apiKey":"(.*?)"', requests.get(url, params={'q': kw}).text)
if match is None:
    # Fail with a clear message instead of the opaque TypeError raised by
    # subscripting None if the page layout ever changes.
    raise RuntimeError('could not find "apiKey" in the search page')
api_key = match[1]

headers = {'Authorization': 'Key ' + api_key, 'Referer': 'https://leandomainsearch.com/search/?q={}'.format(kw)}
data = requests.get(api_url, params={'query': kw, 'count': 'all'}, headers=headers).json()

# uncomment this to print all data:
# print(json.dumps(data, indent=4))

for d in data['domains']:
    print(d['name'])
print()
print('Total:', data['_meta']['total_records'])
Prints:
...
blackopscomputer.com
allegiancecomputer.com
northpolecomputer.com
monumentalcomputer.com
fissioncomputer.com
hedgehogcomputer.com
blackwellcomputer.com
reflectionscomputer.com
towerscomputer.com
offgridcomputer.com
redefinecomputer.com
quantumleapcomputer.com
Total: 1727

How do I unpack a Python requests.response object and extract string representations of its data? [duplicate]

# Python 2 example: the Response object only exposes cookies the SERVER set.
x = requests.post(url, data=data)
# x.cookies is a RequestsCookieJar holding the server-set cookies.
print x.cookies
I used the requests library to get some cookies from a website, but I can only get the cookies from the Response. How do I get the cookies from the Request? Thanks!
Alternatively, you can use requests.Session and observe cookies before and after a request:
>>> import requests
>>> session = requests.Session()
>>> print(session.cookies.get_dict())
{}
>>> response = session.get('http://google.com')
>>> print(session.cookies.get_dict())
{'PREF': 'ID=5514c728c9215a9a:FF=0:TM=1406958091:LM=1406958091:S=KfAG0U9jYhrB0XNf', 'NID': '67=TVMYiq2wLMNvJi5SiaONeIQVNqxSc2RAwVrCnuYgTQYAHIZAGESHHPL0xsyM9EMpluLDQgaj3db_V37NjvshV-eoQdA8u43M8UwHMqZdL-S2gjho8j0-Fe1XuH5wYr9v'}
If you need the path and the domain for each cookie, which get_dict() does not expose, you can inspect the cookies manually, for instance:
# get_dict() drops path/domain metadata, so iterate the jar's Cookie
# objects directly when those attributes are needed.
[
{'name': c.name, 'value': c.value, 'domain': c.domain, 'path': c.path}
for c in session.cookies
]
# Authenticate against a local JSON endpoint, then show the cookies the
# server set on the response.
url = "http://localhost:8070/web/session/authenticate"
data = {}
header = {"Content-Type": "application/json"}
# json=data serializes the dict as a JSON request body.
x = requests.post(url, json=data, headers=header)
print(x.cookies.get_dict())

Differences of pythons urllib , urllib2 and requests libary

I have 2 scripts which submits a post request with Ajax parameters.
One script uses the requests libary (script that works).
The other one uses urllib & urllib2.
At the moment I have no idea why the urllib script does not work.
Can anybody help?
Script using Request:
import requests

s = requests.Session()
url = "https://www.shirtinator.de/?cT=search/motives&sq=junggesellenabschied"

# Ajax parameters exactly as the site's own JavaScript submits them;
# the list value is sent as repeated 'xajaxargs[]' form fields.
data1 = {
    'xajax': 'searchBrowse',
    'xajaxr': '1455134430801',
    'xajaxargs[]': ['1', 'true', 'true', 'motives', '100'],
}

# X-Requested-With marks this as an XMLHttpRequest, which the endpoint expects.
# WARNING: verify=False disables TLS certificate verification — drop it
# unless you really must talk to a host with a broken certificate.
r = s.post(url, data=data1, headers={'X-Requested-With': 'XMLHttpRequest'}, verify=False)

result = r.text
# Single-argument print() is valid in both Python 2 and Python 3.
print(result)
print(result.count("motiveImageBox"))
Script using urllib:
import urllib2
import urllib

url = "https://www.shirtinator.de/?cT=search/motives&sq=junggesellenabschied"
data = {
    'xajax': 'searchBrowse',
    'xajaxr': '1455134430801',
    'xajaxargs[]': ['1', 'true', 'true', 'motives', '100'],
}
# THE BUG: urlencode(data) stringifies the list value — it sends the repr
# of the whole list as one 'xajaxargs[]' parameter. doseq=True emits one
# 'xajaxargs[]' pair per element, matching what the requests version sends.
encode_data = urllib.urlencode(data, doseq=True)
print(encode_data)

# Supplying a data argument makes urllib2 issue a POST.
req = urllib2.Request(url, encode_data)
response = urllib2.urlopen(req)
d = response.read()
print(d)
print(d.count("motiveImageBox"))

Python: how to send html-code in a POST request

I try to send htm-code from python script to Joomla site.
# Build an urlencoded POST body containing HTML and send it with urllib2.
description = "<h1>Header</h1><p>text</p>"
# NOTE(review): self.encoding / self.userAgent / self.addr come from an
# enclosing object that is not shown here — confirm against the full class.
values = {'description' : description.encode(self.encoding),
# NOTE(review): the key 'id = ' (with spaces and '=') looks unintended;
# the server most likely expects plain 'id'.
'id = ' : 5,
}
data = urlencode(values)
binData = data.encode(self.encoding)
headers = { 'User-Agent' : self.userAgent,
'X-Requested-With' : 'XMLHttpRequest'}
# Supplying a data argument makes this a POST request.
req = urllib2.Request(self.addr, binData, headers)
response = urllib2.urlopen(req)
rawreply = response.read()
At Joomla-server I got the same string but without html:
$desc = JRequest::getVar('description', '', 'POST');
What's wrong?
You should use requests
pip install requests
then
import requests

description = "<h1>Header</h1><p>text</p>"
# BUG FIX: the original wrote dict(description='description', id=5), which
# posts the literal string 'description' instead of the HTML held in the
# variable — so the server never received the markup.
values = dict(description=description, id=5)
response = requests.post(self.addr, data=values)
if response.ok:
    # Single-argument print() works in both Python 2 and Python 3.
    print(response.json())
or, if Joomla didn't return JSON:
# Fall back to the raw response body when it is not JSON (Python 2 print).
print response.content
JRequest::getVar or JRequest::getString filter HTML code. But it can be turned off:
$desc = JRequest::getVar('description', '', 'POST', 'string', JREQUEST_ALLOWHTML);

Categories

Resources