Script gets stuck while sending post requests with parameters - python

I'm trying to fetch a JSON response by issuing a POST request with the appropriate parameters, mimicking what the webpage does. When I run the script, it gets stuck and never produces a result. It doesn't throw any error either. This is the site link. I chose three options from the three dropdowns in the form on that site before hitting the Get times & tickets button.
I've tried with:
import requests
from bs4 import BeautifulSoup
url = 'https://www.thetrainline.com/'
link = 'https://www.thetrainline.com/api/journey-search/'
payload = {"passengers":[{"dateOfBirth":"1991-01-31"}],"isEurope":False,"cards":[],"transitDefinitions":[{"direction":"outward","origin":"1f06fc66ccd7ea92ae4b0a550e4ddfd1","destination":"7c25e933fd14386745a7f49423969308","journeyDate":{"type":"departAfter","time":"2021-02-11T22:45:00"}}],"type":"single","maximumJourneys":4,"includeRealtime":True,"applyFareDiscounts":True}
with requests.Session() as s:
    s.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'
    s.headers['content-type'] = 'application/json'
    s.headers['accept'] = 'application/json'
    r = s.post(link, json=payload)
    print(r.status_code)
    print(r.json())
How can I get the JSON response by issuing a POST request with parameters to that site?

You are missing the required headers: x-version and referer. The referer header refers to the search form, and you can build it. Before calling journey-search, you have to post an availability request.
import requests
from requests.models import PreparedRequest

headers = {
    'authority': 'www.thetrainline.com',
    'pragma': 'no-cache',
    'cache-control': 'no-cache',
    'x-version': '2.0.18186',
    'dnt': '1',
    'accept-language': 'en-GB',
    'sec-ch-ua-mobile': '?0',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/88.0.4324.96 Safari/537.36',
    'content-type': 'application/json',
    'accept': 'application/json',
    'origin': 'https://www.thetrainline.com',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
}

with requests.Session() as s:
    origin = "6e2242b3f38bbbd8d8124e1d84d319e1"
    destination = "15bcf02bc44ea754837c8cf14569f608"
    localDateTime = "2021-02-03T19:30:00"
    dateOfBirth = "1991-02-03"
    passenger_type = "single"

    # build the referer header to look like the search-results page
    req = PreparedRequest()
    params = {
        "origin": origin,
        "destination": destination,
        "outwardDate": localDateTime,
        "outwardDateType": "departAfter",
        "journeySearchType": passenger_type,
        "passengers[]": dateOfBirth
    }
    req.prepare_url("https://www.thetrainline.com/book/results", params)
    headers.update({"referer": req.url})
    s.headers = headers

    # the availability request must be posted before journey-search
    payload_availability = {
        "origin": origin,
        "destination": destination,
        "outwardDefinition": {
            "localDateTime": localDateTime,
            "searchMethod": "DEPARTAFTER"
        },
        "passengerBirthDates": [{
            "id": "PASSENGER-0",
            "dateOfBirth": dateOfBirth
        }],
        "maximumNumberOfJourneys": 4,
        "discountCards": []
    }
    r = s.post('https://www.thetrainline.com/api/coaches/availability', json=payload_availability)
    r.raise_for_status()

    payload_search = {
        "passengers": [{"dateOfBirth": dateOfBirth}],
        "isEurope": False,
        "cards": [],
        "transitDefinitions": [{
            "direction": "outward",
            "origin": origin,
            "destination": destination,
            "journeyDate": {
                "type": "departAfter",
                "time": localDateTime}
        }],
        "type": passenger_type,
        "maximumJourneys": 4,
        "includeRealtime": True,
        "applyFareDiscounts": True
    }
    r = s.post('https://www.thetrainline.com/api/journey-search/', json=payload_search)
    r.raise_for_status()
    print(r.json())
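Note: the x-version value is presumably tied to the site's frontend build at the time this answer was written; if the request starts failing, grab a fresh value (and fresh station hash IDs) from the browser's network tab.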

As Sers's reply shows, headers are missing.
When crawling websites, keep anti-crawling mechanisms in mind. The website may block your requests based on your IP address, request headers, cookies, and various other factors.
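A minimal defensive sketch, assuming only the standard requests/urllib3 stack: reuse one session so cookies persist, send browser-like headers, and back off on 429/5xx responses instead of hammering the endpoint.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
# browser-like defaults applied to every request on this session
session.headers.update({
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36',
    'accept-language': 'en-GB',
})
# retry transient blocks with exponential backoff (1s, 2s, 4s)
retry = Retry(total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503])
session.mount('https://', HTTPAdapter(max_retries=retry))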

Related

How can I add threads to this Python code to make multiple requests

I am creating a custom tool for login brute-forcing on a web application for bug bounty hunting. I came across a bug on one web application, so I had to create my own tool to brute-force it. This is not a complete tool, but I need a solution for adding threads to the current code.
import requests
import re

exploit = open('password.txt', 'r').readlines()
headers = {
    'Host': 'TARGET.COM',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5',
    'Connection': 'close',
    'Upgrade-Insecure-Requests': '1',
    'Sec-Fetch-Dest': 'iframe',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1'
}
for line in exploit:
    params = {
        'execution': '111u9342',
        'client_id': 'client-23429df',
        'tab_id': '234324',
    }
    password = line.strip()
    http = requests.post('https://www.target.com/test',
                         params=params,
                         headers=headers,
                         data={'username': myname, 'password': password},
                         verify=False,
                         proxies=proxies)
    content = http.content
print("finished")
I am a beginner in Python.
You can use ThreadPoolExecutor:
from concurrent.futures import ThreadPoolExecutor
import requests

# ....Other code parts...

def base_post(url, params, headers, data, verify, proxies, timeout=10):
    # one request per password candidate; runs in a worker thread
    response = requests.post(url, params=params, headers=headers, data=data,
                             verify=verify, proxies=proxies, timeout=timeout)
    return response

total_possibilities = []
for line in exploit:
    params = {
        'execution': '111u9342',
        'client_id': 'client-23429df',
        'tab_id': '234324',
    }
    password = line.strip()
    total_possibilities.append({'url': "...",
                                'params': params,
                                'headers': headers,
                                'data': {'username': myname, 'password': password},
                                'verify': False,
                                'proxies': proxies})

results = []
with ThreadPoolExecutor(max_workers=3) as executor:
    for row in total_possibilities:
        results.append(executor.submit(base_post, **row))
print(results)
Don't forget to update "max_workers" based on your needs.
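Note that executor.submit returns Future objects, so the print above only shows pending futures. A short sketch of collecting the actual responses (assuming the results list built above):
from concurrent.futures import as_completed

for future in as_completed(results):
    response = future.result()  # re-raises any exception from base_post
    print(response.status_code)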

Supreme adding to cart with Python Requests

So I'm currently trying to cart an item on Supreme through requests. After posting the carting request I don't get any errors, just a response saying it didn't work.
#imports
import requests
from bs4 import BeautifulSoup
#constants
baseurl = "https://www.supremenewyork.com/"
product_category = ""
size = ["Medium"]
product_keywords = ["Supreme®/The North Face® Steep Tech Fleece Pant"]
product_style = ["Brown"]
#functions
def carting(url):
    session = requests.Session()
    r = session.get(baseurl + url)
    soup = BeautifulSoup(r.text, "html.parser")
    name = soup.find("h1", {"itemprop": "name"}).text
    style = soup.find("p", {"itemprop": "model"}).text
    for keyword in product_keywords:
        if keyword in name:
            for keyword in product_style:
                if keyword in style:
                    print("Product Found! Adding to cart...")
                    form = soup.find("form", {"id": "cart-add"})
                    payload = {
                        "utf8": "✓",
                        "authenticity_token": form.find("input", {"name": "authenticity_token"})["value"],
                        "style": form.find("input", {"name": "style"})["value"],
                        "size": "92001",  # form.find("select", {"id" : "size"})["value"] Need to rework getting Size through Keyword
                        "qty": "1"
                    }
                    headers = {
                        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
                        "origin": "https://www.supremenewyork.com",
                        "referer": baseurl + url,
                        "path": form["action"],
                        'Host': 'www.supremenewyork.com',
                        'Accept': 'application/json',
                        'Proxy-Connection': 'keep-alive',
                        'X-Requested-With': 'XMLHttpRequest',
                        'Accept-Encoding': 'gzip, deflate',
                        'Accept-Language': 'en-us',
                        'Content-Type': 'application/x-www-form-urlencoded',
                        'Origin': 'http://www.supremenewyork.com',
                        'Connection': 'keep-alive',
                    }
                    response = session.post(baseurl + form["action"], data=payload)
                    print(response.text)
    return session

carting("/shop/pants/mj1czv0pa/jcyp91a8w")
The answer I get printed is
Product Found! Adding to cart...
{"cart":[],"success":false}
I wonder if I just have to gamble on which of the headers to include for it to work, since maybe the site expects certain headers, or even all the headers a browser would send, to be included.
Help appreciated!
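One thing worth noting in the snippet above: the headers dict is built but never passed to the POST, so none of those headers are actually sent with the add-to-cart request. A minimal sketch of attaching them, reusing the names already defined in carting():
response = session.post(baseurl + form["action"], data=payload, headers=headers)
# or set them once for every request on the session:
# session.headers.update(headers)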

Zillow scraper: Why can't I scrape the full listing from a Zillow search

I am trying to explore Zillow housing data for analysis, but I found that the data I scraped from Zillow was much less than the listing.
Here is one example:
I try to pull the house-for-sale listing for 35216:
https://www.zillow.com/birmingham-al-35216/?searchQueryState=%7B%22usersSearchTerm%22%3A%2235216%22%2C%22mapBounds%22%3A%7B%22west%22%3A-86.93997505787829%2C%22east%22%3A-86.62926796559313%2C%22south%22%3A33.33562772711966%2C%22north%22%3A33.51819716059094%7D%2C%22regionSelection%22%3A%5B%7B%22regionId%22%3A73386%2C%22regionType%22%3A7%7D%5D%2C%22isMapVisible%22%3Atrue%2C%22filterState%22%3A%7B%22ah%22%3A%7B%22value%22%3Atrue%7D%2C%22sort%22%3A%7B%22value%22%3A%22globalrelevanceex%22%7D%7D%2C%22isListVisible%22%3Atrue%2C%22mapZoom%22%3A13%2C%22pagination%22%3A%7B%7D%7D
We can see there are 76 records, and if I use the Google Chrome extension Zillow-to-excel, all 76 houses in the listing can be scraped.
https://chrome.google.com/webstore/detail/zillow-to-excel/aecdekdgjlncaadbdiciepplaobhcjgi/related
But when I use Python with requests to scrape Zillow data, only 18-20 records can be scraped.
Here is my code:
import requests
import json
from bs4 import BeautifulSoup as soup
import pandas as pd
import numpy as np
cnt = 0
stop_check = 0
ele = []
url = 'https://www.zillow.com/birmingham-al-35216/'
headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
for i in range(1, 2):
    params = {
        'searchQueryState': '{"pagination":{"currentPage":'+str(i)+'},"usersSearchTerm":"35216","mapBounds":{"west":-86.83314614582643,"east":-86.73781685417354,"south":33.32843303639682,"north":33.511017584543204},"regionSelection":[{"regionId":73386,"regionType":7}],"isMapVisible":true,"filterState":{"sort":{"value":"globalrelevanceex"},"ah":{"value":true}},"isListVisible":true,"mapZoom":13}'
    }
    page = requests.get(url, headers=headers, params=params, timeout=2)
    sp = soup(page.content, 'lxml')
    lst = sp.find_all('address', {'class': 'list-card-addr'})
    ele.extend(lst)
    print(i, len(lst))
    if len(lst) == 0:
        stop_check += 1
        if stop_check >= 3:
            print('stop on three empty')
Headers and params come from the browser using the Chrome developer tools. I also tried other searches and found I can only scrape the first 9-11 records on each page.
I know there is a Zillow API, but it could not be used for a general search like all houses in a zipcode, so I want to try web scraping.
May I have some suggestions on how to fix my code?
Thanks a lot!
You can try this:
import requests
import json

url = 'https://www.zillow.com/search/GetSearchPageState.htm'
headers = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9',
    'upgrade-insecure-requests': '1',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
}
houses = []
for page in range(1, 3):
    params = {
        "searchQueryState": json.dumps({
            "pagination": {"currentPage": page},
            "usersSearchTerm": "35216",
            "mapBounds": {
                "west": -86.97413567189196,
                "east": -86.57244804982165,
                "south": 33.346263857015515,
                "north": 33.48754107532057
            },
            "mapZoom": 12,
            "regionSelection": [
                {
                    "regionId": 73386, "regionType": 7
                }
            ],
            "isMapVisible": True,
            "filterState": {
                "isAllHomes": {
                    "value": True
                },
                "sortSelection": {
                    "value": "globalrelevanceex"
                }
            },
            "isListVisible": True
        }),
        "wants": json.dumps(
            {
                "cat1": ["listResults", "mapResults"],
                "cat2": ["total"]
            }
        ),
        "requestId": 3
    }
    # send request
    response = requests.get(url, headers=headers, params=params)
    # get json data
    json_data = response.json()
    # loop over the results
    for house in json_data['cat1']['searchResults']['listResults']:
        houses.append(house)

# show data
print('Total houses - {}'.format(len(houses)))
# show info for each house
for house in houses:
    if 'brokerName' in house.keys():
        print('{}: {}'.format(house['brokerName'], house['price']))
    else:
        print('No broker: {}'.format(house['price']))
Total houses - 76
RealtySouth-MB-Crestline: $424,900
eXp Realty, LLC Central: $259,900
ARC Realty Mountain Brook: $849,000
Ray & Poynor Properties: $499,900
Hinge Realty: $1,550,000
...
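The HTML scrape most likely tops out at 9-11 address elements because only the first batch of listing cards is present in the static HTML; the rest are rendered client-side as you scroll. The GetSearchPageState.htm endpoint above returns the full result set as JSON, so no HTML parsing is needed.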

Python requests POST method unexpected content in response

I am trying to get the EPG data from the web page https://www.meo.pt/tv/canais-programacao/guia-tv using Python requests. I use this module a lot, but mainly the GET method. This request, however, uses POST. Every time you scroll down the page, a request is sent to the API below with these params to load additional program data onto the page:
import requests
#post request
url = 'https://www.meo.pt/_layouts/15/Ptsi.Isites.GridTv/GridTvMng.asmx/getProgramsFromChannels'
headers = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
    'Connection': 'keep-alive',
    'Content-Length': '214',
    'Content-type': 'application/json; charset=UTF-8',
    'Host': 'www.meo.pt',
    'Origin': 'https://www.meo.pt',
    'Referer': 'https://www.meo.pt/tv/canais-programacao/guia-tv',
    'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36',
    'X-KL-Ajax-Request': 'Ajax_Request'
}
data = {"service": "channelsguide",
        "channels": ["LVTV", "TOROTV", "CAÇAP", "CAÇAV", "RTPACRS", "CLUBB", "MCM T", "TRACE", "24KITC", "E!"],
        "dateStart": "2021-04-20T23:00:00.000Z",
        "dateEnd": "2021-04-21T23:00:00.000Z",
        "accountID": ""}
r = requests.post(url=url, headers=headers, data=data)
print(r.text)
I have tried this request both with and without the headers, as I don't know whether they are needed for a POST request. However, neither option returns what I was expecting: a JSON object containing the program data for these channels.
What am I doing wrong?
Consider using the json argument instead of data in the request call. The json argument serializes your body to JSON format, whereas with data you are sending a raw form-encoded dictionary.
data = {"service": "channelsguide",
        "channels": ["LVTV", "TOROTV", "CAÇAP", "CAÇAV", "RTPACRS", "CLUBB", "MCM T", "TRACE", "24KITC", "E!"],
        "dateStart": "2021-04-20T23:00:00.000Z",
        "dateEnd": "2021-04-21T23:00:00.000Z",
        "accountID": ""}
r = requests.post(url=url, headers=headers, json=data)
If you want to keep using the data argument, you should serialize the data dictionary to JSON yourself to send the correct body format, as sketched below.
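A minimal sketch of that variant, reusing the url, headers, and data defined above:
import json
r = requests.post(url=url, headers=headers, data=json.dumps(data))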
You can use this example of how to POST JSON data to the API URL:
import json
import requests
url = "https://www.meo.pt/_layouts/15/Ptsi.Isites.GridTv/GridTvMng.asmx/getProgramsFromChannels"
payload = {
    "accountID": "",
    "channels": [
        "SCPHD",
        "EURHD",
        "EURS2HD",
        "DISNY",
        "CART",
        "BIGGS",
        "SICK",
        "NICKELO",
        "DISNYJ",
        "PANDA",
    ],
    "dateEnd": "2021-04-21T22:00:00.000Z",
    "dateStart": "2021-04-20T22:00:00.000Z",
    "service": "channelsguide",
}
data = requests.post(url, json=payload).json()
# pretty print the data:
print(json.dumps(data, indent=4))
Prints:
{
    "d": {
        "__type": "Ptsi.Isites.GridTv.CanaisService.GridTV",
        "ExtensionData": {},
        "services": [],
        "channels": [
            {
                "__type": "Ptsi.Isites.GridTv.CanaisService.Channels",
                "ExtensionData": {},
                "id": 36,
                "name": "SPORTING TV HD",
                "sigla": "SCPHD",
                "friendlyUrlName": "Sporting_TV_HD",
                "url": "https://meogo.meo.pt/direto?canalUrl=Sporting_TV_HD",
                "meogo": true,
                "logo": "https://www.meo.pt/PublishingImages/canais/sporting-tv-hd.png",
                "isAdult": false,
                "categories": [
                    {
                        "ExtensionData": {},
                        "id": 2,
                        "name": "Desporto"
                    }
                ],
                ...
If you want to keep using the data argument, you should serialize the data dictionary to JSON to send the correct body format. And you should set headers as:
headers = {
    'Content-type': 'application/json'
}
The complete code is:
import json
import requests
url = "https://www.meo.pt/_layouts/15/Ptsi.Isites.GridTv/GridTvMng.asmx/getProgramsFromChannels"
headers = {
    'Content-type': 'application/json'
}
payload = {
    "accountID": "",
    "channels": [
        "SCPHD",
        "EURHD",
        "EURS2HD",
        "DISNY",
        "CART",
        "BIGGS",
        "SICK",
        "NICKELO",
        "DISNYJ",
        "PANDA",
    ],
    "dateEnd": "2021-04-21T22:00:00.000Z",
    "dateStart": "2021-04-20T22:00:00.000Z",
    "service": "channelsguide",
}
resp = requests.post(url,headers=headers,data=json.dumps(payload))
print(resp.text)
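Both variants produce the same request body; the practical difference is that the json argument also sets the Content-Type: application/json header for you, while with data=json.dumps(...) you must set it yourself, as the headers dict above does.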

Python Requests AJAX Response Different from Browser Due to Cookie Handling

My request:
# python 3.7.3
import requests
from requests import Session
session = Session()
session.head('https://www.basspro.com/shop/en/blazer-brass-handgun-ammo')
cookies = requests.utils.cookiejar_from_dict(requests.utils.dict_from_cookiejar(session.cookies))
response = session.post(
    url='https://www.basspro.com/shop/BPSGetInventoryStatusByIDView',
    data={
        'productId': '3074457345616736172',
        'itemId': '3074457345616736949',
        'isGunFlag': 'false',
    },
    cookies=cookies,
    headers={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-length': '72',
        'content-type': 'application/x-www-form-urlencoded',
        'origin': 'https://www.basspro.com',
        'referer': 'https://www.basspro.com/shop/en/blazer-brass-handgun-ammo',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.92 Safari/537.36 Vivaldi/2.9.1705.38',
        'x-requested-with': 'XMLHttpRequest',
    },
)
print(response.text)
Output:
<input type="hidden" class="relativeToAbsolute" value="true" />
/*
{
    "onlineInventory": {
        "status": "Status Not Available",
        "image": "widget_product_info/outofstock_icon.svg",
        "altText": "Status Not Available",
        "isDropShip": false,
        "availableDate": ""
    },
    "inStoreInventory": {
        "stores": [],
        "checkStoreText": "Check Store Availability",
        "isInStoreInventory": true,
        "isPickupInventory": false
    }
}
*/
My output when inspecting and running the same AJAX request via browser:
/*
{
    "onlineInventory": {
        "status": "Backordered",
        "image": "widget_product_info/backordered_icon.svg",
        "altText": "Backordered",
        "isDropShip": false,
        "quantity": 0,
        "availableDate": "May 1-8"
    },
    "inStoreInventory": {
        "stores": [{
            id: '715839555',
            name: '83',
            gunRestricted: 'false',
            dsName: 'TX - Round Rock',
            status: 'Unavailable',
            statusText: 'Out of Stock',
            image: 'widget_product_info/outofstock_icon.svg',
            altText: 'Out of Stock',
            availableDate: '',
            availableQuantity: '',
            availableQuantityDisplay: 'false',
            cityState: 'Round Rock, TX',
            ISPavailableDate: '',
            ISPavailableQuantity: '',
            pickupTime: 'by 2:00pm',
            offerISPOnBPS: 'Yes',
            offerISPOnCAB: 'No'}],
        "checkStoreText": "Change Store",
        "isInStoreInventory": true,
        "isPickupInventory": true
    }
}
*/
I tried assigning cookies this way as well:
url = "https://www.basspro.com/shop/en/blazer-brass-handgun-ammo"
r = requests.get(url)
cookies = r.cookies
# fails to pass the right cookie
If I instead copy the cookie verbatim from an inspected GET request at https://www.basspro.com/shop/en/blazer-brass-handgun-ammo and put that into the POST headers, it works. How do I get cookies to work properly programmatically?
EDIT:
Here's my attempt at just using Session() for cookies:
# python 3.7.3
import requests
from requests import Session
session = Session()
session.get("https://www.basspro.com/shop/en/blazer-brass-handgun-ammo")
# session.head('https://www.basspro.com/shop/en/blazer-brass-handgun-ammo')
response = session.post(
    url='https://www.basspro.com/shop/BPSGetInventoryStatusByIDView',
    data={
        'productId': '3074457345616736172',
        'itemId': '3074457345616736949',
        'isGunFlag': 'false',
    },
    headers={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-length': '72',
        'content-type': 'application/x-www-form-urlencoded',
        'origin': 'https://www.basspro.com',
        'referer': 'https://www.basspro.com/shop/en/blazer-brass-handgun-ammo',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.92 Safari/537.36 Vivaldi/2.9.1705.38',
        'x-requested-with': 'XMLHttpRequest',
    },
)
print(response.text)
I get the same result as before ("status": "Status Not Available", etc.)
Here's my attempt at the second solution:
# python 3.7.3
import requests
from requests import Session
url = "https://www.basspro.com/shop/en/blazer-brass-handgun-ammo"
r = requests.get(url)
cookies = r.cookies # the type is RequestsCookieJar
response = requests.post(
    url='https://www.basspro.com/shop/BPSGetInventoryStatusByIDView',
    data={
        'productId': '3074457345616736172',
        'itemId': '3074457345616736949',
        'isGunFlag': 'false',
    },
    cookies=cookies,
    headers={
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9',
        'content-length': '72',
        'content-type': 'application/x-www-form-urlencoded',
        'origin': 'https://www.basspro.com',
        'referer': 'https://www.basspro.com/shop/en/blazer-brass-handgun-ammo',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.92 Safari/537.36 Vivaldi/2.9.1705.38',
        'x-requested-with': 'XMLHttpRequest',
    },
)
print(response.text)
Again, I get the same result as before. What am I doing wrong?
Can you try it like this?
session = Session()
session.get("https://www.basspro.com/shop/en/blazer-brass-handgun-ammo")
Then make all the following calls with session.xxx and do not use the cookies parameter in them.
Another way I have tested:
cookies = r.cookies  # the type is RequestsCookieJar
requests.post(.... cookies=cookies...)
At last, I tested that this works. Please compare carefully:
from requests import Session

session = Session()
agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
# the initial GET stores the site's cookies on the session
r1 = session.get("https://www.basspro.com/shop/en/blazer-brass-handgun-ammo", headers={'user-agent': agent})
response = session.post(
    url='https://www.basspro.com/shop/BPSGetOnlineInventoryStatusByIDView',
    data={
        'productId': '3074457345616736172',
        'catalogId': '3074457345616676768',
        'storeId': '715838534',
        'langId': -1
    },
    headers={
        'user-agent': agent,
        'x-requested-with': 'XMLHttpRequest',
    },
    cookies=r1.cookies
)
print(response.text)
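The key differences from the original attempt: the same User-Agent is sent on both the initial GET and the POST, the cookies set by the page load are carried over (via the session plus r1.cookies), and the POST targets the BPSGetOnlineInventoryStatusByIDView endpoint with catalogId/storeId/langId form fields instead of the itemId/isGunFlag pair.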
