I want to access a site with python requests, but I get a 403 error even though I copied the browser's headers and used them. Here is my code. Can anybody solve this problem?
import requests
Url = 'https://bama.ir/'
session = requests.Session()
headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:98.0) Gecko/20100101 Firefox/98.0',
    'Accept-Language': 'en-US,en;q=0.5',
    'Upgrade-Insecure-Requests': '1',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Connection': 'keep-alive',
}
session.headers = headers
r = session.get(Url, headers=headers)
It seems like python-requests is getting detected. You might try the approach from the related answer below, which uses cloudscraper to get past the bot protection.
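For example, a minimal sketch of that approach for this URL (assuming the 403 comes from Cloudflare-style bot protection; install the library with pip install cloudscraper first):

import cloudscraper

# create_scraper() returns a requests-compatible session that tries to
# pass the anti-bot browser checks before sending the request
scraper = cloudscraper.create_scraper()
response = scraper.get('https://bama.ir/')
print(response.status_code)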
Related
I am trying to send a request to a website, but I am getting a 503 status code. The website appears to be protected by Cloudflare. Is it possible to send a request to a Cloudflare-protected website with the python-requests library? I have sent cookies and headers along with the request, but it didn't get through.
Below is my code.
import requests
cookies = {
'SSPV_C': 'BPwAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAA',
'locale': 'en',
'cookieID': '390778282271656143963365',
'uui': '800.606.6969|',
'cartId': '42951851197',
'mapp': '0',
'__cfruid': '7f4badea550ab7327454d1e2bac7cdec7c0701cf-1656593179',
'__cf_bm': 'gn_yevoOR3SIcU9B8oDZQ.r_e9300kA61vY264Hls64-1656593179-0-AaRJibHSFeN0Z7jkQTvjq+HQMc3lRPlhM850slZTqy7uy5QzXhmRx3M6rxlwk78kIU+zC8Vb7eDsPpuhdnNOhAkil4ZdBSaZW4pRvSMX53Xd',
'SSLB_C': '0',
'SSID_C': 'CQD72x0AAAAAAABbwLZiApVDB1vAtmICAAAAAAAAAAAAHJu9YgANyA',
'SSSC_C': '333.G7113084158674703618.2|0.0',
'SSRT_C': 'HJu9YgAAAA',
'lpi': 'cat=2,cur=USD,app=D,lang=E,view=L,lgdin=N,cache=release-WEB-20220629v10-BHJ-DVB31150-11,ipp=24,view=L,sort=BS,priv=N,state=',
'sessionKey': 'f6fbd948-2fed-41f9-bcf3-7defa626f36a',
'dpi': 'cat=2,cur=USD,app=D,lang=E,view=L,lgdin=N,cache=release-WEB-20220629v10-BHJ-DVB31150-11',
'utkn': '97655ab6781ce66340f0d2aa809c3f68',
'build': '20220629v10-20220629v10',
'aperture-be-commit-id': 'n/a',
'JSESSIONID': 'SUq0pea5K0bNUyQEJscyjUnJFvvEGjW7!622981770',
'sshc': '61f5b3f36d4907c548b3efc82cfcecd9',
'_pxhd': '53bIlMthB4XG3X644UXFOgn-jRSXY56BvM49fjHfOdSg53A7NqKSOXYc0jBByweKQ4NgEZR/R61UG9ouHxGSUw==:Kfr40D-EuMhLJ4qxdatLAMna184C2zbBIJV3xlOVy2hTdUEI3sN3kCGBQV73oDxdiOoVZAKilYlJZn--t492StGQHTm21i-GiwB5xxziLd8=',
'cf-colo': 'KHI',
'aperture-be-commit-id': 'n/a',
'TS0188dba5': '01ec39615f5a1331c083e7ac7ff7f2895322c069326ea3e7a0fb426c2906479f8fdba41c2cbebcf1669847d2488313d23495cf506ce0991eb9af796b9032458b1a715a28e71e7a31b64b6791644a6f092364bff1d8e79d027277b851adf5faa365dd8e2609',
'TS01d628c4': '01ec39615f8b9833712bd8ae68ec8c0798bd1df2e408a949a17c3772b8419cc7bbfe911b2b2798bd33f09b9e2fa7d6837ec5814f8ca97bd51f8eccc8779214eac7cd387b8f1f1d5097bca3b926c8d264dd80d59d7e4879197618d3a0ef6777bdb5902263106d9d95ac8fd7d92cd8458f02fb7c1409230f71f6b3a638107bbd8a73aa1629da3456ce69fd32f210cf1826979006e713',
'TopBarCart': '0|0',
'dlc': '%43%4D%5F%4D%4D%43%3D%7C%54%59%50%45%44%56%41%4C%55%45%3D%7C%45%4D%4C%45%3D%7C%55%4E%42%49%3D%6E%75%6C%6C%7C%4C%4F%4E%47%3D%37%30%2E%30%30%30%30%30%7C%4C%41%54%3D%33%30%2E%30%30%30%30%30%7C',
'app_cookie': '1656593927',
'TS01e1f1fd': '01ec39615fa2aeefd67a3c8e74158e94069993ea3308a949a17c3772b8419cc7bbfe911b2b55db8474b97fd606a862d187b6fdf539dfd177a32a93169e75a1c8599fc7428443914075f1081235d9564cc0fc8b69460d7a08aef755b5c296a42cf6b735f4953465ca238a6965b0625b2de8e4934e04',
'forterToken': 'a1ba6a2e88e74edb91df3bcf567bdd45_1656593924573_588_dUAL43-mnts-ants_13ck',
}
headers = {
'authority': 'www.bhphotovideo.com',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'accept-language': 'en-PK,en;q=0.9,ur-PK;q=0.8,ur;q=0.7,en-GB;q=0.6,en-US;q=0.5,sv;q=0.4,it;q=0.3',
'cache-control': 'no-cache',
# Requests sorts cookies= alphabetically
# 'cookie': 'SSPV_C=BPwAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAA; locale=en; cookieID=390778282271656143963365; uui=800.606.6969|; cartId=42951851197; mapp=0; __cfruid=7f4badea550ab7327454d1e2bac7cdec7c0701cf-1656593179; __cf_bm=gn_yevoOR3SIcU9B8oDZQ.r_e9300kA61vY264Hls64-1656593179-0-AaRJibHSFeN0Z7jkQTvjq+HQMc3lRPlhM850slZTqy7uy5QzXhmRx3M6rxlwk78kIU+zC8Vb7eDsPpuhdnNOhAkil4ZdBSaZW4pRvSMX53Xd; SSLB_C=0; SSID_C=CQD72x0AAAAAAABbwLZiApVDB1vAtmICAAAAAAAAAAAAHJu9YgANyA; SSSC_C=333.G7113084158674703618.2|0.0; SSRT_C=HJu9YgAAAA; lpi=cat=2,cur=USD,app=D,lang=E,view=L,lgdin=N,cache=release-WEB-20220629v10-BHJ-DVB31150-11,ipp=24,view=L,sort=BS,priv=N,state=; sessionKey=f6fbd948-2fed-41f9-bcf3-7defa626f36a; dpi=cat=2,cur=USD,app=D,lang=E,view=L,lgdin=N,cache=release-WEB-20220629v10-BHJ-DVB31150-11; utkn=97655ab6781ce66340f0d2aa809c3f68; build=20220629v10-20220629v10; aperture-be-commit-id=n/a; JSESSIONID=SUq0pea5K0bNUyQEJscyjUnJFvvEGjW7!622981770; sshc=61f5b3f36d4907c548b3efc82cfcecd9; _pxhd=53bIlMthB4XG3X644UXFOgn-jRSXY56BvM49fjHfOdSg53A7NqKSOXYc0jBByweKQ4NgEZR/R61UG9ouHxGSUw==:Kfr40D-EuMhLJ4qxdatLAMna184C2zbBIJV3xlOVy2hTdUEI3sN3kCGBQV73oDxdiOoVZAKilYlJZn--t492StGQHTm21i-GiwB5xxziLd8=; cf-colo=KHI; aperture-be-commit-id=n/a; TS0188dba5=01ec39615f5a1331c083e7ac7ff7f2895322c069326ea3e7a0fb426c2906479f8fdba41c2cbebcf1669847d2488313d23495cf506ce0991eb9af796b9032458b1a715a28e71e7a31b64b6791644a6f092364bff1d8e79d027277b851adf5faa365dd8e2609; TS01d628c4=01ec39615f8b9833712bd8ae68ec8c0798bd1df2e408a949a17c3772b8419cc7bbfe911b2b2798bd33f09b9e2fa7d6837ec5814f8ca97bd51f8eccc8779214eac7cd387b8f1f1d5097bca3b926c8d264dd80d59d7e4879197618d3a0ef6777bdb5902263106d9d95ac8fd7d92cd8458f02fb7c1409230f71f6b3a638107bbd8a73aa1629da3456ce69fd32f210cf1826979006e713; TopBarCart=0|0; dlc=%43%4D%5F%4D%4D%43%3D%7C%54%59%50%45%44%56%41%4C%55%45%3D%7C%45%4D%4C%45%3D%7C%55%4E%42%49%3D%6E%75%6C%6C%7C%4C%4F%4E%47%3D%37%30%2E%30%30%30%30%30%7C%4C%41%54%3D%33%30%2E%30%30%30%30%30%7C; app_cookie=1656593927; TS01e1f1fd=01ec39615fa2aeefd67a3c8e74158e94069993ea3308a949a17c3772b8419cc7bbfe911b2b55db8474b97fd606a862d187b6fdf539dfd177a32a93169e75a1c8599fc7428443914075f1081235d9564cc0fc8b69460d7a08aef755b5c296a42cf6b735f4953465ca238a6965b0625b2de8e4934e04; forterToken=a1ba6a2e88e74edb91df3bcf567bdd45_1656593924573_588_dUAL43-mnts-ants_13ck',
'pragma': 'no-cache',
'referer': 'https://www.bhphotovideo.com/c/buy/Notebooks/ci/6782/N/4110474287',
'sec-ch-ua': '".Not/A)Brand";v="99", "Google Chrome";v="103", "Chromium";v="103"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Windows"',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
}
response = requests.get('https://www.bhphotovideo.com/c/product/1663923-REG/lenovo_82h801ekus_ip3_15itl6_i3_1115g4_8gb.html', cookies=cookies, headers=headers)
print(response)
You can use cloudscraper to get around the Cloudflare protection:
import cloudscraper

# create_scraper() returns a requests-compatible session that tries to
# solve Cloudflare's JavaScript challenge before sending the request
scraper = cloudscraper.create_scraper(delay=10, browser={'custom': 'ScraperBot/1.0'})
url = 'https://www.bhphotovideo.com/c/product/1663923-REG/lenovo_82h801ekus_ip3_15itl6_i3_1115g4_8gb.html'
req = scraper.get(url)
print(req)
Output:
<Response [200]>
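If you then need to parse the page, the scraper response behaves like a normal requests response, so it can be fed to BeautifulSoup as usual (a sketch, assuming bs4 is installed):

from bs4 import BeautifulSoup
import cloudscraper

scraper = cloudscraper.create_scraper(delay=10, browser={'custom': 'ScraperBot/1.0'})
url = 'https://www.bhphotovideo.com/c/product/1663923-REG/lenovo_82h801ekus_ip3_15itl6_i3_1115g4_8gb.html'
req = scraper.get(url)

# req.text holds the product page HTML once the challenge has been passed
soup = BeautifulSoup(req.text, 'html.parser')
print(soup.title.get_text(strip=True))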
I'm trying to log in to this website (https://phishtank.org/login.php) to use it with Python, but the site uses cookies. I tried this:
import requests

cookies = {
    'PHPSESSID': '3hdp8jeu933e8t4hvh240i8rp840p06j',
}

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',
    'Referer': 'https://phishtank.org/login_required.php',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Cache-Control': 'max-age=0',
    'TE': 'trailers',
}
response = requests.get('https://phishtank.org/add_web_phish.php', headers=headers, cookies=cookies)
print(response.text)
It works, but after a few minutes the cookie just expires. What can I do to avoid this limitation? Maybe something that requests new cookies for me and uses them.
Use requests sessions instead. A Session persists cookies for you:
import requests

session = requests.Session()
session.headers.update(headers)   # headers dict from above
session.cookies.update(cookies)   # seed the session with the current cookies
session.get(<url>)                # cookies set by responses are stored and reused automatically
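For example, a minimal sketch using the URLs from the question. This only demonstrates the cookie-persistence mechanics: cookies set by any response are stored on the Session and sent automatically on later requests, so you don't have to keep copying a fresh PHPSESSID out of the browser. You still need to authenticate the session first (e.g. by posting the login form or seeding it with a valid PHPSESSID):

import requests

session = requests.Session()
session.headers.update({
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0',
})

# any Set-Cookie headers in this response are stored on the session...
session.get('https://phishtank.org/login_required.php')

# ...and sent automatically with every later request made through it
response = session.get('https://phishtank.org/add_web_phish.php')
print(response.status_code)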
I'm trying to get data from eToro. This link works in my browser https://www.etoro.com/sapi/userstats/CopySim/Username/viveredidividend/OneYearAgo but it's forbidden via requests.get() even if I add a user agent, headers, and even cookies.
import requests
url = "https://www.etoro.com/sapi/userstats/CopySim/Username/viveredidividend/OneYearAgo"
headers = {
    'Host': 'www.etoro.com',
    'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
    'Referer': 'https://www.etoro.com/people/viveredidividend/chart',
    'Cookie': 'XXX',
    'TE': 'Trailers'
}
requests.get(url, headers=headers)
>>> <Response [403]>
How can I solve this without Selenium?
This error occurs when your Python code is not authenticated with the site. When you log in on the website, the browser is authenticated and the site remembers it; that's why the link works fine in your browser.
To solve this problem, you first need to authenticate in your Python code.
To authenticate:
import requests
response = requests.get(url, auth=(username, password))
A 403 error means the request you are making is being blocked. The website is protected by Cloudflare, which is preventing it from being scraped. You can confirm this by executing print(response.text) in your code: you will see "Access denied | www.etoro.com used Cloudflare to restrict access" in the title tag of the Cloudflare HTML that is returned.
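For example, a quick way to see the block page (a sketch; no headers are needed just to observe the 403):

import requests

url = 'https://www.etoro.com/sapi/userstats/CopySim/Username/viveredidividend/OneYearAgo'
response = requests.get(url)

print(response.status_code)   # 403 when Cloudflare rejects the request
print(response.text[:300])    # start of the "Access denied" page served by Cloudflare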
Under the hood, the request passes through Cloudflare's servers, which verify whether it is coming from a real browser. Only if the request passes that verification is it forwarded to the website's server, which then returns the actual response; otherwise Cloudflare blocks the request.
It's difficult to bypass Cloudflare. Nevertheless, you can try your luck with the code given below.
Code
import urllib.request
url = 'https://www.etoro.com/sapi/userstats/CopySim/Username/viveredidividend/OneYearAgo'
headers = {
'authority': 'www.etoro.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'accept': 'application/json, text/plain, */*',
'accounttype': 'Real',
'applicationidentifier': 'ReToro',
'sec-ch-ua-mobile': '?0',
'applicationversion': '331.0.2',
'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': 'https://www.etoro.com/discover/markets/cryptocurrencies',
'accept-language': 'en-US,en;q=0.9',
'cookie': '__cfruid=e7f40231e2946a1a645f6fa0eb19af969527087e-1624781498; _gcl_au=1.1.279416294.1624782732; _gid=GA1.2.518227313.1624782732; _scid=64860a19-28e4-4e83-9f65-252b26c70796; _fbp=fb.1.1624782732733.795190273; __adal_ca=so%3Ddirect%26me%3Dnone%26ca%3Ddirect%26co%3D%28not%2520set%29%26ke%3D%28not%2520set%29; __adal_cw=1624782733150; _sctr=1|1624732200000; _gaexp=GAX1.2.eSuc0QBTRhKbpaD4vT_-oA.18880.x331; _hjTLDTest=1; _hjid=bb69919f-e61b-4a94-a03b-db7b1f4ec4e4; hp_preferences=%7B%22locale%22%3A%22en-gb%22%7D; funnelFromId=38; eToroLocale=en-gb; G_ENABLED_IDPS=google; marketing_visitor_regulation_id=10; marketing_visitor_country=96; __cflb=0KaS4BfEHptJdJv5nwPFxhdSsqV6GxaSK8BuVNBmVkuj6hYxsLDisSwNTSmCwpbFxkL3LDuPyToV1fUsaeNLoSNtWLVGmBErMgEeYAyzW4uVUEoJHMzTirQMGVAqNKRnL; __cf_bm=6ef9d6f250ee71d99f439672839b52ac168f7c89-1624785170-1800-ASu4E7yXfb+ci0NsW8VuCgeJiCE72Jm9uD7KkGJdy1XyNwmPvvg388mcSP+hTCYUJvtdLyY2Vl/ekoQMAkXDATn0gyFR0LbMLl0b7sCd1Fz/Uwb3TlvfpswY1pv2NvCdqJBy5sYzSznxEsZkLznM+IGjMbvSzQffBIg6k3LDbNGPjWwv7jWq/EbDd++xriLziA==; _uetsid=2ba841e0d72211eb9b5cc3bdcf56041f; _uetvid=2babee20d72211eb97efddb582c3c625; _ga=GA1.2.1277719802.1624782732; _gat_UA-2056847-65=1; __adal_ses=*; __adal_id=47f4f887-c22b-4ce0-8298-37d6a0630bdd.1624782733.2.1624785174.1624782818.770dd6b7-1517-45c9-9554-fc8d210f1d7a; _gat=1; TS01047baf=01d53e5818a8d6dc983e2c3d0e6ada224b4742910600ba921ea33920c60ab80b88c8c57ec50101b4aeeb020479ccfac6c3c567431f; outbrain_cid_fetch=true; _ga_B0NS054E7V=GS1.1.1624785164.2.1.1624785189.35; TMIS2=9a74f8b353780f2fbe59d8dc1d9cd901437be0b823f8ee60d0ab36264e2503993c5e999eaf455068baf761d067e3a4cf92d9327aaa1db627113c6c3ae3b39cd5e8ea5ce755fb8858d673749c5c919fe250d6297ac50c5b7f738927b62732627c5171a8d3a86cdc883c43ce0e24df35f8fe9b6f60a5c9148f0a762e765c11d99d; mp_dbbd7bd9566da85f012f7ca5d8c6c944_mixpanel=%7B%22distinct_id%22%3A%20%2217a4c99388faa1-0317c936b045a4-34647600-13c680-17a4c993890d70%22%2C%22%24device_id%22%3A%20%2217a4c99388faa1-0317c936b045a4-34647600-13c680-17a4c993890d70%22%2C%22%24initial_referrer%22%3A%20%22%24direct%22%2C%22%24initial_referring_domain%22%3A%20%22%24direct%22%7D',
}
request = urllib.request.Request(url, headers=headers)
response = urllib.request.urlopen(request).read()
print(response.decode('utf-8'))
I have an issue with the website https://damas.terna.it/DMSPCAS08.
I am trying to either scrape the data or fetch the Excel file that is included.
I tried to fetch the Excel file with a POST request.
import requests
from bs4 import BeautifulSoup
import json
import datetime
url = 'https://damas.terna.it/api/Ntc/GetNtc'
headers = {
'Host': 'damas.terna.it',
'Connection': 'keep-alive',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
'sec-ch-ua-mobile': '?0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Referer': 'https://damas.terna.it/DMSPCAS08',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Cookie': '__RequestVerificationToken=5mfiSM2dKte532I8fd3MRdn6nnHbSezkQX29r3fyF2tMegXsvnOInpxy8JvFuDyRVS6pZs03y-NL3CsNsItN1yboc128Kk51yEiuUU0mel41; pers_damas_2019=378972352.20480.0000; rxVisitor=1619766836714T8SRPFLUAH62F1R9K6KG3EKK104BSDFE; dtCookie=7$EC34ED7BFB4A503D379D8D8C69242174|846cd19ce7947380|1; rxvt=1619774462088|1619771198468; dtPC=7$369396404_351h-vIDMPHTCURIGKVKBWVVEWOAMRMKDNWCUH-0e1; DamasNetCookie=F-evmYb7kIS_YTMr2mwuClMB1zazemmhl9vzSXynWeuCII_keDb_jQr4VLSYif9t3juDS6LkOuIXKFfe8pydxSzHPfZzGveNB6xryj2Czp9J1qeWFFT9dYFlRXFWAHuaEIyUQQDJmzWfDBrFCWr309mZoE6hkCKzDtoJgIoor9bed1kQgcdeymAH9lrtrKxwsheaQm2qA-vWWqKjCiruO1VkJ6II_bcrAXU2A_ZPQPznE1_8AEC_AwXmBXETubMQwFAnDXsOLDfEYeQ61TGAupF3d-wz3aZfRs5eCA3kw-en-kpEbS0trwFBQzB-098610GIbIPki9ThVitZ2LN2zza6nn1A8qchqfQC_CZEgu6Jt1glfhHceWS6tvWCuyOEqo2jJpxAajMYXPB6mzlHzX13TiV-jgeFSPehugMAgms_exqocw9w27e4lI5laYZu0rkKkznpZ1mJLOhORwny8-bKa3nRUt7erFv7ul3nLLrgd3FP907tHpTh-qXt1Bmr6OqknDZr_EBN8GY_B2YHV-8hC0AjdqQqpS0xOpp7z_CzzgByTOHSNdeKjVgQfZLQ7amnp71lhxgPeJZvOIl_mIWOr_gWRy_iK6UuzrA3udCTV7bAnUXKB8gX89d9ShQf5tZDxPFchrAQBtdmDChQOA; dtLatC=2; dtSa=true%7CC%7C-1%7CExport%20to%20.xls%7C-%7C1619772685174%7C369396404_351%7Chttps%3A%2F%2Fdamas.terna.it%2FDMSPCAS08%7CTerna%20%5Ep%20NTC%7C1619772662568%7C%7C'
}
parameters = {
'busDay': "2021-05-01",
'busDayTill': "2021-05-01",
'borderDirId': '1',
'borderDirName': "TERNA-APG"
}
response = requests.post(url, data=parameters, headers=headers)
soup = BeautifulSoup(response.text, "html.parser")
print(soup.prettify())
I am receiving this error:
The parameters dictionary contains an invalid entry for parameter 'parameters' for method 'System.Web.Mvc.ActionResult GetNtc(Damas.Core.Data.DataSource.Data.ParametersModel)' in 'Terna.Web.Controllers.CapacityManagement.NtcController'. The dictionary contains a value of type 'System.Collections.Generic.Dictionary`2[System.String,System.Object]', but the parameter requires a value of type 'Damas.Core.Data.DataSource.Data.ParametersModel'.
Parameter name: parameters
response = requests.post(url, data=json.dumps(parameters), headers=headers) seems to solve the issue.
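In other words, the endpoint expects a JSON body rather than form-encoded data. If you prefer, requests can do the serialization for you with the json= parameter, which also sets the Content-Type: application/json header (a sketch; the headers and cookies from the question may still be required by the site):

import requests

url = 'https://damas.terna.it/api/Ntc/GetNtc'
parameters = {
    'busDay': '2021-05-01',
    'busDayTill': '2021-05-01',
    'borderDirId': '1',
    'borderDirName': 'TERNA-APG',
}

# json=parameters is equivalent to data=json.dumps(parameters) plus an
# explicit 'Content-Type: application/json' header
response = requests.post(url, json=parameters)  # add headers=/cookies= from the question if needed
print(response.status_code)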
I'm trying to get into papara.com using Python. When I make a request, it always returns a 403 response. I copied the cookies from my browser. Here is my code:
import requests
headers = {
    'authority': 'www.papara.com',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'sec-fetch-site': 'none',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-user': '?1',
    'sec-fetch-dest': 'document',
    'accept-language': 'en-US,en;q=0.9',
    'cookie': '__cfruid=64370d0d06d80a1e1a701ae8bee5a4b85c1de1af-1610296629',
}
response = requests.get('https://www.papara.com/', headers=headers)
I tried different user agents and I tried removing the cookie from the headers, but it didn't work.
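Since this looks like the same Cloudflare-style bot protection as in the questions above, the cloudscraper approach from the earlier answers may be worth a try here too (untested for papara.com; it is not guaranteed to pass the site's checks):

import cloudscraper

# cloudscraper attempts the anti-bot browser check that plain requests fails
scraper = cloudscraper.create_scraper()
response = scraper.get('https://www.papara.com/')
print(response.status_code)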