Send POST request using API in Python

I am executing the POST request below and getting the error "Premature end of file":
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import os, unittest, time, re, random, datetime
import requests

class Sauce(unittest.TestCase):
    def test_create_contract(self):
        payload = {'PO_ID': '3557698', 'MAN': 'HQF01'}
        r = requests.post(
            "https://time.tps.com/xml/Msg.jsp?msg=MSG_RAM_INFO&user=Dist1&pass=ome1")
        print r
        print r.text

if __name__ == "__main__":
    unittest.main()
Whereas when I try the same request with a SOAP client using an XML file, it works fine.
My XML file is:
<SO>
    <PO_ID>3557698</PO_ID>
    <MAN>HQF01</MAN>
</SO>
I need to send a POST request and get the response back.

r=requests.post("https://time.tps.com/xml/Msg.jsp?msg=MSG_RAM_INFO&user=Dist1&pass=ome1")
But you're not actually sending the POST data. You should pass data=payload to that requests.post call:
url = "https://time.tps.com/xml/Msg.jsp?msg=MSG_RAM_INFO&user=Dist1&pass=ome1"
payload = {'PO_ID': '3557698', 'MAN': 'HQF01'}
r = requests.post(url, data=payload)
EDIT:
Try sending the XML body directly, with an XML content type:
payload = "<SO><PO_ID>3557698</PO_ID><MAN>HQF01</MAN></SO>"
headers = {'Content-Type': 'application/xml'}
r = requests.post(url, data=payload, headers=headers)
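If you would rather build that XML body from the dictionary instead of hard-coding the string, here is a sketch using the standard library's xml.etree.ElementTree (the element names are taken from the XML file above):
import xml.etree.ElementTree as ET
import requests

url = "https://time.tps.com/xml/Msg.jsp?msg=MSG_RAM_INFO&user=Dist1&pass=ome1"
fields = {'PO_ID': '3557698', 'MAN': 'HQF01'}

# Build <SO><PO_ID>...</PO_ID><MAN>...</MAN></SO> from the dict
root = ET.Element('SO')
for tag, text in fields.items():
    ET.SubElement(root, tag).text = text
payload = ET.tostring(root, encoding='unicode')

r = requests.post(url, data=payload, headers={'Content-Type': 'application/xml'})
r.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
print(r.text)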

Related

requests.exceptions.MissingSchema: Invalid URL Python API Get request

I am trying to pull data with a GET request to the URL below, and I'm getting the error shown underneath.
I don't understand what the issue with the URL is.
Any help would be appreciated.
import requests
from requests.auth import HTTPBasicAuth
import json
import urllib.parse
url = """https://msi.abc.com/admin/ui/feedbackCSV/reports
/5d14de32309baf0001501fb7 ?reports[]=pageviews&reports[]=
searches&from=Oct 1 2020&to=Oct 2 2020"""
encode_url = urllib.parse.quote(url,encoding='utf-8')
response = requests.get(encode_url,auth = HTTPBasicAuth('admin#abc.com', 'Summer2020'))
print(response.content)
Error
raise MissingSchema(error)
requests.exceptions.MissingSchema: Invalid URL 'https%3A%2F%2Fmsi.abc.com%2Fadmin%2Fui%2FfeedbackCSV%2Freports%0D%0A%2F5d14de32309baf0001501fb7%20%3Freports%5B%5D%3Dpageviews%26reports%5B%5D%3D%0D%0Asearches%26from%3DOct%201%202020%26to%3DOct%202%202020': No schema supplied. Perhaps you meant http:https%3A%2F%2Fmsi.abc.com%2Fadmin%2Fui%2FfeedbackCSV%2Freports%0D%0A%2F5d14de32309baf0001501fb7%20%3Freports%5B%5D%3Dpageviews%26reports%5B%5D%3D%0D%0Asearches%26from%3DOct%201%202020%26to%3DOct%202%202020?
This might be because you pass the whole URL to the urllib.parse.quote function: it escapes the :// after https (and the embedded newlines of the triple-quoted string, visible as %0D%0A in the error), so requests no longer sees a valid schema. Quote only the parameter values, or let requests encode them for you with its params argument, as in the example below:
import requests
from requests.auth import HTTPBasicAuth

url = "https://msi.abc.com/admin/ui/feedbackCSV/reports/5d14de32309baf0001501fb7"
# Use a list value so requests repeats the key; a dict literal with 'reports[]'
# written twice would silently keep only the last entry.
payload = {'reports[]': ['pageviews', 'searches'], 'from': 'Oct 1 2020', 'to': 'Oct 2 2020'}
authParams = HTTPBasicAuth('admin#abc.com', 'Summer2020')
response = requests.get(url, params=payload, auth=authParams)
print(response.content)
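To sanity-check what requests will actually send, you can build the prepared request and print its URL (response.url on the response shows the same thing):
from requests import Request

prepared = Request('GET', url, params=payload).prepare()
print(prepared.url)
# .../5d14de32309baf0001501fb7?reports%5B%5D=pageviews&reports%5B%5D=searches&from=Oct+1+2020&to=Oct+2+2020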

Bad Request with multiple URLs using BeautifulSoup

I am trying to scrape 3 products on a website. I have put all 3 URLs into a .txt file.
When running, for some reason I get a "Bad Request" for the first 2 URLs, while the 3rd URL is scraped successfully and a webhook is sent to Discord. No matter what order I put the URLs in, only the last URL seems to work, whether I use proxies or not. (I added proxies because I want to scrape many more URLs than just these 3.)
Here is my current code:
import requests
import lxml.html
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from dhooks import Webhook, Embed
import random

ua = UserAgent()
header = {'User-Agent': ua.chrome}

# Proxies
proxy_list = []
for line in open('proxies.txt', 'r'):
    line = line.replace('\n', '')
    proxy_list.append(line)

def get_proxy():
    proxy = random.choice(proxy_list)
    proxies = {
        "http": f'{str(proxy)}',
        "https": f'{str(proxy)}'
    }
    return proxies

# Opening URL file
with open('urls.txt', 'r') as file:
    for url in file.readlines():
        proxies = get_proxy()
        result = requests.get(url, headers=header, timeout=3, proxies=proxies)
        soup = BeautifulSoup(result.content, 'lxml')
Thanks for helping.
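A likely culprit (an assumption based on the symptom, not something confirmed in the thread): file.readlines() keeps the trailing newline on every line, so every URL except the last one is sent with a \n at the end, which servers commonly reject with 400 Bad Request. Stripping the whitespace before requesting should fix it:
with open('urls.txt', 'r') as file:
    for url in file:
        url = url.strip()  # drop the trailing newline each line carries
        if not url:
            continue  # skip blank lines
        proxies = get_proxy()
        result = requests.get(url, headers=header, timeout=3, proxies=proxies)
        soup = BeautifulSoup(result.content, 'lxml')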

How to get all request from API with rate limiting using python

I am trying to fetch everything from an API that has a rate limit of 100 requests per minute. The API responds in JSON format. I used urllib, requests, json, and time.sleep. My code below returns the same data every time:
import urllib
import requests
import json
import urllib.request as urllib2
from time import sleep
data = []
for i in range(4):
    url = "https://api.leadfeed.com/accounts/*?start_date=2018-01-01&end_date=2020-06-01"
    header = {"Authorization": 'Bearer GnHbmjtUEQDXCFOlwKAyy9hJedZtDg'}
    resp = requests.get(url, headers=header)
    data.append(resp.text)
    sleep(60)
print(data)
It returns the same JSON data 4 times, as shown below:
[
"{\"data\":[{\"id\":\"b50b0cf4-06f1-11ea_p6q3nDbB2C4m9KjK9gndy\",\"type\.....}"
"{\"data\":[{\"id\":\"b50b0cf4-06f1-11ea_p6q3nDbB2C4m9KjK9gndy\",\"type\.....}"
"{\"data\":[{\"id\":\"b50b0cf4-06f1-11ea_p6q3nDbB2C4m9KjK9gndy\",\"type\.....}"
"{\"data\":[{\"id\":\"b50b0cf4-06f1-11ea_p6q3nDbB2C4m9KjK9gndy\",\"type\.....}"
]
I don't know how many pages of JSON there are, which is why I used range(4) to fetch 4 responses.
How can I get all the data from the API while respecting the rate limit?
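The loop returns identical data because the request itself never changes: nothing in the URL or parameters advances to the next chunk of results. The fix depends on how this particular API paginates (a page number, an offset, or a cursor/next link; check its documentation). Here is a sketch assuming a hypothetical page query parameter, stopping when a page comes back empty:
import requests
from time import sleep

url = "https://api.leadfeed.com/accounts/*"
header = {"Authorization": "Bearer GnHbmjtUEQDXCFOlwKAyy9hJedZtDg"}

data = []
page = 1
while True:
    # 'page' is a hypothetical pagination parameter; substitute whatever
    # mechanism the API actually documents (offset, cursor, next URL, ...).
    params = {"start_date": "2018-01-01", "end_date": "2020-06-01", "page": page}
    resp = requests.get(url, headers=header, params=params)
    resp.raise_for_status()
    chunk = resp.json().get("data", [])
    if not chunk:
        break  # no more results
    data.extend(chunk)
    page += 1
    sleep(0.6)  # 60s / 100 requests keeps us inside the rate limit
print(len(data))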

Download a compressed zip file from URL

I have a problem statement where I have to log in to a website and then download a zip file. So far I have written the code below to log in to the website (it prints the authentication-successful message) and create a session. How can I download the zip file now?
import requests
import urllib
import urllib.request
import zipfile
import io
import shutil
post_login_url = 'https://www.ims-dm.com/mvc/page/customer-sign-in/cgi/cookie.php'
request_url = 'http://www.ims-dm.com/cgi/securedownload.php?p=WPNFTPD#prodtype=wpn/WPN-FULL-20180306.TXT.zip'
payload = {
    'sendusername': 'xxxxxxxxxx',
    'password': 'xxxxxx'
}

with requests.Session() as session:
    post = session.post(post_login_url, data=payload)
    if post.status_code == 200:
        print("Authentication successful!!")
        url = session.get(request_url)
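To actually save the file, stream the download through the same authenticated session and write it to disk in chunks (a sketch; whether the response body really is the zip depends on the site, so it is worth checking the status code and Content-Type first). The zipfile/io imports you already have then let you unpack it:
with requests.Session() as session:
    post = session.post(post_login_url, data=payload)
    if post.status_code == 200:
        print("Authentication successful!!")
        response = session.get(request_url, stream=True)
        response.raise_for_status()
        # Write the body to disk in chunks so a large file never sits in memory.
        with open('WPN-FULL-20180306.TXT.zip', 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        # Or, without streaming, unpack straight from memory:
        # with zipfile.ZipFile(io.BytesIO(session.get(request_url).content)) as zf:
        #     zf.extractall('extracted')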

How to get cookies from urllib.request?

How do I get the cookies from a urllib.request?
import urllib.request
import urllib.parse
data = urllib.parse.urlencode({
    'user': 'user',
    'pass': 'pass'
})
data = data.encode('utf-8')
request = urllib.request.urlopen('http://example.com', data)
print(request.info())
request.info() returns the cookies, but not in a very usable way.
response.info() is a dict-like object, so you can parse out any info you need. Here is a demo written in Python 3:
from urllib import request
from urllib.error import HTTPError

# Declare url and header_params first; the return below assumes this snippet
# lives inside a function.
req = request.Request(url, data=None, headers=header_params, method='GET')
try:
    response = request.urlopen(req)
    cookie = response.info().get_all('Set-Cookie')
    content_type = response.info()['Content-Type']
except HTTPError as err:
    print("err status: {0}".format(err))
    return
You can now parse the cookie variable according to your application's requirements.
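For example, the standard library's http.cookies.SimpleCookie can break those raw Set-Cookie strings into names and values (a small sketch, assuming cookie is the list returned by get_all above):
from http.cookies import SimpleCookie

jar = SimpleCookie()
for header_value in cookie or []:   # get_all returns None when no cookie was set
    jar.load(header_value)          # parse one Set-Cookie header string
for name, morsel in jar.items():
    print(name, '=', morsel.value)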
I just used the following code to get the cookie for Python Challenge #17; hope it helps (Python 3.8 being used):
import http.cookiejar
import urllib.request  # note: a bare "import urllib" would not expose urllib.request

cookiejar = http.cookiejar.CookieJar()
cookieproc = urllib.request.HTTPCookieProcessor(cookiejar)
opener = urllib.request.build_opener(cookieproc)
response = opener.open(url)  # url: the page that sets the cookie
for cookie in cookiejar:
    print(cookie.name, cookie.value)
I think the requests package is a much better choice these days. Try this sample code, which shows Google setting cookies when you visit:
import requests

url = "http://www.google.com"
r = requests.get(url, timeout=5)
if r.status_code == 200:
    for cookie in r.cookies:
        print(cookie)  # Use "print cookie" if you use Python 2.
Gives:
Cookie NID=67=n0l3ME1Jl3-wwlH7oE5pvxJ_CfU12hT5Kh65wh21bvE3hrKFAo1sJVj_UcuLCr76Ubi3yxENROaYNEitdgW4IttL43YZGlf8xAPl1IbzoLG31KP5U2tiP2y4DzVOJ2fA for .google.se/
Cookie PREF=ID=ce66d1288fc0d977:FF=0:TM=1407525509:LM=1407525509:S=LxQv7q8fju-iHJPZ for .google.se/
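If you just want a plain name-to-value dict instead of iterating, requests ships a small helper for that:
import requests

r = requests.get("http://www.google.com", timeout=5)
cookies = requests.utils.dict_from_cookiejar(r.cookies)
print(cookies)  # e.g. {'NID': '...', 'PREF': '...'}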
