API Shows image only if ModHeader is on - python

I am requesting a user image from an API, but the image only shows up if I'm using the Chrome extension ModHeader, which takes the authorization header and URL pattern. In my code I am passing the header info, so I'm not sure why it doesn't display.
@app.route('/cardholder/<name>', methods=['GET', 'POST'])
def cardholder(name):
    # Look up the cardholder id by first name
    response = requests.get("https://commandcentre-api-us.security.gallagher.cloud/api/cardholders",
                            verify=False, headers=Headers)
    r = json.loads(response.text)
    data = {}
    for x in r['results']:
        data[x["firstName"]] = x["id"]
    chid = data[name]
    # Fetch the cardholder's details, including the image link
    url = "https://commandcentre-api-us.security.gallagher.cloud/api/cardholders/" + chid
    ch = requests.get(url, verify=False, headers=Headers)
    chl = json.loads(ch.text)
    try:
        chimage = chl["#DL"]["href"]
    except KeyError:
        chimage = "../static/img/noimg.png"  # fallback when no image is on file
    return render_template('cardholder.html', name=name, chid=chid, chl=chl, chimage=chimage)
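A likely explanation, for what it's worth: the img tag in cardholder.html points the browser straight at the API, and the browser does not attach your Authorization header to that request. ModHeader injects the header into every browser request, which is why the image only appears with the extension on. One fix is to proxy the image bytes through Flask so the auth header is only ever sent server-side. A minimal sketch, assuming Headers is the same auth dict used above (the route name here is made up):

from flask import Response

@app.route('/cardholder_image/<chid>')
def cardholder_image(chid):
    # Re-fetch the cardholder to get the protected image link
    ch = requests.get("https://commandcentre-api-us.security.gallagher.cloud/api/cardholders/" + chid,
                      verify=False, headers=Headers)
    href = ch.json()["#DL"]["href"]
    # Fetch the image server-side, with the Authorization header attached
    img = requests.get(href, verify=False, headers=Headers)
    return Response(img.content, content_type=img.headers.get("Content-Type", "image/jpeg"))

The template would then use <img src="/cardholder_image/{{ chid }}"> instead of the raw API href.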

Related

Python Falcon - Post calls are being ignored

I'm trying to set up a simple reverse proxy with Falcon in Python.
I have:
import falcon
import requests

class ReverseProxyResource:
    def on_get(self, req, resp, text=None):
        print("GET")
        if text:
            destination = "[destination_url]/" + text
        else:
            destination = "[destination_url]"
        result = requests.get(destination)
        resp.body = result.text
        resp.status = result.status_code

    def on_post(self, req, resp, text=None):
        print("POST")
        if text:
            destination = "[destination_url]/" + text
        else:
            destination = "[destination_url]"
        result = requests.post(destination, data=req.bounded_stream.read())
        resp.body = result.text
        resp.status = result.status_code

proxy_api = application = falcon.API()
proxy_api.req_options.auto_parse_form_urlencoded = True
proxy_api.add_route('/{text}', ReverseProxyResource())
proxy_api.add_route('/', ReverseProxyResource())
GET requests to the proxy are returned correctly.
However, POST requests only get a 404 error back from the API. The "POST" print statement is not shown, indicating on_post isn't called at all. (The POST requests only included the header Content-Type: application/json and a simple JSON body, which work correctly when sent directly to the destination URL.)
EDIT: Interestingly enough, if I change the GET call in Postman to POST (i.e. no body, headers, or anything else added), on_post() is called when I hit the endpoint. So it seems like an issue where POST requests that contain a body are automatically 404'ed without calling on_post().
Try adding a User-Agent and a Content-Type before making the POST call:
headers = {"Content-Type": "text/plain", "User-Agent": "PostmanRuntime/7.30.0"}
result = requests.post(url = destination, data=req.bounded_stream.read(), headers=headers)
The code below works for me:
import falcon
import requests

class ReverseProxyResource:
    def on_get(self, req, resp, text=None):
        print("GET")
        if text:
            destination = "https://cat-fact.herokuapp.com/" + text
        else:
            destination = "https://cat-fact.herokuapp.com/facts/"
        result = requests.get(destination)
        resp.body = result.text
        resp.status = result.status_code

    def on_post(self, req, resp, text=None):
        print("POST")
        if text:
            destination = "https://dummy.restapiexample.com/api/v1/" + text
        else:
            destination = "https://dummy.restapiexample.com/api/v1/create"
        headers = {"Content-Type": "text/plain", "User-Agent": "PostmanRuntime/7.30.0"}
        result = requests.post(url=destination, data=req.bounded_stream.read(), headers=headers)
        resp.text = result.text
        resp.status = result.status_code

proxy_api = application = falcon.App()
proxy_api.req_options.auto_parse_form_urlencoded = True
proxy_api.add_route('/{text}', ReverseProxyResource())
proxy_api.add_route('/', ReverseProxyResource())
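To verify the fix end to end, a POST with a JSON body through the proxy should now reach on_post(). A quick sketch, assuming the app is served locally on port 8000 (e.g. via gunicorn yourmodule:application; the port and module name are assumptions, not part of the original answer):

import requests

# Send a JSON body through the proxy; on_post() should print "POST"
resp = requests.post("http://localhost:8000/create",
                     headers={"Content-Type": "application/json"},
                     data='{"name": "test", "salary": "123", "age": "23"}')
print(resp.status_code, resp.text)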

Extract specific pages and make a new pdf from a list of pdfs

I have been trying to extract specific pages from each PDF and then merge all the extracted PDFs into one.
I have a list of PDFs.
I am using the pdfrw library but I am getting an error while extracting the pages:
from pdfrw import PdfReader, PdfWriter
import os

files = [f for f in os.listdir('.') if os.path.isfile(f) and f.endswith('.pdf')]
print(files)

for pdf in files:
    pages = PdfReader(pdf).pages
    parts = [(6, 7)]  # note: range(6, 7) covers page 6 only
    for part in parts:
        title = os.path.splitext(pdf)[0]  # filename without the .pdf extension
        outdata = PdfWriter(f'{title}_{part[0]}_.pdf')
        for pagenum in range(*part):
            outdata.addpage(pages[pagenum - 1])
        outdata.write()
Please help if possible. The traceback ends with:
raise PdfParseError('Invalid PDF header: %s' %
pdfrw.errors.PdfParseError: Invalid PDF header: '<!doctype html>'
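For what it's worth, that error means one of the input files is not actually a PDF: pdfrw is reading '<!doctype html>' where the '%PDF' header should be, i.e. an HTML page saved with a .pdf extension. A minimal local-only sketch with pdfrw that skips such files by checking the magic bytes, pulls pages 6 and 7 (1-based; an assumption carried over from the (6, 7) tuple above) from each file, and merges everything into one output:

import os
from pdfrw import PdfReader, PdfWriter

writer = PdfWriter()
for pdf in sorted(f for f in os.listdir('.') if f.endswith('.pdf')):
    # Skip files that are not real PDFs (e.g. HTML saved as .pdf)
    with open(pdf, 'rb') as fh:
        if not fh.read(5).startswith(b'%PDF'):
            print(f'Skipping {pdf}: not a real PDF')
            continue
    pages = PdfReader(pdf).pages
    for pagenum in (6, 7):  # 1-based page numbers to extract
        if pagenum <= len(pages):
            writer.addpage(pages[pagenum - 1])
writer.write('merged_extract.pdf')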
Manas, one way to achieve your requirement is to use a web API. For example, consider the following code snippet, which splits a PDF from an uploaded file.
import os
import requests  # pip install requests

# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "*********************************"

# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"

# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page numbers (or ranges) to process. Example: '1,3-5,7-'.
Pages = "1-2,3-"

def main(args=None):
    uploadedFileUrl = uploadFile(SourceFile)
    if uploadedFileUrl is not None:
        splitPDF(uploadedFileUrl)

def splitPDF(uploadedFileUrl):
    """Split PDF using PDF.co Web API"""
    # Prepare request params as JSON
    # See documentation: https://apidocs.pdf.co
    parameters = {}
    parameters["pages"] = Pages
    parameters["url"] = uploadedFileUrl

    # Prepare URL for 'Split PDF' API request
    url = "{}/pdf/split".format(BASE_URL)

    # Execute request and get response as JSON
    response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
    if response.status_code == 200:
        json = response.json()
        if json["error"] == False:
            # Download generated PDF files
            part = 1
            for resultFileUrl in json["urls"]:
                # Download result file
                r = requests.get(resultFileUrl, stream=True)
                localFileUrl = f"Page{part}.pdf"
                if r.status_code == 200:
                    with open(localFileUrl, 'wb') as file:
                        for chunk in r:
                            file.write(chunk)
                    print(f"Result file saved as \"{localFileUrl}\" file.")
                else:
                    print(f"Request error: {r.status_code} {r.reason}")
                part = part + 1
        else:
            # Show service-reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")

def uploadFile(fileName):
    """Uploads file to the cloud"""
    # 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
    # Prepare URL for 'Get Presigned URL' API request
    url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
        BASE_URL, os.path.basename(fileName))

    # Execute request and get response as JSON
    response = requests.get(url, headers={"x-api-key": API_KEY})
    if response.status_code == 200:
        json = response.json()
        if json["error"] == False:
            # URL to use for file upload
            uploadUrl = json["presignedUrl"]
            # URL for future reference
            uploadedFileUrl = json["url"]
            # 2. UPLOAD FILE TO CLOUD.
            with open(fileName, 'rb') as file:
                requests.put(uploadUrl, data=file, headers={"x-api-key": API_KEY, "content-type": "application/octet-stream"})
            return uploadedFileUrl
        else:
            # Show service-reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")
    return None

if __name__ == '__main__':
    main()
Now, to merge the PDF files you can use something similar to the following code snippet.
import os
import requests  # pip install requests

# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "**********************************"

# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"

# Source PDF files
SourceFile_1 = ".\\sample1.pdf"
SourceFile_2 = ".\\sample2.pdf"
# Destination PDF file name
DestinationFile = ".\\result.pdf"

def main(args=None):
    UploadedFileUrl_1 = uploadFile(SourceFile_1)
    UploadedFileUrl_2 = uploadFile(SourceFile_2)
    if UploadedFileUrl_1 is not None and UploadedFileUrl_2 is not None:
        uploadedFileUrls = "{},{}".format(UploadedFileUrl_1, UploadedFileUrl_2)
        mergeFiles(uploadedFileUrls, DestinationFile)

def mergeFiles(uploadedFileUrls, destinationFile):
    """Perform Merge using PDF.co Web API"""
    # Prepare request params as JSON
    # See documentation: https://apidocs.pdf.co
    parameters = {}
    parameters["name"] = os.path.basename(destinationFile)
    parameters["url"] = uploadedFileUrls

    # Prepare URL for 'Merge PDF' API request
    url = "{}/pdf/merge".format(BASE_URL)

    # Execute request and get response as JSON
    response = requests.post(url, data=parameters, headers={"x-api-key": API_KEY})
    if response.status_code == 200:
        json = response.json()
        if json["error"] == False:
            # Get URL of result file
            resultFileUrl = json["url"]
            # Download result file
            r = requests.get(resultFileUrl, stream=True)
            if r.status_code == 200:
                with open(destinationFile, 'wb') as file:
                    for chunk in r:
                        file.write(chunk)
                print(f"Result file saved as \"{destinationFile}\" file.")
            else:
                print(f"Request error: {r.status_code} {r.reason}")
        else:
            # Show service-reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")

def uploadFile(fileName):
    """Uploads file to the cloud"""
    # 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
    # Prepare URL for 'Get Presigned URL' API request
    url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
        BASE_URL, os.path.basename(fileName))

    # Execute request and get response as JSON
    response = requests.get(url, headers={"x-api-key": API_KEY})
    if response.status_code == 200:
        json = response.json()
        if json["error"] == False:
            # URL to use for file upload
            uploadUrl = json["presignedUrl"]
            # URL for future reference
            uploadedFileUrl = json["url"]
            # 2. UPLOAD FILE TO CLOUD.
            with open(fileName, 'rb') as file:
                requests.put(uploadUrl, data=file, headers={"x-api-key": API_KEY, "content-type": "application/octet-stream"})
            return uploadedFileUrl
        else:
            # Show service-reported error
            print(json["message"])
    else:
        print(f"Request error: {response.status_code} {response.reason}")
    return None

if __name__ == '__main__':
    main()
In this sample I am using the pdf.co API. Refer to the following links for more information:
https://apidocs.pdf.co/30-pdf-split, https://apidocs.pdf.co/31-pdf-merge
Thanks!

URL unshortener - python and urllib request

I'm trying to unshorten a few URLs that have been shortened. For this I'm using https://pypi.org/project/fake-headers/ to create the headers,
but I can't get it to work; I get 400 and 406 status codes.
import http.client
import urllib.parse
from urllib.parse import quote, urlencode
from fake_headers import Headers  # pip install fake-headers

url = 'http:// bit.ly /377n4o6'  # I had to add spaces; the end URL is https://es.banggood.com/Xiaomi-Mi-A1-MiA1-Dual-Rear-Camera-5_5-inch-4GB-RAM-64GB-Snapdragon-625-Octa-core-4G-Smartphone-p-1196064.html
parsed = urllib.parse.urlparse(url)
if parsed.scheme == '':
    url = 'http://' + url
    parsed = urllib.parse.urlparse(url)
h = http.client.HTTPConnection(parsed.netloc, timeout=5)
path = quote(parsed.path, safe='')
header = Headers(
    browser="chrome",  # generate only Chrome UA
    os="win",          # generate only Windows platform
    headers=True       # generate misc headers
)
# The request is asking for params, so I got one from another piece of code.
params = urlencode({'#number': 12524, '#type': 'issue', '#action': 'show'})
h.request('HEAD', path, params, header.generate())
response = h.getresponse()
print(response.status)
What could be wrong with this that makes it fail?
Thanks
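Not part of the original post, but a likely culprit is quote(parsed.path, safe=''): with safe='' even the leading "/" gets percent-encoded, so the request target becomes "%2F377n4o6", which is not a valid path and could well explain the 400. A HEAD request with a urlencoded body is also unusual. A simpler sketch that just reads the redirect target with requests, plugging in the same fake_headers output:

import requests
from fake_headers import Headers

url = "http://bit.ly/377n4o6"
# allow_redirects=False keeps the 301 so the Location header is visible
resp = requests.head(url,
                     headers=Headers(browser="chrome", os="win", headers=True).generate(),
                     allow_redirects=False, timeout=5)
print(resp.status_code, resp.headers.get("Location"))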

Python 301 POST

So basically I'm trying to make a request to this website - https://panel.talonro.com/login/ - which is supposed to respond with a 301 redirect.
I send the data as I should, but in the end there is no Location header in the response and the status code is 200 instead of 301.
I can't figure out what I am doing wrong. Please help.
import requests
from bs4 import BeautifulSoup

def do_request():
    req = requests.get('https://panel.talonro.com/login/').text
    soup = BeautifulSoup(req, 'html.parser')
    csrf = soup.find('input', {'name': 'csrfKey'}).get('value')
    ref = soup.find('input', {'name': 'ref'}).get('value')
    post_data = {
        'auth': 'mylogin',
        'password': 'mypassword',
        'login__standard_submitted': '1',
        'csrfKey': csrf,
        'ref': ref,
        'submit': 'Go'
    }
    post = requests.post(url='https://forum.talonro.com/login/', data=post_data, headers={'referer': 'https://panel.talonro.com/login/'})
Right now post_data is local to do_request(), so you cannot access it outside of that function.
Instead, try this, where you return that info and then pass it in:
import requests
from bs4 import BeautifulSoup

def do_request():
    req = requests.get('https://panel.talonro.com/login/').text
    soup = BeautifulSoup(req, 'html.parser')
    csrf = soup.find('input', {'name': 'csrfKey'}).get('value')
    ref = soup.find('input', {'name': 'ref'}).get('value')
    post_data = {
        'auth': 'mylogin',
        'password': 'mypassword',
        'login__standard_submitted': '1',
        'csrfKey': csrf,
        'ref': ref,
        'submit': 'Go'
    }
    return post_data

post = requests.post(url='https://forum.talonro.com/login/', data=do_request(), headers={'referer': 'https://panel.talonro.com/login/'})
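As a side note on the missing 301: requests follows redirects automatically, so a login that 301s will surface as the final 200 page, with the intermediate 301 stored in response.history. To see the raw redirect and its Location header, disable redirect following. A small sketch building on the code above:

# allow_redirects=False keeps the raw 301 response instead of following it
post = requests.post(url='https://forum.talonro.com/login/', data=do_request(),
                     headers={'referer': 'https://panel.talonro.com/login/'},
                     allow_redirects=False)
print(post.status_code, post.headers.get('Location'))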

Add torrent with web api

I'm trying to add a torrent with the uTorrent web API (http://www.utorrent.com/community/developers/webapi), in Python using the Requests library.
import requests
import re

UTORRENT_URL = 'http://%s:%s/gui/' % ('localhost', '55655')
UTORRENT_URL_TOKEN = '%stoken.html' % UTORRENT_URL
REGEX_UTORRENT_TOKEN = r'<div[^>]*id=[\"\']token[\"\'][^>]*>([^<]*)</div>'

auth = requests.auth.HTTPBasicAuth('x', 'x')
headers = {'content-type': 'application/json'}

r = requests.get(UTORRENT_URL_TOKEN, auth=auth, headers=headers)
token = re.search(REGEX_UTORRENT_TOKEN, r.text).group(1)
guid = r.cookies['GUID']
cookies = dict(GUID=guid)

headers = {'content-type': 'multipart/form-data'}
params = {'action': 'add-file', 'token': token}
files = {'torrent_file': 'C:\\x.torrent'}

r = requests.post(UTORRENT_URL, auth=auth, cookies=cookies, headers=headers, params=params, files=files)
print(r.json())
I get the error: torrent file content not supplied in form parameter.
Any help appreciated.
OK, the problem was setting the headers. After removing them, it finally works!
import requests
import re

UTORRENT_URL = 'http://%s:%s/gui/' % ('192.168.1.80', '55655')
UTORRENT_URL_TOKEN = '%stoken.html' % UTORRENT_URL
REGEX_UTORRENT_TOKEN = r'<div[^>]*id=[\"\']token[\"\'][^>]*>([^<]*)</div>'

auth = requests.auth.HTTPBasicAuth('x', 'x')

r = requests.get(UTORRENT_URL_TOKEN, auth=auth)
token = re.search(REGEX_UTORRENT_TOKEN, r.text).group(1)
guid = r.cookies['GUID']
cookies = dict(GUID=guid)

params = {'action': 'add-file', 'token': token}
files = {'torrent_file': open('C:\\x.torrent', 'rb')}

r = requests.post(url=UTORRENT_URL, auth=auth, cookies=cookies, params=params, files=files)
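The likely reason removing the headers helps (my reading, not stated in the original answer): when files= is passed, requests builds the multipart body itself and sets a Content-Type header that includes the generated boundary; hard-coding 'content-type': 'multipart/form-data' overwrites that header without any boundary, so the server cannot parse the form and reports the torrent file as missing. You can inspect the header requests actually sent on the prepared request:

# Shows the boundary requests generated for the multipart body,
# e.g. multipart/form-data; boundary=64cb1c64... (lost if the header is set by hand)
print(r.request.headers['Content-Type'])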
You aren't sending the file data. You should read the relevant section of the documentation. The correct code would be this (I marked the line I changed):
import requests
import re

UTORRENT_URL = 'http://%s:%s/gui/' % ('localhost', '55655')
UTORRENT_URL_TOKEN = '%stoken.html' % UTORRENT_URL
REGEX_UTORRENT_TOKEN = r'<div[^>]*id=[\"\']token[\"\'][^>]*>([^<]*)</div>'

auth = requests.auth.HTTPBasicAuth('x', 'x')
headers = {'content-type': 'application/json'}

r = requests.get(UTORRENT_URL_TOKEN, auth=auth, headers=headers)
token = re.search(REGEX_UTORRENT_TOKEN, r.text).group(1)
guid = r.cookies['GUID']
cookies = dict(GUID=guid)

headers = {'content-type': 'multipart/form-data'}
params = {'action': 'add-file', 'token': token}
files = {'torrent_file': open('C:\\x.torrent', 'rb')}  # Changed this line

r = requests.post(UTORRENT_URL, auth=auth, cookies=cookies, headers=headers, params=params, files=files)
print(r.json())
