I have a script for checking which file extensions exist on a web server (the file I expect to find is .txt). I am wondering why I am not getting a result:
#!/usr/bin/python3
import requests
from urllib3 import disable_warnings
from urllib3.exceptions import InsecureRequestWarning

# Silence the "unverified HTTPS request" warning, since verify=False is used below.
# (Note: assigning PYTHONWARNINGS as a Python variable has no effect; it only
# works as an environment variable, so it is dropped here.)
disable_warnings(InsecureRequestWarning)

url = "https://test.site/"
pref = "testdir"
extensions = [".asp", ".aspx", ".bat", ".sql", ".txt", ".xml"]

for ext in extensions:
    new_url = url + pref + ext
    res = requests.get(new_url, verify=False)
    if res.status_code == 200:
        print("[+] %s exists!" % new_url)
Am I missing an import, a package, or an undeclared variable?
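For what it's worth, the imports above are sufficient; a quick way to see why nothing prints is to log every status code rather than only 200s. A diagnostic sketch reusing the names from the script (test.site is the placeholder from the question):

for ext in extensions:
    res = requests.get(url + pref + ext, verify=False)
    # a 301/302 or 403 here would explain why the 200 check never fires
    print(res.status_code, res.url)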
The code runs, but the requests still don't seem to be routed through the proxy server. What is wrong in this code?
import urllib.request as req

proxy = req.ProxyHandler({'http': r'UserName:pass@URL:Port'})
auth = req.HTTPBasicAuthHandler()
opener = req.build_opener(proxy, auth, req.HTTPHandler)
req.install_opener(opener)

conn = req.urlopen('https://google.com')
return_str = conn.read()
return_str = str(return_str)
print(return_str)
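A likely cause: the ProxyHandler above only maps the 'http' scheme, so an https:// URL bypasses the proxy entirely. A minimal sketch that registers the proxy for both schemes ('UserName:pass@proxyhost:port' is a placeholder, as in the question):

import urllib.request as req

# Map BOTH schemes to the proxy; with only 'http' registered,
# the https:// request never touches the proxy.
proxy = req.ProxyHandler({
    'http':  'http://UserName:pass@proxyhost:port',
    'https': 'http://UserName:pass@proxyhost:port',
})
opener = req.build_opener(proxy)
req.install_opener(opener)

conn = req.urlopen('https://google.com')
print(conn.read()[:200])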
Doc: https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issues/#api-rest-api-2-issue-issueidorkey-get-example
API: https://your-domain.atlassian.net/rest/api/2/issue/{issueIdOrKey}/comment
Python script:
url = "https://your-domain.atlassian.net/rest/api/2/issue/{issueIdOrKey}"
auth = HTTPBasicAuth("email#example.com", "<api_token>")
headers = {
"Accept": "application/json"
}
response = requests.request(
"GET",
url,
headers=headers,
auth=auth
)
print(json.dumps(json.loads(response.text), sort_keys=True, indent=4, separators=(",", ": ")))
Error
Update 1:
File "test.py", line 1, in <module>
import requests
ModuleNotFoundError: No module named 'requests'
Update 2:
Installing requests fixed that, but then I get another issue (see the screenshot). I also tried to install json via python -m pip install json, but that does not work.
You are missing import statements.
At the top of the script, try adding:
import requests
from requests.auth import HTTPBasicAuth
import json
Note that json is part of the Python standard library, so it cannot (and does not need to) be installed with pip; only requests has to be installed.
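Putting it together, a minimal runnable sketch (your-domain, the email address, and the API token are placeholders carried over from the question):

import json
import requests
from requests.auth import HTTPBasicAuth

url = "https://your-domain.atlassian.net/rest/api/2/issue/{issueIdOrKey}"
auth = HTTPBasicAuth("email@example.com", "<api_token>")
headers = {"Accept": "application/json"}

# GET the issue and pretty-print the JSON body
response = requests.get(url, headers=headers, auth=auth)
print(json.dumps(response.json(), sort_keys=True, indent=4))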
This demonstrates how to log into a website. It is very fragile, but the goal is to show how to deal with forms, submit them, and use a session to maintain cookies across page reads. The program assumes that you have done the research and know which fields you need to fill in.
import sys
import requests
import lxml
import getpass
from bs4 import BeautifulSoup

sys.path.append("../lib")
from agentsGalore import agentsGalore
from formHelper import formHelper

# start a session
session = requests.Session()
ag = agentsGalore()

def openURL(url, cookie=None):
    global session
    global ag
    headers = ag.makeHeader("MacFirefox58", "default", "default", "langUS")
    try:
        if cookie:
            r = session.get(url, cookies=cookie, headers=headers)
        else:
            r = session.get(url, headers=headers)
    except requests.exceptions.RequestException as e:
        print(e)
        exit(1)
    return r

# this function does a POST to the URL with the params in a dict
def postURL(url, params):
    global ag
    global session
    headers = ag.makeHeader("MacFirefox58", "default", "default", "langUS")
    try:
        # pass the headers here too, otherwise the POST goes out without them
        r = session.post(url, data=params, headers=headers)
    except requests.exceptions.RequestException as e:
        print(e)
        exit(1)
    return r

# first, open the login page
url = 'https://my.wlc.edu/ICS/'
resp = openURL(url)
fh = formHelper(resp.text)

# get a populated param structure. You can call fh.analyzeInputs() to
# see what all the input fields are.
params = fh.populateFormInputs(fh.getFormById('MAINFORM'))
userid = getpass.getpass("enter your userid:", sys.stderr)
password = getpass.getpass("enter your password:", sys.stderr)
params['userName'] = userid
params['password'] = password

formurl = 'https://my.wlc.edu/ICS/'
res = postURL(formurl, params)
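For readers who don't have the agentsGalore and formHelper helpers, here is a minimal self-contained sketch of the same pattern using only requests and BeautifulSoup; the form id and field names are carried over from the answer above and would need adapting for another site:

import requests
from bs4 import BeautifulSoup

session = requests.Session()

# Fetch the login page and collect every named <input> of the form,
# which also picks up hidden anti-CSRF tokens.
resp = session.get('https://my.wlc.edu/ICS/')
form = BeautifulSoup(resp.text, 'lxml').find('form', id='MAINFORM')
params = {inp['name']: inp.get('value', '')
          for inp in form.find_all('input') if inp.get('name')}

params['userName'] = 'your-userid'    # placeholder credentials
params['password'] = 'your-password'

# POST within the same session so the auth cookie is kept for later reads
res = session.post('https://my.wlc.edu/ICS/', data=params)
print(res.status_code)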
You can try running:
apt-get install python-bs4
or
pip install beautifulsoup4
or
easy_install beautifulsoup4
on the command line to install the package beautifulsoup4.
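To check that the install worked, a quick smoke test (html.parser is the standard-library parser, so it needs no extra dependency):

from bs4 import BeautifulSoup

# prints "hi" if beautifulsoup4 imported and parsed correctly
print(BeautifulSoup("<p>hi</p>", "html.parser").p.text)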
I have a problem statement where I have to log in to a website and then download a zip file. I have written the code below, which logs in to the website (it prints the authentication-successful message) and creates a session. How can I download the zip file now?
import requests
import urllib
import urllib.request
import zipfile
import io
import shutil

post_login_url = 'https://www.ims-dm.com/mvc/page/customer-sign-in/cgi/cookie.php'
request_url = 'http://www.ims-dm.com/cgi/securedownload.php?p=WPNFTPD#prodtype=wpn/WPN-FULL-20180306.TXT.zip'

payload = {
    'sendusername': 'xxxxxxxxxx',
    'password': 'xxxxxx'
}

with requests.Session() as session:
    post = session.post(post_login_url, data=payload)
    if post.status_code == 200:
        print("Authentication successful!")
        url = session.get(request_url)
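Since the session already carries the login cookie, streaming the body of that GET to disk should be enough; a sketch, with the local filename chosen to match the remote one:

with requests.Session() as session:
    post = session.post(post_login_url, data=payload)
    if post.status_code == 200:
        print("Authentication successful!")
        # stream=True avoids holding the whole zip in memory at once
        resp = session.get(request_url, stream=True)
        with open("WPN-FULL-20180306.TXT.zip", "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)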
I'm having a bit of an issue downloading an xlsx file over HTTPS.
Here is my code to scrape the site and get the download URL, but it seems to redirect me to a new site; when I put the link in my browser, it downloads the file straight away.
Is there something I'm doing wrong?
Here is the code I used for scraping the site:
import contextlib
import OpenSSL.crypto
import os
import requests
import ssl
import tempfile
import http.client
import shutil
from OpenSSL import crypto
import pem
import html2text

url = "https://signonssl.site.com"
base_url = "basedownloadurl"
p12_cert = "cert_path"
password = "password"

@contextlib.contextmanager
def pfx_to_pem(p12_path, pfx_password):
    ''' Decrypts the .p12 file to be used with requests. '''
    with tempfile.NamedTemporaryFile(suffix='.pem') as t_pem:
        f_pem = open(t_pem.name, 'wb')
        pfx = open(p12_path, 'rb').read()
        p12 = OpenSSL.crypto.load_pkcs12(pfx, pfx_password)
        f_pem.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, p12.get_privatekey()))
        f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, p12.get_certificate()))
        ca = p12.get_ca_certificates()
        f_pem.close()
        yield t_pem.name

with pfx_to_pem(p12_cert, password) as cert:
    html_response = requests.get(url, cert=cert).content.decode("utf-8")
    htmlconv = html2text.html2text(html_response).split("name")[1]
    dl_link = htmlconv.split(")")[0].split("(")[1]
    dl = requests.get(dl_link, cert=cert, stream=True, allow_redirects=False)
    output = open('test.xlsx', 'wb')
    output.write(dl.content)
    output.close()
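One likely culprit is allow_redirects=False: with it, dl.content is the body of the redirect response, not the spreadsheet. A sketch that follows the redirect while keeping the client cert (assuming the redirect target accepts the same cert):

with pfx_to_pem(p12_cert, password) as cert:
    # dl_link scraped as in the code above; follow the redirect chain
    # to the real file, re-sending the cert on each request
    dl = requests.get(dl_link, cert=cert, stream=True, allow_redirects=True)
    dl.raise_for_status()
    with open('test.xlsx', 'wb') as output:
        for chunk in dl.iter_content(chunk_size=8192):
            output.write(chunk)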
Any guidance is much appreciated.
Thanks!
Pon