I have the following code:
import unittest
import requests
import time
import json
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
token = ''
headers = {'Content-Type': 'application/json',
'Token': token}
class ChromeSearch(unittest.TestCase):
    @classmethod
def setUpClass(cls):
cls.driver = webdriver.Chrome()
driver = cls.driver
base_url = 'http://127.0.0.1:8080/'
driver.get(base_url)
print("Generating Token")
usernameStr = 'user1a'
passwordStr = 'user'
response = requests.get('http://127.0.0.1:8080/api/auth/token', auth=(usernameStr, passwordStr))
print(response)
data = response.json()
print(data)
if response.status_code == 200 and data["status"] == "SUCCESS":
token = data["token"]
else:
token = None
print("The request was not successful.")
print(token)
    def test_update_user_info(self):
print("Planning to update user's info")
datap = {'firstname': 'Newfirstname',
'lastname': 'Newname',
'phone': '111000111'}
r = requests.put('http://127.0.0.1:8080/api/users/user1a', data=json.dumps(datap), headers=headers)
print(r.text)
def test_get_users(self):
print("Planning to get users")
r = requests.get('http://127.0.0.1:8080/api/users', headers=headers)
print"Get request for api Users"
print(r.text)
    @classmethod
def tearDownClass(cls):
cls.driver.close()
if __name__ == "__main__":
unittest.main()
The logs are below. Notice that it prints "Generating Token", then "Planning to get users" passes (the "."), but "Planning to update user's info" fails with "Token authentication required". Why? It works just fine if I don't use a class and methods.
DevTools listening on ws://127.0.0.1:55429/devtools/browser/cc53d84c-3847-40a3-b3fb-159892c71ac9
Generating Token
<Response [200]>
{u'status': u'SUCCESS', u'token': u'MjU5NzMzMDY1NTU1NzYxMTE4NjQ4NDgxMTc0OTkyMjI4NTg0NTE5'}
MjU5NzMzMDY1NTU1NzYxMTE4NjQ4NDgxMTc0OTkyMjI4NTg0NTE5
Planning to get users
Get request for api Users
{"payload":["user1a"],"status":"SUCCESS"}
.Planning to update user's info
{"message":"Token authentication required","status":"FAILURE"}
.
----------------------------------------------------------------------
Ran 2 tests in 5.938s
OK
Following up from the comments section:
Here's a sample answer showing how to declare and initialize token in one method and access it in another.
import unittest
class Example(unittest.TestCase):
    @classmethod
def setUpClass(cls):
print("Print me only once")
        cls.token = 100  # Initialize your token here using the auth route
def test1(self):
print(1)
print('Token = ', self.token) # Prints Token = 100
def test2(myself): # Although self is preferred
print(2)
print('Token = ', myself.token) # Prints Token = 100
unittest.main()
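Applied to the class from the question, the same idea explains the failure: the module-level headers dict captured token = '' at import time, and token = data["token"] inside setUpClass only bound a local variable, so the PUT request went out with an empty token. A minimal sketch of the fix (same endpoints and credentials as the question) that stores the token and the headers on the class:

import unittest
import requests

class ChromeSearch(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Fetch the token once and bind it to the class, not to a local name.
        response = requests.get('http://127.0.0.1:8080/api/auth/token',
                                auth=('user1a', 'user'))
        data = response.json()
        cls.token = data["token"] if response.status_code == 200 else None
        # Build the headers here, after the token exists.
        cls.headers = {'Content-Type': 'application/json', 'Token': cls.token}

    def test_get_users(self):
        # Every test method now sees the authenticated headers via self.headers.
        r = requests.get('http://127.0.0.1:8080/api/users', headers=self.headers)
        print(r.text)

if __name__ == "__main__":
    unittest.main()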
First, I'm sorry for this long message, but I have an issue that is blocking me from advancing in my project. Let me quickly explain the workflow: the user enters a search query -> the script searches LinkedIn with this query -> grabs the URLs of the users (depending on the number of pages) -> looks these users up in Proxycurl (https://nubela.co/proxycurl/docs#people-api-person-lookup-endpoint) -> grabs their info with a function -> stores it in my DB -> grabs info about the experiences of the scraped users -> queries the Proxycurl API again, this time for the companies -> grabs info about the companies and stores it in the DB -> searches for info about the employees of each company (https://nubela.co/proxycurl/docs#company-api-employee-search-api-endpoint) -> grabs the URL of the CTO -> queries the contact API for the CTO's info (https://nubela.co/proxycurl/docs#contact-api-personal-contact-number-lookup-endpoint and https://nubela.co/proxycurl/docs#contact-api-personal-email-lookup-endpoint) -> stores everything in the database.
OK, so I manage to grab the URLs and look the users up in the API, but I never manage to get the 'extra' information with my code, while I can grab it for the same profiles in Postman; the same goes for personal_email, personal_contact_number and github_profile_id.
I also manage to grab the data about the companies, but it's the same problem: I can't retrieve the 'extra' information, the 'funding_data' or the 'acquisitions', even though I include them in my code.
I really don't know what's wrong with my code (I'm assuming something is wrong, because everything works perfectly in Postman), and I could use a little help here. Thanks for your time! (Full code below.)
import requests
from datetime import datetime
import json
import re
import selenium
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup, NavigableString, Tag
from time import sleep
from time import time
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import csv
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
import openpyxl
cred = credentials.Certificate(r"C:\Users\radia\Downloads\st-londres-2-firebase-adminsdk-7eowq-786e799875.json")
firebase_admin.initialize_app(cred, {
'databaseURL': 'https://st-londres-2-default-rtdb.firebaseio.com/'
})
print('- Packages imported')
# Task 1: webdriver configuration
driver = webdriver.Chrome(ChromeDriverManager().install())
# Task 1.1: Open Chrome and Access Linkedin
sleep(2)
url = 'https://www.linkedin.com/login'
driver.get(url)
print('Chrome driver initialized')
sleep(2)
# Task 1.2: Import username and password
credential = open(r"C:\Users\radia\OneDrive\Bureau\credentials.txt")
line = credential.readlines()
username = line[0]
password = line[1]
print('Credentials imported')
sleep(2)
# Task 1.2: Key in login credentials
email_field = driver.find_element(By.ID, 'username')
email_field.send_keys(username)
print('Email ok')
sleep(3)
password_field = driver.find_element(By.NAME, 'session_password')
password_field.send_keys(password)
print('Password ok')
sleep(2)
# Task 1.2: Click the Login button
signin_field = driver.find_element(By.XPATH, '//*[@id="organic-div"]/form/div[3]/button')
signin_field.click()
sleep(3)
print('- Task A: Logged in to LinkedIn')
search_field = driver.find_element(By.XPATH, '//*[@id="global-nav-typeahead"]/input')
search_query = input('Type of profile to scrape ')
search_field.send_keys(search_query)
search_field.send_keys(Keys.RETURN)
print('TASK B OK')
sleep(10)
try:
    driver.find_element(By.XPATH, "//*[@id='search-reusables__filters-bar']/ul/li[2]/button").click()
except selenium.common.exceptions.NoSuchElementException:
print("Element not found")
def GetURL(): #function to grab linkedin urls
page_source = BeautifulSoup(driver.page_source, features='lxml')
a_elements = page_source.find_all('a', {'class': "app-aware-link"})
all_urls = []
for element in a_elements:
url = element.get('href')
all_urls.append(url)
return all_urls
##Pagination
sleep(2)
input_page = int(input('Number of pages to scrape: '))
URLs_all_page = []
for page in range(input_page):
URLs_one_page = GetURL()
sleep(2)
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);') #scrolling to the end of the page
sleep(3)
    next_button = driver.find_element(By.XPATH, '//button[contains(@class, "artdeco-pagination__button--next") and .//li-icon]')
driver.execute_script("arguments[0].click();", next_button)
sleep(2)
if URLs_one_page is not None:
URLs_all_page = URLs_all_page + URLs_one_page
print(URLs_all_page)
else:
print('variable stores a None value')
sleep(2)
print(URLs_all_page)
sleep(1)
def get_profile_info(url): # function to make api calls for users
api_endpoint = 'https://nubela.co/proxycurl/api/v2/linkedin'
api_key = 'SDrD73S2fXlvCMdFDExEaw'
headers = {'Authorization': 'Bearer ' + api_key}
params = {
'url': url,
'fallback_to_cache': 'on-error',
'use_cache': 'if-present',
'skills': 'include',
'inferred_salary': 'include',
'personal_email': 'include',
'personal_contact_number': 'include',
'twitter_profile_id': 'include',
'facebook_profile_id': 'include',
'github_profile_id': 'include',
'extra': 'include',
}
try:
response = requests.get(api_endpoint, headers=headers, params=params)
if response.status_code != 404:
data_profile = response.json()
return data_profile
else:
return None
except requests.exceptions.RequestException as e:
print (e)
return None
def get_company_info(url): #function to make api calls for companies
api_key = 'SDrD73S2fXlvCMdFDExEaw'
headers = {'Authorization': 'Bearer ' + api_key}
api_endpoint = 'https://nubela.co/proxycurl/api/linkedin/company'
params = {
'resolve_numeric_id': 'true',
'categories': 'include',
'funding_data': 'include',
'extra': 'include',
'exit_data': 'include',
'acquisitions': 'include',
'url': 'include',
'use_cache': 'if-present',
}
try:
response = requests.get(api_endpoint, params={'url':url}, headers=headers)
if response.status_code == 404:
print("Company not found for URL:", url)
return None
else:
data_company = response.json()
print(data_company)
if 'extra' in data_company:
print("Extra information found:", data_company['extra'])
else:
print("No extra information found in JSON response.")
return data_company
except requests.exceptions.RequestException as e:
print (e)
return None
def get_company_employee_url(company_linkedin_profile_url):
api_endpoint = 'https://nubela.co/proxycurl/api/linkedin/company/employee/search/'
api_key = 'SDrD73S2fXlvCMdFDExEaw'
header_dic = {'Authorization': 'Bearer ' + api_key}
params = {
'page_size': '10',
'linkedin_company_profile_url': company_linkedin_profile_url,
'keyword_regex': '[Cc][Tt][Oo]',
'enrich_profiles': 'enrich',
'resolve_numeric_id': 'false',
}
response = requests.get(api_endpoint,
params=params,
headers=header_dic)
print(response.status_code)
print(response.text)
if response.status_code == 404:
print("No employees found for URL:", url)
return None
else:
data_employees = response.json()
if 'employees' in data_employees:
print("Employees found:", data_employees['employee_search_results'])
else:
print("No employees found in JSON response.")
#return and store profile_url in data_employees:
for employee in data_employees['employee_search_results']:
profile_url = employee['profile_url']
print(profile_url)
def get_company_employee_info(profile_url):
api_endpoint = 'https://nubela.co/proxycurl/api/contact-api/personal-contact'
api_key = 'SDrD73S2fXlvCMdFDExEaw'
header_dic = {'Authorization': 'Bearer ' + api_key}
params = {
'linkedin_profile_url': 'https://linkedin.com/in/test-phone-number',
}
response = requests.get(api_endpoint,
params=params,
headers=header_dic)
# Initialize visited URLs + data_list
visited_urls = []
for url in URLs_all_page:
if url in visited_urls:
print("Profile already exists in the database for URL:", url)
continue
data = get_profile_info(url)
if data and "error" in data:
print(data["error"])
if not data or "experiences" not in data:
continue
data["search_query"] = search_query # Add the search_query to the data
db.reference('profiles').push(data) # Store data in the candidates table
visited_urls.append(url)
print("Profile data and search query successfully added to the candidates table for URL:", url)
for item in data['experiences']:
company_name = str(item['company'])
company_name_push = re.sub(r'[^a-zA-Z0-9]', '', company_name) # Error handling when pushing code to db, replacement of illegal values
company_linkedin_profile_url = item['company_linkedin_profile_url']
company_description = item['description']
company_data = get_company_info(company_linkedin_profile_url)
if company_name_push:
filtered_company = db.reference('companies/'+ company_name_push).get()
else:
continue
if filtered_company is None:
db.reference('companies').push({
'company_name': company_name_push,
'company_linkedin_profile_url': company_linkedin_profile_url,
'company_description': company_description,
'company_data': company_data
})
print("Company data successfully added for URL:", company_linkedin_profile_url)
else:
print("Company already exists in the database for URL:", company_linkedin_profile_url)
experiences = {
'candidate_name': data['full_name'],
'title': item['title'],
'company': item['company'],
'location': item['location'],
'start_date': item['starts_at'],
'end_date': item['ends_at'],
'description': item['description'],
}
db.reference('experiences').push(experiences)
company_employee_url = get_company_employee_url(company_linkedin_profile_url)
company_employee_data = get_company_employee_info(company_employee_url)
if company_employee_data:
db.reference('company_employees/' + company_name_push).push(company_employee_data)
print("Company employee data successfully added for company:", company_name)
else:
print("No data found for company employees for company:", company_name)
Just started learning Python. I am trying to gather data by web scraping and tweet the info out, but every time I rerun the code I get:
Forbidden: 403 Forbidden
187 - Status is a duplicate.
How do I loop this script without getting this error?
Here's my code:
import requests
import tweepy
from bs4 import BeautifulSoup

def scrape():
page = requests.get("https://www.reuters.com/business/future-of-money/")
soup = BeautifulSoup(page.content, "html.parser")
home = soup.find(class_="editorial-franchise-layout__main__3cLBl")
posts = home.find_all(class_="text__text__1FZLe text__dark-grey__3Ml43 text__inherit-font__1Y8w3 text__inherit-size__1DZJi link__underline_on_hover__2zGL4")
top_post = posts[0].find("h3", class_="text__text__1FZLe text__dark-grey__3Ml43 text__medium__1kbOh text__heading_3__1kDhc heading__base__2T28j heading__heading_3__3aL54 hero-card__title__33EFM").find_all("span")[0].text.strip()
    tweet(top_post)
def tweet(top_post):
api_key = 'deletedforprivacy'
api_key_secret = 'deletedforprivacy'
access_token = 'deletedforprivacy'
access_token_secret = 'deletedforprivacy'
authenticator = tweepy.OAuthHandler(api_key, api_key_secret)
authenticator.set_access_token(access_token, access_token_secret)
api = tweepy.API(authenticator, wait_on_rate_limit=True)
api.update_status(f"{top_post} \nSource : https://www.reuters.com/business/future-of-money/")
print(top_post)
scrape()
The Twitter API checks whether the content is a duplicate, and if it is, it returns:
Request returned an error: 403 {"detail":"You are not allowed to create a Tweet with duplicate content.","type":"about:blank","title":"Forbidden","status":403}
I added a simple function to check whether the previous tweet's content is the same as the one about to be posted.
**Full Code**
from requests_oauthlib import OAuth1Session
import os
import json
import requests
from bs4 import BeautifulSoup
import time
user_id = 000000000000000 # Get userid from https://tweeterid.com/
bearer_token = "<BEARER_TOKEN>"
consumer_key = "<CONSUMER_KEY>"
consumer_secret = "<CONSUMER_SECRET>"
def init():
# Get request token
request_token_url = "https://api.twitter.com/oauth/request_token?oauth_callback=oob&x_auth_access_type=write"
oauth = OAuth1Session(consumer_key, client_secret=consumer_secret)
try:
fetch_response = oauth.fetch_request_token(request_token_url)
except ValueError:
print(
"There may have been an issue with the consumer_key or consumer_secret you entered."
)
resource_owner_key = fetch_response.get("oauth_token")
resource_owner_secret = fetch_response.get("oauth_token_secret")
print("Got OAuth token and secret")
# Get authorization
base_authorization_url = "https://api.twitter.com/oauth/authorize"
authorization_url = oauth.authorization_url(base_authorization_url)
print("Please go here and authorize: %s" % authorization_url)
verifier = input("Paste the PIN here: ")
# Get the access token
access_token_url = "https://api.twitter.com/oauth/access_token"
oauth = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier,
)
oauth_tokens = oauth.fetch_access_token(access_token_url)
access_token = oauth_tokens["oauth_token"]
access_token_secret = oauth_tokens["oauth_token_secret"]
# Make the request
oauth = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=access_token,
resource_owner_secret=access_token_secret,
)
scraper(oauth, bearer_token)
def bearer_oauth(r):
"""
Method required by bearer token authentication.
"""
r.headers["Authorization"] = f"Bearer {bearer_token}"
r.headers["User-Agent"] = "v2UserTweetsPython"
return r
def previous_tweet():
url = "https://api.twitter.com/2/users/{}/tweets".format(user_id)
# Tweet fields are adjustable.
# Options include:
# attachments, author_id, context_annotations,
# conversation_id, created_at, entities, geo, id,
# in_reply_to_user_id, lang, non_public_metrics, organic_metrics,
# possibly_sensitive, promoted_metrics, public_metrics, referenced_tweets,
# source, text, and withheld
params = {"tweet.fields": "text"}
response = requests.request(
"GET", url, auth=bearer_oauth, params=params)
print(response.status_code)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
# checking if this is the first post
if response.json() != {'meta': {'result_count': 0}}:
# Since twitter changes html to small url I am splitting at \n to match to new payload
previous_tweet_text = response.json()["data"][0]["text"].split("\n")[0]
previous_payload = {"text": f"{previous_tweet_text}"}
else:
previous_payload = {"text": f""}
return previous_payload
def scraper(oauth, bearer_token):
while True:
page = requests.get(
"https://www.reuters.com/business/future-of-money/")
soup = BeautifulSoup(page.content, "html.parser")
home = soup.find(class_="editorial-franchise-layout__main__3cLBl")
posts = home.find_all(
class_="text__text__1FZLe text__dark-grey__3Ml43 text__inherit-font__1Y8w3 text__inherit-size__1DZJi link__underline_on_hover__2zGL4")
top_post = posts[0].find(
"h3", class_="text__text__1FZLe text__dark-grey__3Ml43 text__medium__1kbOh text__heading_3__1kDhc heading__base__2T28j heading__heading_3__3aL54 hero-card__title__33EFM").find_all("span")[0].text.strip()
        # Be sure to replace the text with the text you wish to Tweet. You can also add parameters to post polls, quote Tweets, Tweet with reply settings, and Tweet to Super Followers, in addition to other features.
payload = {
"text": f"{top_post}\nSource:https://www.reuters.com/business/future-of-money/"}
current_checker_payload = {"text": payload["text"].split("\n")[0]}
previous_payload = previous_tweet()
if previous_payload != current_checker_payload:
tweet(payload, oauth)
else:
print("Content hasn't changed")
time.sleep(60)
def tweet(payload, oauth):
# Making the request
response = oauth.post(
"https://api.twitter.com/2/tweets",
json=payload,
)
if response.status_code != 201:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text)
)
print("Response code: {}".format(response.status_code))
# Showing the response as JSON
json_response = response.json()
print(json.dumps(json_response, indent=4, sort_keys=True))
if __name__ == "__main__":
init()
**Output**
Response code: 201
{
"data": {
"id": "1598558336672497664",
"text": "FTX ex-CEO Bankman-Fried claims he was unaware of improper use of customer funds -ABC News\nSource:URL" #couldn't post short url in stackoverflow
}
}
Content hasn't changed
Content hasn't changed
Content hasn't changed
Hope this helps. Happy Coding :)
How do I create a request to rentry.co in order to create pastes?
I've tried to solve this in Python but I get the following response:
403 reason: Forbidden ...
I tried changing the URL and adding my cookie.
My code currently looks as follows.
import requests
text = "Hello World!"
data = {"text":text}
r = requests.post("https://rentry.co/api", data=data)
print(f"status code: {r.status_code}")
print(f"reason: {r.reason}") ```
Try this:
#!/usr/bin/env python3
import http.cookiejar
import sys
import urllib.parse
import urllib.request
from http.cookies import SimpleCookie
from json import loads as json_loads
_headers = {"Referer": 'https://rentry.co'}
class UrllibClient:
"""Simple HTTP Session Client, keeps cookies."""
def __init__(self):
self.cookie_jar = http.cookiejar.CookieJar()
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.cookie_jar))
urllib.request.install_opener(self.opener)
def get(self, url, headers={}):
request = urllib.request.Request(url, headers=headers)
return self._request(request)
def post(self, url, data=None, headers={}):
postdata = urllib.parse.urlencode(data).encode()
request = urllib.request.Request(url, postdata, headers)
return self._request(request)
def _request(self, request):
response = self.opener.open(request)
response.status_code = response.getcode()
response.data = response.read().decode('utf-8')
return response
def new(url, edit_code, text):
client, cookie = UrllibClient(), SimpleCookie()
cookie.load(vars(client.get('https://rentry.co'))['headers']['Set-Cookie'])
csrftoken = cookie['csrftoken'].value
payload = {
'csrfmiddlewaretoken': csrftoken,
'url': url,
'edit_code': edit_code,
'text': text
}
return json_loads(client.post('https://rentry.co/api/new', payload, headers=_headers).data)
def get_rentry_link(text):
url, edit_code = '', ''
response = new(url, edit_code, text)
if response['status'] != '200':
print('error: {}'.format(response['content']))
try:
for i in response['errors'].split('.'):
i and print(i)
sys.exit(1)
except:
sys.exit(1)
else:
pastebin_link = response['url']
print('Url: {}\nEdit code: {}'.format(response['url'], response['edit_code']))
return pastebin_link
if __name__ == '__main__':
link_list = ['https://stackoverflow.com/', 'https://www.youtube.com/', 'https://www.google.com/']
pastebin_link = get_rentry_link('\n'.join(map(str, link_list)))
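For reference, the same flow can also be written with the requests library the question started from; this is a minimal sketch, assuming the same rentry.co endpoints and form fields as the urllib answer above:

import requests

def new_rentry(text, url='', edit_code=''):
    session = requests.Session()
    # GET the homepage first so the session picks up the csrftoken cookie.
    session.get('https://rentry.co')
    payload = {
        'csrfmiddlewaretoken': session.cookies['csrftoken'],
        'url': url,
        'edit_code': edit_code,
        'text': text,
    }
    # The Referer header matters here; without it (and the CSRF token),
    # the API answers 403 as in the question.
    return session.post('https://rentry.co/api/new', data=payload,
                        headers={'Referer': 'https://rentry.co'}).json()

print(new_rentry('Hello World!'))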
I have this Python function and I wish to execute it using a Lambda handler function, hence I have written the code below. When I execute it in PyCharm, I don't see any output in the console. Can someone tell me what the problem with the code is?
import json
from json import loads
import requests
from requests import exceptions
from requests.auth import HTTPBasicAuth
def lambda_handler(event, context):
test_post_headers_body_json()
return {"statusCode": 200, "body": json.dumps("Hello from Lambda!")}
def test_post_headers_body_json():
client_id = "WJRYDHNGROIZHL8B"
client_secret = "V5VXK6FLG1YI0GD2XY3H"
user = "automation-store-admin1#abc.com"
password = "c0Ba5PBdvVl2"
access_point = "https://api.platform.abc.com/auth/oauth/token"
grant_type = "password"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
# auth = auth.HTTPBasicAuth(client_id, client_secret)
data = {"grant_type": grant_type, "username": user, "password": password}
resp = None
try:
resp = requests.post(
access_point,
auth=HTTPBasicAuth(client_id, client_secret),
data=data,
headers=headers,
)
except exceptions.ConnectionError:
exit(1)
if resp.status_code == 200:
resp = loads(resp.text)
if "access_token" in resp:
print(resp["access_token"])
exit(0)
exit(1)
This is normal: when you run the file, Python only declares the functions, it never calls them. You should add a __main__ entry point at the end of your file:
import json
from json import loads
import requests
from requests import exceptions
from requests.auth import HTTPBasicAuth
def lambda_handler(event, context):
test_post_headers_body_json()
return {"statusCode": 200, "body": json.dumps("Hello from Lambda!")}
def test_post_headers_body_json():
client_id = "WJRYDHNGROIZHL8B"
client_secret = "V5VXK6FLG1YI0GD2XY3H"
user = "automation-store-admin1#abc.com"
password = "c0Ba5PBdvVl2"
access_point = "https://api.platform.abc.com/auth/oauth/token"
grant_type = "password"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
# auth = auth.HTTPBasicAuth(client_id, client_secret)
data = {"grant_type": grant_type, "username": user, "password": password}
resp = None
try:
resp = requests.post(
access_point,
auth=HTTPBasicAuth(client_id, client_secret),
data=data,
headers=headers,
)
except exceptions.ConnectionError:
exit(1)
if resp.status_code == 200:
resp = loads(resp.text)
if "access_token" in resp:
print(resp["access_token"])
exit(0)
exit(1)
# added part
if __name__ == '__main__':
test_post_headers_body_json()
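For context: on AWS, the Lambda runtime imports the module and calls lambda_handler(event, context) itself; when you run the file locally, nothing does, which is why PyCharm showed no output. If you would rather exercise the handler itself locally, a dummy invocation works too. Note (an observation on the code above) that test_post_headers_body_json always ends in exit(), so execution stops inside it; replacing those exit() calls with return statements would let the handler's return value come back.

# Hypothetical local entry point simulating what the Lambda runtime does;
# the empty dict and None stand in for the real event and context objects.
if __name__ == '__main__':
    print(lambda_handler({}, None))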
I have a Python class that sends payloads to AWS using boto3 and the requests library. However, sometimes the HTTP requests fail with various codes, so I want to write a wrapper function inside the class that retries sending the payload 5 times if it gets certain codes, and raises an exception if it fails completely. Here is the class method (assume the method calls work as expected):
import logging
import requests
from boto3 import Session
def update_status(self, status):
payload = status
auth = self.sign_request()
response = requests.patch(self.url, auth=auth, data=payload)
status_code = response.status_code
response_text = response.text
if not response.ok:
logging.error("Failed updating status of request: " + str(
{'host': self.host, 'region': self.region,
'service': self.service, 'url': self.url, 'status': str(status)}))
raise IOError('Update training status failed with status code: ' + str(status_code) + '\n' + response_text)
logging.info("Updated status")
Sometimes this API call fails with status 504. I would like to write a wrapper retry method around this class method that by default retries 5 times with a wait of retry^2 seconds between tries, and exits the loop on a 200 success.
I found this code, which seems to be along the lines of what I would use; I'm just not sure how to wrap my current method inside it and call it:
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def requests_retry_session(
retries=5,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
The issue with the above code is that it uses requests.Session and returns it, while my class is already using boto3.Session. Any help would be appreciated!
I'd try something like this:
import time
import requests
from functools import wraps
import logging
logging.basicConfig(level=logging.DEBUG)
def retry(delay=10, retries=4):
def retry_decorator(f):
        @wraps(f)
def f_retry(*args, **kwargs):
opt_dict = {'retries': retries, 'delay': delay}
while opt_dict['retries'] > 1:
try:
return f(*args, **kwargs)
except Exception as e:
msg = "Exception: {}, Retrying in {} seconds...".format(e, delay)
print(msg)
time.sleep(opt_dict['delay'])
opt_dict['retries'] -= 1
return f(*args, **kwargs)
return f_retry
return retry_decorator
class YourClass:
# JUST MOCK FOR PROOF OF CONCEPT
url = 'YOUR URL'
status = 'YOUR STATUS'
def sign_request(self):
return ''
host = 'YOUR HOST'
region = 'YOUR REGION'
service = 'YOUR SERVICE'
# MOCK END
def update_status(self, status):
payload = status
auth = self.sign_request()
        @retry(1, 5)
def get_status():
response = requests.patch(self.url, auth=auth, data=payload)
if not response.ok:
logging.error("Failed updating status of request: " + str(
{'host': self.host, 'region': self.region,
'service': self.service, 'url': self.url, 'status': str(status)}))
raise IOError('Update training status failed with status code: ' + str(response.status_code) + '\n' + response.text)
return response
res = get_status()
status_code = res.status_code
response_text = res.text
logging.info("Updated status")
x = YourClass()
x.url = 'https://httpstat.us/200'
x.update_status('')
x.url = 'https://httpstat.us/504'
x.update_status('')
Of course you may want to adjust it to your needs.
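For completeness, the requests_retry_session helper from the question can also be used directly inside the method: boto3.Session manages AWS credentials while requests.Session manages HTTP connections, so the two don't conflict. One caveat I'd flag: urllib3's Retry only retries idempotent methods by default, and PATCH is not on that list, so the allowed methods need widening (the keyword is allowed_methods on recent urllib3 versions, method_whitelist on older ones). A sketch under those assumptions:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def patch_retry_session(retries=5, backoff_factor=0.3,
                        status_forcelist=(500, 502, 504)):
    session = requests.Session()
    retry = Retry(
        total=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
        allowed_methods=frozenset({'PATCH'}),  # PATCH is not retried by default
    )
    session.mount('https://', HTTPAdapter(max_retries=retry))
    return session

# Inside the asker's class, update_status would then start with:
#     session = patch_retry_session()
#     response = session.patch(self.url, auth=self.sign_request(), data=payload)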