Python/JSON code - where were these variables located? - python

This code is from a separate submission.
If you look at these lines:
each['AuthorString']
each['Title']
I'm wondering where the user got these variables from. I navigated to the JSON page
Link
and could not find these variables. Maybe I'm on the wrong page? Screenshots would help.
Here is the code:
import requests
session_ids = ['13619' ,'13736']
for session_id in session_ids:
    url = 'https://cdn-solr.asco.org/solr/ml/mlselect'
    payload = '?_format=json&wt=json&indent=true&q=SessionId:' + session_id + '&start=0&rows=30&sort=score%20desc,%20SessionId%20asc&fq=RecordType:sessions&facet=true&f.Year.facet.sort=index&facet.field={!key=Year}Year&facet.field={!key=subject_thes}subject_thes&facet.field={!key=MediaTypes}MediaTypes&facet.field={!key=fctSessionType}fctSessionType&facet.pivot={!key=MeetingName}fctMeetingName,fctTrack&spellcheck.maxCollationTries=100'
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'}
    jsonData = requests.get(url + payload, headers=headers).json()
    sessionParticipationID = jsonData['response']['docs'][0]['SessionParticipationID']
    session_id_list = '%20OR%20'.join(sessionParticipationID)
    payload = '?_format=json&wt=json&indent=true&sort=PresentationOrderWithinSession%20asc,%20ISODateString%20asc,%20ISODateStringEnd%20asc&fl=_id,%20score,%20ISODateString,%20ISODateStringEnd,%20ISODateString_1,%20ISODateStringEnd_1,%20Year,%20Title,%20tempAbstractID,%20MediaID,%20VideoID,%20EdBookID,%20edBookTitle,%20PosterID,%20edBookTitle,%20SessionTitle,%20SessionTypeId,%20AuthorString,%20AbstID,%20Role,%20FullName,%20PosterBoard,%20Institution,%20ProgramTitle,%20MeetingName,%20FirstAuthor&q=_id:(' + session_id_list + ')&rows=' + str(len(sessionParticipationID))
    jsonData = requests.get(url + payload, headers=headers).json()
    title_auth = []  # <-- to make a list of {title: author} dictionaries
    for each in jsonData['response']['docs']:
        title = each['Title']          # this line
        author = each['AuthorString']  # and this
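For what it's worth, Title and AuthorString are simply fields of each Solr document returned by the second request; they are named explicitly in the fl=... field list of that second payload. A minimal sketch, reusing the jsonData variable from the code above, that prints every field a returned doc actually exposes:

for each in jsonData['response']['docs']:
    print(sorted(each.keys()))  # lists all available fields of this doc, e.g. Title, AuthorString
    break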

Related

Python web scraping LinkedIn - incomplete result

Please, I need some support from you all. This is a practice Python script for scraping employee information/profile URLs from LinkedIn. The code can currently only print the user names and their current positions, and even that result is incomplete (some entries are just a name, without the role at the company). In the end, the user URLs are not printed out at all.
import random
import argparse
import requests
import re
parser = argparse.ArgumentParser(description='Searches Google For Linkedin Profiles')
parser.add_argument('--keyword', type=str, help='keywords to search')
parser.add_argument('--limit', type=int, help='how many profiles to scrape')
args = parser.parse_args()
class LinkedinScraper(object):
    def __init__(self, keyword, limit):
        # :param keyword: a str of keyword(s) to search for
        # :param limit: number of profiles to scrape
        self.keyword = keyword.replace(' ', '%20')
        self.all_htmls = ""
        self.server = 'www.google.com'
        self.quantity = '100'
        self.limit = int(limit)
        self.counter = 0

    def search(self):
        # perform the search
        # :return: a list of htmls from Google Searches
        # choose a random user agent
        user_agents = [
            'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
            'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0) chromeframe/10.0.648.205',
            'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36',
            'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Ubuntu/11.10 Chromium/18.0.1025.142 Chrome/18.0.1025.142 Safari/535.19',
            'Mozilla/5.0 (Windows NT 5.1; U; de; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.00'
        ]
        while self.counter < self.limit:
            headers = {'User-Agent': random.choice(user_agents)}
            url = 'http://google.com/search?num=100&start=' + str(self.counter) + '&hl=en&meta=&q=site%3Alinkedin.com/in%20' + self.keyword
            resp = requests.get(url, headers=headers)
            if "Our systems have detected unusual traffic from your computer network." in resp.text:
                print("Running into captchas")
                return
            self.all_htmls += resp.text
            self.counter += 10

    def parse_links(self):
        reg_links = re.compile("url=https://www.linkedin.com(.*?)&")
        self.temp = reg_links.findall(self.all_htmls)
        results = []
        for regex in self.temp:
            final_url = regex.replace("url= ", "")
            results.append("https://www.linkedin.com" + final_url)
        return results

    def parse_people(self):
        # :param html: parse the html for Linkedin Profiles using regex
        # :return: a list of names/roles
        reg_people = re.compile(r'>[a-zA-Z0-9._ -]* -|\| LinkedIn')
        self.temp = reg_people.findall(self.all_htmls)
        print(self.temp)
        results = []
        for iteration in self.temp:
            delete = iteration.replace(' | LinkedIn', '')
            delete = delete.replace(' - LinkedIn', '')
            delete = delete.replace(' profiles ', '')
            delete = delete.replace('LinkedIn', '')
            delete = delete.replace('|', '')
            delete = delete.replace('"', '')
            delete = delete.replace('>', '')
            delete = delete.strip("-")
            if delete != " ":
                results.append(delete)
        return results

if __name__ == "__main__":
    ls = LinkedinScraper(keyword="Tesla", limit=100)
    ls.search()
    links = ls.parse_links()
    print(links)
    profiles = ls.parse_people()
    print(*profiles, sep="\n")
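One hedged option, not the original author's approach, for the incomplete names and missing URLs: parse the collected HTML with BeautifulSoup instead of regexes, which tends to be less brittle. The /url?q= wrapping and the title format below are assumptions about Google's result markup, which changes frequently:

from bs4 import BeautifulSoup

def parse_results(html):
    # Collect LinkedIn profile links and the visible result titles from the gathered HTML.
    soup = BeautifulSoup(html, "html.parser")
    profiles = []
    for a in soup.find_all("a", href=True):
        href = a["href"]
        if "linkedin.com/in" in href:
            # Google often wraps result links as /url?q=<target>&... (assumption)
            url = href.split("&")[0].replace("/url?q=", "")
            title = a.get_text(" ", strip=True)  # e.g. "Jane Doe - Engineer - Tesla | LinkedIn"
            profiles.append({"url": url, "title": title})
    return profiles

Something like parse_results(ls.all_htmls) could then replace both parse_links and parse_people, but the exact markup has to be verified against the pages you actually get back.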

Beautiful Soup web scraping returning None - Python

I have a list of movies for which I want to scrape the genres from Google.
I've built this code:
import requests
from bs4 import BeautifulSoup
list=['Se7en','Cinema Paradiso','The Shining','Toy Story 3','Capernaum']
gen2 = {}
for i in list:
    user_query = i + 'movie genre'
    URL = 'https://www.google.co.in/search?q=' + user_query
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36'}
    page = requests.get(URL, headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    c = soup.find(class_='EDblX DAVP1')
    print(c)
    if c != None:
        genres = c.findAll('a')
        gen2[i] = genres
But it returns an empty dict, so I checked one by one and it worked, for example:
import requests
from bs4 import BeautifulSoup
user_query = 'Se7en movie genre'
URL = "https://www.google.co.in/search?q=" + user_query
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36'}
page = requests.get(URL, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
v = soup.find(class_='KKHQ8c')
h = {}
genres = v.findAll('a')
for genre in genres:
    h['Se7en'] = genre
So I found out that inside the loop the variable c is None.
I can't figure out why; it only returns None inside the loop.
Currently, your URLs are of the form
https://www.google.co.in/search?q=Se7enmovie genre
(the movie title and the words "movie genre" are concatenated without a separator, and the spaces are not URL-encoded), so the returned Google results aren't accurate for all the movies.
You can change it to
for i in list:
    i = "+".join(i.split(" "))
    user_query = i + "+movie+genre"
    URL = 'https://www.google.com/search?q=+' + user_query
Also, movies that belong to a single genre, like Cinema Paradiso, are in a div with the class name "Z0LcW".
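Putting both points together, a hedged sketch of the loop (the class names EDblX DAVP1 and Z0LcW are taken from the question and the answer above and may change whenever Google updates its markup):

import requests
from bs4 import BeautifulSoup

movies = ['Se7en', 'Cinema Paradiso', 'The Shining', 'Toy Story 3', 'Capernaum']
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.63 Safari/537.36'}
gen2 = {}
for title in movies:
    user_query = "+".join(title.split(" ")) + "+movie+genre"   # encode spaces as +
    URL = 'https://www.google.com/search?q=' + user_query
    page = requests.get(URL, headers=headers)
    soup = BeautifulSoup(page.content, 'html.parser')
    c = soup.find(class_='EDblX DAVP1')            # multi-genre layout
    if c is not None:
        gen2[title] = [a.get_text() for a in c.findAll('a')]
    else:
        single = soup.find('div', class_='Z0LcW')  # single-genre layout
        if single is not None:
            gen2[title] = [single.get_text()]
print(gen2)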

How would I use multithreading for an API request like this?

I created a username checker against the Ubisoft API. However, the requests are fairly slow, so I wanted to speed things up, and one thing I thought of was multithreading. I know about pools and such, but I've got no clue how to use them for an API request like this one.
def check():
    global checkedCount
    global availableCount
    headers = {
        'Method': 'GET',
        'Authority': 'public-ubiservices.ubi.com',
        'referer': 'https://lb-prod-acc_ount-pdc.ubisoft.com',
        'Ubi-AppId': 'c5393f10-7ac7-4b4f-90fa-21f8f3451a04',
        'Authorization': authToken,
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'Ubi-RequestedPlatformType': 'uplay'}
    for name in usernameList:
        r = requests.get("https://public-ubiservices.ubi.com/v3/profiles?nameOnPlatform=" + name + "&platformType=uplay", headers=headers)
        while r.status_code != 200:  # retry on rate limit
            r = requests.get("https://public-ubiservices.ubi.com/v3/profiles?nameOnPlatform=" + name + "&platformType=uplay", headers=headers)
        if not r.json()['profiles']:
            availableCount += 1
            checkedCount += 1
            print(f"{Fore.CYAN}[$]{Fore.RESET} {name} is available")
        else:
            checkedCount += 1
            print(f"{Fore.CYAN}[$]{Fore.RESET} {name} is unavailable")
Don't say it's a duplicate question, because I'm not trying to use multiple URLs like in the other questions.
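Not the only way to do it, but here is a hedged sketch of the same check using concurrent.futures.ThreadPoolExecutor: the per-name request moves into its own function and a lock protects the shared counters. check_one is a name introduced here; usernameList and authToken are assumed to be defined as in the question.

import threading
from concurrent.futures import ThreadPoolExecutor
import requests

lock = threading.Lock()
checkedCount = 0
availableCount = 0

def check_one(name):
    global checkedCount, availableCount
    headers = {
        'Ubi-AppId': 'c5393f10-7ac7-4b4f-90fa-21f8f3451a04',
        'Authorization': authToken,   # assumed defined elsewhere, as in the question
        'User-Agent': 'Mozilla/5.0',
        'Ubi-RequestedPlatformType': 'uplay'}
    url = ("https://public-ubiservices.ubi.com/v3/profiles?nameOnPlatform="
           + name + "&platformType=uplay")
    r = requests.get(url, headers=headers)
    while r.status_code != 200:       # retry on rate limit, as in the original loop
        r = requests.get(url, headers=headers)
    with lock:                        # counters are shared across threads
        checkedCount += 1
        if not r.json()['profiles']:
            availableCount += 1
            print(f"{name} is available")
        else:
            print(f"{name} is unavailable")

with ThreadPoolExecutor(max_workers=10) as pool:
    pool.map(check_one, usernameList)

Note that hammering the endpoint from several threads will hit the rate limit faster, so a smaller max_workers or a short sleep between retries may work better in practice.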

Save .csv file with names in first column and links in second column (IndentationError: expected an indented block)

When trying to run the script the following error appears:
line 16
for tag in jogos:
^
IndentationError: expected an indented block
My expected result is:
COLUMN 1 COLUMN 2
Team A v Team B LINK HREF
Team C v Team D LINK HREF
Team E v Team F LINK HREF
Team G v Team H LINK HREF
Another problem is that when I store the values in namelist and linkslist, I only get the first value and not all possible values.
In this answer (https://stackoverflow.com/a/68446386/11462274), printing the values, like this:
print(tag.find("a", href=True).get_text().strip())
Or this:
print(tag.find("a", href=True)["href"])
delivers the complete list of values, but when I assign them to specific names like namelist and linkslist, it stops returning the complete list and delivers only one value.
Full Script:
import requests
from bs4 import BeautifulSoup

url = "http://sports.williamhill.com/bet/pt/betlive/9"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
site = requests.get(url, headers=headers)
soup = BeautifulSoup(site.content, "html.parser")
jogos = soup.find_all("tr", class_="rowLive")
with open('Lista_de_Jogos.csv', 'a', newline='', encoding='UTF8') as f:
for tag in jogos:
    namelist = tag.find("a", href=True).get_text().strip()
    linkslist = tag.find("a", href=True)["href"]
    row = namelist + ';' + linkslist + '\n'
    f.write(row)
The error message says it directly: since you're using a context manager, with open(...) as f:, you have to write the code that uses f inside that indented block.
import requests
from bs4 import BeautifulSoup

url = "http://sports.williamhill.com/bet/pt/betlive/9"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
site = requests.get(url, headers=headers)
soup = BeautifulSoup(site.content, "html.parser")
jogos = soup.find_all("tr", class_="rowLive")
with open("Lista_de_Jogos.csv", "a", newline="", encoding="UTF8") as f:
    for tag in jogos:
        namelist = tag.find("a", href=True).get_text().strip()
        linkslist = tag.find("a", href=True)["href"]
        row = namelist + ";" + linkslist + "\n"
        f.write(row)
You have to indent the code after the 'with open' statement.
import requests
from bs4 import BeautifulSoup

url = "http://sports.williamhill.com/bet/pt/betlive/9"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
site = requests.get(url, headers=headers)
soup = BeautifulSoup(site.content, "html.parser")
jogos = soup.find_all("tr", class_="rowLive")
print(jogos)
with open('Lista_de_Jogos.csv', 'a', newline='', encoding='UTF8') as f:
    for tag in jogos:
        namelist = tag.find("a", href=True).get_text().strip()
        linkslist = tag.find("a", href=True)["href"]
        row = namelist + ';' + linkslist + '\n'
        f.write(row)
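As a side note, here is a hedged variant of the same loop using Python's built-in csv module, which handles the row writing for you (keeping the semicolon delimiter and file name from the question):

import csv
import requests
from bs4 import BeautifulSoup

url = "http://sports.williamhill.com/bet/pt/betlive/9"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
site = requests.get(url, headers=headers)
soup = BeautifulSoup(site.content, "html.parser")
jogos = soup.find_all("tr", class_="rowLive")

with open("Lista_de_Jogos.csv", "a", newline="", encoding="UTF8") as f:
    writer = csv.writer(f, delimiter=";")
    for tag in jogos:
        link = tag.find("a", href=True)
        if link is not None:  # skip rows that have no anchor
            writer.writerow([link.get_text().strip(), link["href"]])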

How to get all URLs within a page from oddsportal?

I have code that scrapes all URLs from the oddsportal.com main page.
I want the subsequent links to all pages within each parent URL,
e.g.
https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/
has further pages, i.e. https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/, https://www.oddsportal.com/soccer/africa/africa-cup-of-nations-2019/results/, etc.
How can I get that?
My existing code:
import requests
import bs4 as bs
import pandas as pd

url = 'https://www.oddsportal.com/results/#soccer'
headers = {
    'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
resp = requests.get(url, headers=headers)
soup = bs.BeautifulSoup(resp.text, 'html.parser')
base_url = 'https://www.oddsportal.com'
a = soup.findAll('a', attrs={'foo': 'f'})
# This set will have all the URLs of the main page
s = set()
for i in a:
    s.add(base_url + i['href'])
s = list(s)
# This will filter for all soccer URLs
s = [x for x in s if '/soccer/' in x]
s = pd.DataFrame(s)
print(s)
I am very new to web scraping, hence this question.
You can find the main div tag by its class attribute, then use the find_all method to get the a tags inside it; by looping over them you can extract each href.
from bs4 import BeautifulSoup
import requests

headers = {
    'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
source = requests.get("https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/", headers=headers)
soup = BeautifulSoup(source.text, 'html.parser')
main_div = soup.find("div", class_="main-menu2 main-menu-gray")
a_tag = main_div.find_all("a")
for i in a_tag:
    print(i['href'])
Output:
/soccer/africa/africa-cup-of-nations/results/
/soccer/africa/africa-cup-of-nations-2019/results/
/soccer/africa/africa-cup-of-nations-2017/results/
/soccer/africa/africa-cup-of-nations-2015/results/
/soccer/africa/africa-cup-of-nations-2013/results/
/soccer/africa/africa-cup-of-nations-2012/results/
/soccer/africa/africa-cup-of-nations-2010/results/
/soccer/africa/africa-cup-of-nations-2008/results/
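Since these hrefs are relative, a small hedged addition on top of the snippet above turns them into full URLs with urllib.parse.urljoin (a_tag is the list from the answer's code):

from urllib.parse import urljoin

base_url = 'https://www.oddsportal.com'
season_urls = [urljoin(base_url, a['href']) for a in a_tag]
print(season_urls)
# e.g. ['https://www.oddsportal.com/soccer/africa/africa-cup-of-nations/results/', ...]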
