I am trying to parse the elements of a webpage from transfermarkt.com and convert them to JSON so I can use the player details.
transfermarkt.com page - https://www.transfermarkt.com/ederson/marktwertverlauf/spieler/238223
I am getting the error below:
ExpatError: not well-formed (invalid token): line 6, column 519
My code is below -
import xmltojson
import json
import requests

# Sample URL to fetch the html page
url = "https://www.transfermarkt.com/ederson/marktwertverlauf/spieler/238223"

# Headers to mimic the browser
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
           'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
           'Connection': 'keep-alive'}

# Get the page through the get() method
html_response = requests.get(url=url, headers=headers)

# Save the page content as sample.html
with open("sample.html", "w", encoding="utf-8") as html_file:
    html_file.write(html_response.text)

with open("sample.html", "r", encoding="utf-8") as html_file:
    html = html_file.read()

json_ = xmltojson.parse(html)

with open("data.json", "w") as file:
    json.dump(json_, file)

print(json_)
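As context for the error: ExpatError comes from the XML parser underneath xmltojson, and real-world HTML is rarely well-formed XML, so it gives up at the first invalid token. A minimal alternative sketch, assuming the details you want sit in ordinary HTML tags (the title/h1 targets below are only illustrative, not the confirmed page structure), is to parse the page with BeautifulSoup and build the JSON yourself:

import json

import requests
from bs4 import BeautifulSoup

url = "https://www.transfermarkt.com/ederson/marktwertverlauf/spieler/238223"
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'}

html_response = requests.get(url=url, headers=headers)
soup = BeautifulSoup(html_response.text, "html.parser")

# Collect a couple of simple details into a plain dict, then dump that dict to JSON.
# The <title> and <h1> targets are only illustrative, not the confirmed page structure.
details = {
    "title": soup.title.get_text(strip=True) if soup.title else None,
    "heading": soup.h1.get_text(strip=True) if soup.h1 else None,
}

with open("data.json", "w", encoding="utf-8") as json_file:
    json.dump(details, json_file, ensure_ascii=False)

print(details)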
Related
import requests
from pprint import pprint
import pandas as pd
baseurl = "https://www.nseindia.com/"
url = 'https://www.nseindia.com/api/live-analysis-oi-spurts-underlyings'
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, '
                         'like Gecko) '
                         'Chrome/80.0.3987.149 Safari/537.36',
           'accept-language': 'en,gu;q=0.9,hi;q=0.8', 'accept-encoding': 'gzip, deflate, br'}
session = requests.Session()
request = session.get(baseurl, headers=headers, timeout=30)
cookies = dict(request.cookies)
res = session.get(url, headers=headers, timeout=30, cookies=cookies)
print(res.json())
I tried df = pd.DataFrame(res.json()) but couldn't get the data in table format. How can I do that? Also, how do I select only a few particular columns in the output instead of all of them?
Try this:
import json
import codecs
df = pd.DataFrame(json.loads(codecs.decode(bytes(res.text, 'utf-8'), 'utf-8-sig'))['data'])
And to select specific columns, you can use:
mini_df = df[['symbol', 'latestOI', 'prevOI', 'changeInOI', 'avgInOI']]
>>> print(mini_df)
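Since print(res.json()) in the question already decodes the payload, an equivalent shorter form, assuming the same top-level 'data' key, would be:

# Same DataFrame without the manual BOM handling, relying on requests' JSON decoding
df = pd.DataFrame(res.json()['data'])
mini_df = df[['symbol', 'latestOI', 'prevOI', 'changeInOI', 'avgInOI']]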
I am trying to get the data from this website, https://en.macromicro.me/charts/947/commodity-ccfi-scfi, for the China and Shanghai Containerized Freight Index.
I understand that the data is called from an API. How do I find out how the call is made, and how do I extract it using Python?
I am new to HTML in general, so I have no idea where to start.
I tried:
import requests
url = "https://en.macromicro.me/charts/data/947/commodity-ccfi-scfi"
resp = requests.get(url)
resp = resp.json()
But the response is <Response [404]>
If I change the url to https://en.macromicro.me/charts/data/947/
the response is {'success': 0, 'data': [], 'msg': 'error #644'}
Try the below:
import requests
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',
'Referer': 'https://en.macromicro.me/charts/947/commodity-ccfi-scfi',
'X-Requested-With': 'XMLHttpRequest',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Authorization': 'Bearer 9633cefba71a598adae0fde0b56878fe',
'Cookie': 'PHPSESSID=n74gv10hc8po7mrj491rk4sgo1; _ga=GA1.2.1231997091.1631627585; _gid=GA1.2.1656837390.1631627585; _gat=1; _hjid=c52244fd-b912-4d53-b0e3-3f11f430b51c; _hjFirstSeen=1; _hjAbsoluteSessionInProgress=0'}
r = requests.get('https://en.macromicro.me/charts/data/947', headers=headers)
print(r.json())
Output:
{'success': 1, 'data': {' ...}
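The 'data' part of the output is truncated above; to see its full structure before deciding how to parse it, pretty-printing the decoded response is a quick check (a small sketch reusing r from the snippet above):

import json

# Pretty-print the payload to inspect how the CCFI/SCFI series are nested
print(json.dumps(r.json(), indent=2, ensure_ascii=False))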
I am trying to harness a Russian language spellcheck API, Yandex.Speller.
The request seems to work fine in my browser. However, when I use a python script, the response is empty.
I am stumped as to what I am doing wrong.
Here is my code:
import urllib
from urllib.request import urlopen
import json

def main():
    api(text_preproc())

def text_preproc():
    """ Takes misspelled word/phrase,
    “t”, and prepares it for
    API request
    """
    t = "синхрафазатрон в дубне"
    text = t.replace(" ", "+")
    return text

def diff_api(text):
    my_url = "https://speller.yandex.net/services/spellservice.json/checkText?text="
    my_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
    my_data = {
        "text": text,
        "lang": "ru",
        "format": "plain"}
    my_uedata = urllib.parse.urlencode(my_data)
    my_edata = my_uedata.encode('ascii')
    req = urllib.request.Request(url=my_url, data=my_edata, headers=my_headers)
    response = urlopen(req)
    data = json.load(response)
    print(data)
The response is always an empty array, no matter how I tinker with my request.
Any insight into what I might be doing wrong?
my_uedata has to be a part of the URL you send the request to.
Also, in:
def main():
api(text_preproc())
You call api() but the function is not defined. I've used diff_api().
Try this:
import json
import urllib
from urllib.request import urlopen

def main():
    diff_api(text_preproc("синхрафазатрон в дубне"))

def text_preproc(phrase):
    """ Takes misspelled word/phrase,
    “t”, and prepares it for
    API request
    """
    return phrase.replace(" ", "+")

def diff_api(text):
    my_url = "https://speller.yandex.net/services/spellservice.json/checkText?text="
    my_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
    my_data = {
        "text": text,
        "lang": "ru",
        "format": "plain"}
    my_uedata = urllib.parse.urlencode(my_data)
    req = urllib.request.Request(url=my_url+my_uedata, headers=my_headers)
    data = json.load(urlopen(req))
    print(data)

main()
Output:
[{'code': 1, 'pos': 5, 'row': 0, 'col': 5, 'len': 14, 'word': 'синхрафазатрон', 's': ['синхрофазотрон', 'синхрофазатрон', 'синхрофазотрона']}]
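For comparison, here is a minimal sketch of the same call using the requests library (assuming it is installed); it builds the query string from params, which is exactly the point the fix above makes:

import requests

# Same checkText endpoint; requests appends text/lang/format as the query string
resp = requests.get(
    "https://speller.yandex.net/services/spellservice.json/checkText",
    params={"text": "синхрафазатрон в дубне", "lang": "ru", "format": "plain"},
)
print(resp.json())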
What is wrong in my code? I am trying to get the same content as on https://koleo.pl/rozklad-pkp/krakow-glowny/radom/19-03-2019_10:00/all/EIP-IC--EIC-EIP-IC-KM-REG, but the result is different from what I want.
import requests
from bs4 import BeautifulSoup
s = requests.Session()
s.headers.update({"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                                'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'})
response = s.get('https://koleo.pl/rozklad-pkp/krakow-glowny/radom/'
                 '19-03-2019_10:00/all/EIP-IC--EIC-EIP-IC-KM-REG')
soup = BeautifulSoup(response.text, 'lxml')
print(soup.prettify())
You can use requests and pass params in to get JSON for the train info and prices. I haven't parsed out all the info, as this is just to show you it is possible. I parse out the train ids so I can make the subsequent requests for price info, which is linked to the train info by those ids.
import requests
from bs4 import BeautifulSoup as bs
url = 'https://koleo.pl/pl/connections/?'
headers = {
'Accept' : 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding' : 'gzip, deflate, br',
'Accept-Language' : 'en-US,en;q=0.9',
'Connection' : 'keep-alive',
'Cookie' : '_ga=GA1.2.2048035736.1553000429; _gid=GA1.2.600745193.1553000429; _gat=1; _koleo_session=bkN4dWRrZGx0UnkyZ3hjMWpFNGhiS1I3TzhQMGNyWitvZlZ0QVRUVVVtWUFPMUwxL0hJYWJyYnlGTUdHYXNuL1N6QlhHMHlRZFM3eFZFcjRuK3ZubllmMjdSaU5CMWRBSTFOc1JRc2lDUGV0Y2NtTjRzbzZEd0laZWI1bjJoK1UrYnc5NWNzZzNJdXVtUlpnVE15QnRnPT0tLTc1YzV1Q2xoRHF4VFpWWTdWZDJXUnc9PQ%3D%3D--3b5fe9bb7b0ce5960bc5bd6a00bf405df87f8bd4',
'Host' : 'koleo.pl',
'Referer' : 'https://koleo.pl/rozklad-pkp/krakow-glowny/radom/19-03-2019_10:00/all/EIP-IC--EIC-EIP-IC-KM-REG',
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36',
'X-CSRF-Token' : 'heag3Y5/fh0hyOfgdmSGJBmdJR3Perle2vJI0VjB81KClATLsJxFAO4SO9bY6Ag8h6IkpFieW1mtZbD4mga7ZQ==',
'X-Requested-With' : 'XMLHttpRequest'
}
params = {
'v' : 'a0dec240d8d016fbfca9b552898aba9c38fc19d5',
'query[date]' : '19-03-2019 10:00:00',
'query[start_station]' : 'krakow-glowny',
'query[end_station]': 'radom',
'query[brand_ids][]' : '29',
'query[brand_ids][]' : '28',
'query[only_direct]' : 'false',
'query[only_purchasable]': 'false'
}
with requests.Session() as s:
    data = s.get(url, params=params, headers=headers).json()
    print(data)
    priceUrl = 'https://koleo.pl/pl/prices/{}?v=a0dec240d8d016fbfca9b552898aba9c38fc19d5'
    for item in data['connections']:
        r = s.get(priceUrl.format(item['id'])).json()
        print(r)
You have to use Selenium in order to get that dynamically generated content, and then you can parse the HTML with BeautifulSoup. For example, I've parsed the dates:
from bs4 import BeautifulSoup
from selenium import webdriver
driver = webdriver.Firefox()
driver.get('https://koleo.pl/rozklad-pkp/krakow-glowny/radom/19-03-2019_10:00/all/EIP-IC--EIC-EIP-IC-KM-REG')
soup = BeautifulSoup(driver.page_source, 'lxml')
for div in soup.findAll("div", {"class": 'date custom-panel'}):
    date = div.findAll("div", {"class": 'row'})[0].string.strip()
    print(date)
Output:
wtorek, 19 marca
środa, 20 marca
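If the dates have not rendered by the time page_source is read, an explicit wait before parsing helps; a minimal sketch using Selenium's standard wait helpers and the same 'date custom-panel' class as above:

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get('https://koleo.pl/rozklad-pkp/krakow-glowny/radom/19-03-2019_10:00/all/EIP-IC--EIC-EIP-IC-KM-REG')
# Wait up to 15 seconds for at least one date panel to be present before grabbing the HTML
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, 'div.date.custom-panel'))
)
soup = BeautifulSoup(driver.page_source, 'lxml')
driver.quit()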
I have already searched for an answer here and spent a long time on google, but nothing...
I've already tried opening the file with 'w' instead of 'r' or 'a', but I still can't get my code to overwrite the results I originally wrote to the CSV file. I'm basically scraping information from a website: I want to first search for a term, scrape that data, save it to the CSV file AND THEN search for another term, scrape that data, and overwrite the current CSV file with the new data.
#!/usr/bin/python3
#from pyvirtualdisplay import Display
import csv
from bs4 import BeautifulSoup
import urllib.request
def getPageSource(current_page):
    hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    req = urllib.request.Request(current_page, headers=hdr)
    page = urllib.request.urlopen(req)
    soup = BeautifulSoup(page, "html5lib")
    return(soup)

def get_length(file_path):
    with open("data.csv", 'r', encoding='utf8') as csvfile:
        reader = csv.reader(csvfile)
        reader_list = list(reader)
        return len(reader_list)

def write_data(file_path, company_name, role, full_url, date):
    fieldnames = ['ID', 'Company', 'Role', 'URL', 'Date']
    next_id = get_length(file_path)
    with open(file_path, "w", encoding='utf8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        #writer.writeheader()
        writer.writerow({
            "ID": next_id,
            "Company": company_name,
            "Role": role,
            "URL": full_url,
            "Date": date
        })
    csvfile.close()

def find_data(source):
    base_url = 'https://www.irishjobs.ie'
    for a in source.find_all(attrs={"itemtype": "https://schema.org/JobPosting"}):
        job_info = a.find('h2').find('a')
        company_name = a.find('h3').find('a').get_text()
        url = job_info['href']
        full_url = (base_url + url)
        role = (job_info.get_text())
        date = a.find('li', class_='updated-time').get_text().replace('Updated', '').strip()
        write_data("data.csv", company_name, role, full_url, date)

if __name__ == '__main__':
    query = input('Enter role to search: ')
    source = getPageSource('https://www.irishjobs.ie/ShowResults.aspx?Keywords='+query+'&Location=102&Category=3&Recruiter=All&SortBy=MostRecent&PerPage=100')
    find_data(source)
You need to keep the file open until you have finished writing it. Also, it is easier to keep a count of the rows written (using enumerate()) than to keep trying to read the file back in:
import csv
from bs4 import BeautifulSoup
import urllib.request
def getPageSource(current_page):
    hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    req = urllib.request.Request(current_page, headers=hdr)
    page = urllib.request.urlopen(req)
    return (BeautifulSoup(page, "html5lib"))

def find_data(source):
    base_url = 'https://www.irishjobs.ie'
    fieldnames = ['ID', 'Company', 'Role', 'URL', 'Date']
    with open('data.csv', 'w', encoding='utf8', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(fieldnames)
        for id, a in enumerate(source.find_all(attrs={"itemtype": "https://schema.org/JobPosting"}), start=1):
            job_info = a.find('h2').find('a')
            company_name = a.find('h3').find('a').get_text()
            url = job_info['href']
            full_url = (base_url + url)
            role = (job_info.get_text())
            date = a.find('li', class_='updated-time').get_text().replace('Updated', '').strip()
            writer.writerow([id, company_name, role, full_url, date])

if __name__ == '__main__':
    query = input('Enter role to search: ')
    source = getPageSource('https://www.irishjobs.ie/ShowResults.aspx?Keywords='+query+'&Location=102&Category=3&Recruiter=All&SortBy=MostRecent&PerPage=100')
    find_data(source)
This would give you data.csv starting:
ID,Company,Role,URL,Date
1,Computer Futures,Xamarin Developer,https://www.irishjobs.ie/Jobs/Xamarin-Developer-8143810.aspx,06/03/2018
2,Wallace Myers International,New Business Development Manager,https://www.irishjobs.ie/Jobs/New-Business-Development-Manager-8143989.aspx,06/03/2018
3,Reperio Human Capital Ltd,Senior Software Developer - Dublin,https://www.irishjobs.ie/Jobs/Senior-Software-Developer-Dublin-8150128.aspx,20/03/2018
In your case, it is probably easier to just use a plain csv.writer() rather than a DictWriter().
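For reference, a short sketch of what the DictWriter version of the same output would look like (using the fieldnames above and the first row of the sample output), so the comparison is concrete:

import csv

with open('data.csv', 'w', encoding='utf8', newline='') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=['ID', 'Company', 'Role', 'URL', 'Date'])
    writer.writeheader()
    # One dict per row instead of a plain list
    writer.writerow({'ID': 1, 'Company': 'Computer Futures', 'Role': 'Xamarin Developer',
                     'URL': 'https://www.irishjobs.ie/Jobs/Xamarin-Developer-8143810.aspx',
                     'Date': '06/03/2018'})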