Loop re-iterates over and over rather than once in soup - Python

I keep re-iterating over this code. I want to scrape all past results data from this site, yet the loop keeps repeating one element at a time: for example, race_number prints as 1, then 1, 2, then 1, 2, 3, and so on.
The end goal is to fill all the lists with data and load them into pandas to look at results and trends.
import requests
import csv
import os
import numpy
import pandas
from bs4 import BeautifulSoup as bs
with requests.Session() as s:
    webpage_response = s.get('http://www.harness.org.au/racing/fields/race-fields/?mc=SW010420')

soup = bs(webpage_response.content, "html.parser")

#soup1 = soup.select('.content')
results = soup.find_all('div', {'class':'forPrint'})

race_number = []
race_name = []
race_title = []
race_distance = []
place = []
horse_name = []
Prizemoney = []
Row = []
horse_number = []
Trainer = []
Driver = []
Margin = []
Starting_odds = []
Stewards_comments = []
Scratching = []
Track_Rating = []
Gross_Time = []
Mile_Rate = []
Lead_Time = []
First_Quarter = []
Second_Quarter = []
Third_Quarter = []
Fourth_Quarter = []

for race in results:
    race_number1 = race.find(class_='raceNumber').get_text()
    race_number.append(race_number1)
    race_name1 = race.find(class_='raceTitle').get_text()
    race_name.append(race_name1)
    race_title1 = race.find(class_='raceInformation').get_text(strip=True)
    race_title.append(race_title1)
    race_distance1 = race.find(class_='distance').get_text()
    race_distance.append(race_distance1)
I need help fixing this repeated iteration. Also, what would be the best next step to pull the table data itself rather than just the headers above?
Cheers

Is this the output you are expecting:
import requests
import csv
import os
import numpy
import pandas as pd
import html
from bs4 import BeautifulSoup as bs
with requests.Session() as s:
    webpage_response = s.get('http://www.harness.org.au/racing/fields/race-fields/?mc=SW010420')

soup = bs(webpage_response.content, "html.parser")

#soup1 = soup.select('.content')
data = {}
data["raceNumber"] = [i['rowspan'] for i in soup.find_all("td", {"class": "raceNumber", "rowspan": True})]
data["raceTitle"] = [i.get_text(strip=True) for i in soup.find_all("td", {"class": "raceTitle"})]
data["raceInformation"] = [i.get_text(strip=True) for i in soup.find_all("td", {"class": "raceInformation"})]
data["distance"] = [i.get_text(strip=True) for i in soup.find_all("td", {"class": "distance"})]

print(data)
data_frame = pd.DataFrame(data)
print(data_frame)
## Output
## raceNumber raceTitle raceInformation distance
##0 3 PREMIX KING PACE $4,500\n\t\t\t\t\t4YO and older.\n\t\t\t\t\tNR... 1785M
##1 3 GATEWAY SECURITY PACE $7,000\n\t\t\t\t\t4YO and older.\n\t\t\t\t\tNR... 2180M
##2 3 PERRY'S FOOTWEAR TROT $7,000\n\t\t\t\t\t\n\t\t\t\t\tNR 46 to 55.\n\t... 2180M
##3 3 DELAHUNTY PLUMBING 3YO TROT $7,000\n\t\t\t\t\t3YO.\n\t\t\t\t\tNR 46 to 52.... 2180M
##4 3 RAYNER'S FRUIT & VEGETABLES 3YO PACE $7,000\n\t\t\t\t\t3YO.\n\t\t\t\t\tNR 48 to 56.... 2180M
##5 3 KAYE MATTHEWS TRIBUTE $9,000\n\t\t\t\t\t4YO and older.\n\t\t\t\t\tNR... 2180M
##6 3 TALQUIST TREES PACE $7,000\n\t\t\t\t\t\n\t\t\t\t\tNR 62 to 73.\n\t... 2180M
##7 3 WEEKLY ADVERTISER 3WM PACE $7,000\n\t\t\t\t\t\n\t\t\t\t\tNR 56 to 61.\n\t... 1785M
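To move on from those header cells to the actual result tables underneath them, one low-effort option is pandas.read_html, which turns every HTML table on the page into a DataFrame. This is only a sketch and assumes the optional lxml or html5lib parser that read_html relies on is installed:
import requests
import pandas as pd

url = 'http://www.harness.org.au/racing/fields/race-fields/?mc=SW010420'
html_text = requests.get(url).text

# read_html returns one DataFrame per <table> element found in the page
tables = pd.read_html(html_text)
print(len(tables))       # how many tables the page exposes
print(tables[0].head())  # inspect the first one to locate the results table you need
From there you can concatenate the per-race tables with pd.concat and join them back to the header fields collected above.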

Related

Scraping pdf links in dataframe from webpage using BeautifulSoup

I want to extract all the PDF links, which take us directly to the pages from where we can download the PDFs. I want to store these PDF links in a data frame.
url = "https://www.volvogroup.com/en/news-and-media/press-releases.html"
source = requests.get(url)
soup = BeautifulSoup(source.text , "html.parser")
news_check = soup.find_all("a" , class_ = "articlelist__contentDownloadItem")
for i in news_check:
    print(i)
    break

data = set()
for i in soup.find_all('a'):
    for j in i.find_all('href'):
        pdf_link = "https://www.volvogroup.com" + j.get('.pdf')
        data.add(j)
        print(pdf_link)
You can try the code below to get the PDF links:
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
url = "https://www.volvogroup.com/en/news-and-media/press-releases.html"
source = requests.get(url)
soup = bs(source.text , "html.parser")
news_check = soup.find_all("a" , class_ = "articlelist__contentDownloadItem")
data = set()
for i in news_check:
    pdf_link = "https://www.volvogroup.com" + i['href']
    data.add(pdf_link)
    # for j in i.find_all('href'):
    #     pdf_link = + j.get('.pdf')
    #     data.add(j)
    #     print(pdf_link)

df = pd.DataFrame(data)
print(df)
Output:
0 https://www.volvogroup.com/content/dam/volvo-g...
1 https://www.volvogroup.com/content/dam/volvo-g...
2 https://www.volvogroup.com/content/dam/volvo-g...
3 https://www.volvogroup.com/content/dam/volvo-g...
4 https://www.volvogroup.com/content/dam/volvo-g...
5 https://www.volvogroup.com/content/dam/volvo-g...
6 https://www.volvogroup.com/content/dam/volvo-g...
7 https://www.volvogroup.com/content/dam/volvo-g...
8 https://www.volvogroup.com/content/dam/volvo-g...
9 https://www.volvogroup.com/content/dam/volvo-g...
10 https://www.volvogroup.com/content/dam/volvo-g...
11 https://www.volvogroup.com/content/dam/volvo-g...
12 https://www.volvogroup.com/content/dam/volvo-g...
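If the end goal is to download the PDFs themselves rather than just list the links, a minimal follow-up sketch, reusing the data set built above (the output directory name is just an example), could be:
import os
import requests

os.makedirs('volvo_pdfs', exist_ok=True)
for link in sorted(data):                    # 'data' is the set of absolute PDF links from the code above
    filename = os.path.join('volvo_pdfs', link.split('/')[-1])
    resp = requests.get(link)
    resp.raise_for_status()
    with open(filename, 'wb') as f:          # PDFs are binary, so write in 'wb' mode
        f.write(resp.content)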

How can I clean up the response for this script to make it more readable?

How can I get the output of this script into a neater format like CSV? When I save the response to a text file it is badly formatted. I tried using writer.writerow, but I could not get that method to account for my variables.
import requests
from bs4 import BeautifulSoup
url = "https://www.rockauto.com/en/catalog/ford,2015,f-150,3.5l+v6+turbocharged,3308773,brake+&+wheel+hub,brake+pad,1684"
response = requests.get(url)
data = response.text
soup = BeautifulSoup(data, 'html.parser')
meta_tag = soup.find('meta', attrs={'name': 'keywords'})
category = meta_tag['content']
linecodes = []
partnos = []
descriptions = []
infos = []
for tbody in soup.select('tbody[id^="listingcontainer"]'):
    tmp = tbody.find('span', class_='listing-final-manufacturer')
    linecodes.append(tmp.text if tmp else '-')
    tmp = tbody.find('span', class_='listing-final-partnumber as-link-if-js buyers-guide-color')
    partnos.append(tmp.text if tmp else '-')
    tmp = tbody.find('span', class_='span-link-underline-remover')
    descriptions.append(tmp.text if tmp else '-')
    tmp = tbody.find('div', class_='listing-text-row')
    infos.append(tmp.text if tmp else '-')

for row in zip(linecodes, partnos, infos, descriptions):
    result = category + ' | {:<20} | {:<20} | {:<80} | {:<80}'.format(*row)
    with open('complete.txt', 'a+') as f:
        f.write(result + '/n')
        print(result)
You could put it into a pandas dataframe
Remove the last for-loop from the original code.
# imports
import requests
from bs4 import BeautifulSoup
import pandas as pd
# set pandas display options to display more rows and columns
pd.set_option('display.max_columns', 700)
pd.set_option('display.max_rows', 400)
pd.set_option('display.min_rows', 10)
# your code
url = "https://www.rockauto.com/en/catalog/ford,2015,f-150,3.5l+v6+turbocharged,3308773,brake+&+wheel+hub,brake+pad,1684"
response = requests.get(url)
data = response.text
soup = BeautifulSoup(data, 'html.parser')
meta_tag = soup.find('meta', attrs={'name': 'keywords'})
category = meta_tag['content']
linecodes = []
partnos = []
descriptions = []
infos = []
for tbody in soup.select('tbody[id^="listingcontainer"]'):
    tmp = tbody.find('span', class_='listing-final-manufacturer')
    linecodes.append(tmp.text if tmp else '-')
    tmp = tbody.find('span', class_='listing-final-partnumber as-link-if-js buyers-guide-color')
    partnos.append(tmp.text if tmp else '-')
    tmp = tbody.find('span', class_='span-link-underline-remover')
    descriptions.append(tmp.text if tmp else '-')
    tmp = tbody.find('div', class_='listing-text-row')
    infos.append(tmp.text if tmp else '-')
Added code for the dataframe:
# create dataframe
df = pd.DataFrame(zip(linecodes,partnos,infos,descriptions), columns=['codes', 'parts', 'info', 'desc'])
# add the category column
df['category'] = category
# break the category column into multiple columns if desired
# skip the last 2 columns, because they are empty
df[['cat_desc', 'brand', 'model', 'engine', 'cat_part']] = df.category.str.split(',', expand=True).iloc[:, :-2]
# drop the unneeded category column
df.drop(columns='category', inplace=True)
# save to csv
df.to_csv('complete.txt', index=False)
# display(df)
codes parts info desc cat_desc brand model engine cat_part
0 CENTRIC 30016020 Rear; w/ Manual parking brake Semi-Metallic; w/Shims and Hardware 2015 FORD F-150 Brake Pad FORD F-150 3.5L V6 Turbocharged Brake Pad
1 CENTRIC 30116020 Rear; w/ Manual parking brake Ceramic; w/Shims and Hardware 2015 FORD F-150 Brake Pad FORD F-150 3.5L V6 Turbocharged Brake Pad
2 DYNAMIC FRICTION 1551160200 Rear; Manual Parking Brake 5000 Advanced; Ceramic 2015 FORD F-150 Brake Pad FORD F-150 3.5L V6 Turbocharged Brake Pad
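If you would rather stay with the csv module than pandas, a minimal sketch of the writer.writerow approach mentioned in the question, reusing the lists built above, would be:
import csv

# assumes linecodes, partnos, infos, descriptions and category exist as in the code above
with open('complete.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['category', 'linecode', 'partno', 'info', 'description'])   # header row
    for linecode, partno, info, desc in zip(linecodes, partnos, infos, descriptions):
        writer.writerow([category, linecode, partno, info, desc])                # one row per listing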

Is there any way to get the cookies and cache of a visited website from chrome to beautifulsoup in Python?

I want to scrape a certain website's weather data, but the default page layout gives a maximum of 40 results, while switching the layout to a simple list gives 100 results; the layout keeps reverting to the default, which is difficult to handle with Selenium. Is there any way to take the cookies saved in Chrome and use them with Beautiful Soup?
import requests
from bs4 import BeautifulSoup
import browser_cookie3
cj = browser_cookie3.load()
s = requests.Session()
url = "https:/something.org/titles/2"
i=1
print(cj)
for c in cj:
    if 'mangadex' in str(c):
        s.cookies.set_cookie(c)
r = s.get(url)
soup = BeautifulSoup(r.content, 'lxml')
for anime in soup.find_all('div', {'class': 'manga-entry col-lg-6 border-bottom pl-0 my-1'}):
    det = anime.find('a', {"class": "ml-1 manga_title text-truncate"})
    anime_name = det.text
    anime_link = det['href']
    stars = anime.select("span")[3].text
    print(anime_name, anime_link, stars, i)
    i = i + 1
Try:
import browser_cookie3
import requests
cj = browser_cookie3.load()
s = requests.Session()
for c in cj:
    if 'sitename' in str(c):
        s.cookies.set_cookie(c)
r = s.get(the_site)
This code uses the browser's cookies with the requests module in a Session. Simply change 'sitename' to the site you want the cookies from.
Your new code:
import requests
from bs4 import BeautifulSoup
import browser_cookie3
cj = browser_cookie3.load()
s = requests.Session()
url = "https://something.org/titles/2"
i = 1
print(cj)
for c in cj:
    if 'mangadex' in str(c):
        s.cookies.set_cookie(c)
r = s.get(url)
soup = BeautifulSoup(r.content, 'lxml')
for anime in soup.find_all('div', {'class': 'manga-entry row m-0 border-bottom'}):
    det = anime.find('a', {"class": "ml-1 manga_title text-truncate"})
    anime_name = det.text
    anime_link = det['href']
    stars = anime.select("span")[3].text
    print(anime_name, anime_link, stars, i)
    i = i + 1
prints:
-Hitogatana- /title/540/hitogatana 4 1
-PIQUANT- /title/44134/piquant 5 2
-Rain- /title/37103/rain 4 3
-SINS- /title/1098/sins 4
:radical /title/46819/radical 1 5
:REverSAL /title/3877/reversal 3 6
... /title/52206/ 7
...Curtain. ~Sensei to Kiyoraka ni Dousei~ /title/7829/curtain-sensei-to-kiyoraka-ni-dousei 8
...Junai no Seinen /title/28947/junai-no-seinen 9
...no Onna /title/10162/no-onna 2 10
...Seishunchuu! /title/19186/seishunchuu 11
...Virgin Love /title/28945/virgin-love 12
.flow - Untitled (Doujinshi) /title/27292/flow-untitled-doujinshi 2 13
.gohan /title/50410/gohan 14
.hack//4koma + Gag Senshuken /title/7750/hack-4koma-gag-senshuken 24 15
.hack//Alcor - Hagun no Jokyoku /title/24375/hack-alcor-hagun-no-jokyoku 16
.hack//G.U.+ /title/7757/hack-g-u 1 17
.hack//GnU /title/7758/hack-gnu 18
.hack//Link - Tasogare no Kishidan /title/24374/hack-link-tasogare-no-kishidan 1 19
.hack//Tasogare no Udewa Densetsu /title/5817/hack-tasogare-no-udewa-densetsu 20
.hack//XXXX /title/7759/hack-xxxx 21
.traeH /title/9789/traeh 22
(G) Edition /title/886/g-edition 1 23
(Not) a Househusband /title/22832/not-a-househusband 6 24
(R)estauraNTR /title/37551/r-estaurantr 14 25
[ rain ] 1st Story /title/25587/rain-1st-story 3 26
[another] Xak /title/24881/another-xak 27
[es] ~Eternal Sisters~ /title/4879/es-eternal-sisters 1 28
and so on to 100...
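As a variant, browser_cookie3 can also filter cookies by domain when loading, which avoids looping over the whole cookie jar; treat the domain_name parameter as an assumption and check that your installed version supports it:
import browser_cookie3
import requests

# load only cookies whose domain matches (assumption: domain_name is supported by your browser_cookie3 version)
cj = browser_cookie3.load(domain_name='mangadex.org')
r = requests.get('https://mangadex.org', cookies=cj)   # requests accepts a CookieJar directly
print(r.status_code)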

For loop for web scraping in python

I have a small project web-scraping Google search results with a list of keywords. I have built a nested for loop to scrape the search results. The problem is that the for loop over the keywords in the list does not work as I intended: instead of scraping data for every keyword, the output contains only the result for the last keyword, and the results for the first two keywords are missing.
Here is the code:
browser = webdriver.Chrome(r"C:\...\chromedriver.exe")
df = pd.DataFrame(columns = ['ceo', 'value'])
baseUrl = 'https://www.google.com/search?q='
html = browser.page_source
soup = BeautifulSoup(html)
ceo_list = ["Bill Gates", "Elon Musk", "Warren Buffet"]
values =[]
for ceo in ceo_list:
    browser.get(baseUrl + ceo)
    r = soup.select('div.g.rhsvw.kno-kp.mnr-c.g-blk')
    df = pd.DataFrame()
    for i in r:
        value = i.select_one('div.Z1hOCe').text
        ceo = i.select_one('.kno-ecr-pt.PZPZlf.gsmt.i8lZMc').text
        values = [ceo, value]
        s = pd.Series(values)
        df = df.append(s, ignore_index=True)
print(df)
The output:
0 1
0 Warren Buffet Born: October 28, 1955 (age 64 years), Seattle...
The output that I am expecting is this:
0 1
0 Bill Gates Born:..........
1 Elon Musk Born:...........
2 Warren Buffett Born: August 30, 1930 (age 89 years), Omaha, N...
Any suggestions or comments are welcome here.
Declare df = pd.DataFrame() outside the for loop.
Currently you define it inside the loop, so a new data frame is initialized for each keyword in your list and the previous one is replaced. That's why you only get the result for the last keyword.
Try this:
browser = webdriver.Chrome(r"C:\...\chromedriver.exe")
baseUrl = 'https://www.google.com/search?q='
ceo_list = ["Bill Gates", "Elon Musk", "Warren Buffet"]
df = pd.DataFrame()
for ceo in ceo_list:
    browser.get(baseUrl + ceo)
    # read the page source after each navigation, otherwise soup still holds the page loaded before the loop
    html = browser.page_source
    soup = BeautifulSoup(html)
    r = soup.select('div.g.rhsvw.kno-kp.mnr-c.g-blk')
    for i in r:
        value = i.select_one('div.Z1hOCe').text
        ceo = i.select_one('.kno-ecr-pt.PZPZlf.gsmt.i8lZMc').text
        s = pd.Series([ceo, value])
        df = df.append(s, ignore_index=True)
print(df)
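Note that browser.page_source has to be read after each browser.get() call; if the soup is built once before the loop, it still reflects the page that was loaded before any search ran, no matter where the data frame is declared.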

Real Estate Market Scraping using Python and BeautifulSoup

I need some guidance on how to parse a real estate market site using Python. I've searched for information about parsing websites, and I even did this in VBA, but I would like to do it in Python.
This is the site that will be parsed (it's only one offer for now, but the program will eventually work on the full range of real estate offers, i.e. many pages from kontrakt.szczecin.pl):
http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149
First of all, the program will use 3 pieces of information:
1/ The table with the main parameters:
Numer oferty 351149, Liczba pokoi 3, Cena 335 000 PLN, Cena za m2 3 350 PLN (offer number, number of rooms, price, price per square metre, etc.). However, the amount of information depends on the property offer: sometimes there are 14 items, sometimes 12, sometimes 16, etc.
2/ The description of the property in paragraphs (this is another part of the program and can be skipped for now): sometimes the table from 1/ says there is a garage or a balcony, but a sentence in the paragraph states that the garage is available for an additional price (which for me means the property has no garage), or that the balcony is a French balcony (which for me is no balcony).
I managed to get the program to find the right keyword in a paragraph (such as garage) and to copy the surrounding text from the paragraph, with extra text on the left and right (for instance 20 characters on each side; but what if the word is at the very beginning? See the sketch after this list).
3/ Additional parameters:
Not every offer has them, but this one (http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165) has information about the number of balconies in the property. Sometimes there is information about a basement too. It should be similar code to the 1/ issue.
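For the keyword-in-paragraph extraction described in 2/, a minimal sketch that keeps the context window inside the string even when the keyword sits at the very start or end (the keyword list and window size are only examples) could look like this:
# pull each keyword plus roughly `window` characters of context on each side
def keyword_contexts(paragraph, keywords, window=20):
    paragraph = paragraph.lower()
    hits = []
    for kw in keywords:
        start = paragraph.find(kw)
        while start != -1:
            # max()/min() keep the slice inside the string, even for a keyword at position 0
            left = max(0, start - window)
            right = min(len(paragraph), start + len(kw) + window)
            hits.append((kw, paragraph[left:right]))
            start = paragraph.find(kw, start + 1)
    return hits

print(keyword_contexts("garaż dostępny za dodatkową opłatą, balkon francuski", ["garaż", "balkon"]))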
So I tried something like this, using some internet sources (it is still incomplete):
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url = "http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165"
# Opens a connection and grabs the url content (urllib)
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

# html parsing (BeautifulSoup); html.parser -> we parse as HTML, not e.g. XML
page_soup = soup(page_html, "html.parser")

# grabs the data about the real estate, like kitchen, offer no, etc.
containers = page_soup.findAll("section", {"class": "clearfix"}, {"id": "quick-summary"})
# print(len(containers)) - len(containers) checks how many such objects exist on the page

# There is only one such table on the page, but for the sake of learning I treat it as if there could be many.
container = containers[0]
find_dt = container.findAll("dt")
find_dd = container.findAll("dd")
print(find_dt[0].text + " " + find_dd[0])
It works, but it is still incomplete. I'm not continuing it right now because there is a major flaw. As you can see from the last print, it works by indexes, but not every property will have the same order (because, as I mentioned, sometimes there are 10 pieces of information, sometimes more, sometimes fewer). It would be a huge mess in the CSV.
My VBA program worked this way:
Copy the table to Excel (Sheet 1).
Sheet 2 held the parameters the program was looking for (such as Price).
Mechanism in short: copy a parameter from Sheet 2 (Price), go to Sheet 1 (where the parsed information is), find the Price string (matching the value pasted from Sheet 2: "Price"), go one line below, copy the price value, go back to Sheet 2, find Price, go one line below, paste the price value. And so on.
I'm looking for help with the concept and with the coding as well.
EDIT:
PART 1 and PART 2 are ready, but I have big issues with PART 3. Here is the code:
from urllib import request as uReq
import requests
# thanks to this the program can be closed immediately without executing the rest of the code; after the import, exit(0) is enough
from sys import exit
from urllib.request import urlopen as uReq2
from bs4 import BeautifulSoup as soup
import csv
import re
import itertools
filename = 'test.txt'
# counter, needed to compute how many offer numbers the .txt file contains
num_lines = 0
# we create the list of data and the list of URLs; results will be appended to these lists, so they need to be created (empty) first
list_of_lines = ['351238', '351237', '111111', '351353']
list_of_lines2 = []
list_of_URLs = []
list_of_redictered_URLs = []
KONTRAKT = 'http://www.kontrakt.szczecin.pl'
with open(filename, 'r') as file:
    for line in file:
        # add the line (offer number) to the list
        list_of_lines.append(line.strip())
        # num_lines is a counter indicating how many rows the list contains; it matters for building the loop over the URL addresses
        num_lines += 1

# build URLs from the offer numbers contained in filename
for i in range(num_lines):
    nr_oferty = list_of_lines[i]
    my_url = "http://www.kontrakt.szczecin.pl/lista-ofert/?f_listingId=" + nr_oferty + "&f=&submit=Szukaj"
    list_of_URLs.append(my_url)
print(list_of_URLs)

# Part two: converting the list of links into a list of redirected links.
# The program visits a page that should redirect, but because JavaScript is used,
# the task is harder. The goal is therefore to simulate a browser, fetch the page
# content, and then extract the proper redirect link from it.
i = 0
for i in range(num_lines):
    url_redirect = list_of_URLs[i]
    my_url = url_redirect
    BROWSER = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    response = requests.get(my_url, headers=BROWSER)
    script1 = '<script>'
    script2 = '</script>'
    content_URL = str(response.content)
    find_script1 = (content_URL.find(script1))
    find_script2 = (content_URL.find(script2))
    url_ready = content_URL[find_script1:find_script2]
    print(i+1, 'z', num_lines, '-', 'oferta nr:', str(my_url[57:57+6]))
    list_of_redictered_URLs.append(url_ready)

# remove unnecessary tags and characters in order to obtain a clean redirected link
list_of_redictered_URLs = [w.replace('<script>window.location=\\\'','') for w in list_of_redictered_URLs]
list_of_redictered_URLs = [w.replace('\\\';','') for w in list_of_redictered_URLs]
#print(list_of_redictered_URLs)

# remove empty rows from the list (offers that are no longer current enter the list as empty ones);
# 'item' is just a variable name and could be replaced with anything
filtered_list = list(filter(lambda item: item.strip(), list_of_redictered_URLs))
filtered_list = [KONTRAKT + item for item in filtered_list]

# change to a tuple, since mutability (adding further links) will not be needed
filtered_list = tuple(filtered_list)
#print(str(filtered_list))
print('Lista linków:\n', filtered_list)

# The next part of the program downloads the relevant information (the basic parameters)
# from kontrakt.szczecin.pl and then saves it to a csv file.
# Headers in the csv and parameter names on the page (they must be identical to those
# on the page so they can be matched correctly in the .csv)
HEADERS = ['Numer oferty',
'Liczba pokoi',
'Cena',
'Cena za m2',
'Powierzchnia',
'Piętro',
'Liczba pięter',
'Typ kuchni',
'Balkon',
'Czynsz administracyjny',
'Rodzaj ogrzewania',
'Umeblowanie',
'Wyposażona kuchnia',
'Gorąca woda',
'Rodzaj budynku',
'Materiał',
'Rok budowy',
'Stan nieruchomości',
'Rynek',
'Dach:',
'Liczba balkonów:',
'Liczba tarasów:',
'Piwnica:',
'Ogród:',
'Ochrona:',
'Garaż:',
'Winda:',
'Kształt działki:',
'Szerokość działki (mb.):',
'Długość działki (mb.):',
'Droga dojazdowa:',
'Gaz:',
'Prąd:',
'Siła:','piwnica',
'komórk',
'strych',
'gospodarcze',
'postojow',
'parking',
'przynależn',
'garaż',
'ogród',
'ogrod',
'działka',
'ocieplony',
'moderniz',
'restaur',
'odnow',
'ociepl',
'remon',
'elew',
'dozór',
'dozor',
'monitoring',
'monit',
'ochron',
'alarm',
'strzeż',
'portier',
'wspólnot',
'spółdziel',
'kuchni',
'aneks',
'widna',
'ciemna',
'prześwit',
'oficyn',
'linia',
'zabudow',
'opłat',
'bezczynsz',
'poziom',
'wind',
'francuski',
'ul.',
'w cenie',
'dodatkową']
LINKI = ["Link"]
#HEADERS2 = ['Liczba balkonów:',
# 'Liczba tarasów:',
# 'Piwnica:',
# 'Ogród:',
# 'Ochrona:',
# 'Garaż:',
# 'Winda:']
HEADERS3 = ['piwnica',
'komórk',
'strych',
'gospodarcze',
'postojow',
'parking',
'przynależn',
'garaż',
'ogród',
'ogrod',
'działka',
'ocieplony',
'moderniz',
'restaur',
'odnow',
'ociepl',
'remon',
'elew',
'dozór',
'dozor',
'monitoring',
'monit',
'ochron',
'alarm',
'strzeż',
'portier',
'wspólnot',
'spółdziel',
'kuchni',
'aneks',
'widna',
'ciemna',
'prześwit',
'oficyn',
'linia',
'zabudow',
'opłat',
'bezczynsz',
'poziom',
'wind',
'francuski',
'ul.',
'w cenie',
'dodatkową',]
csv_name = 'data.csv'
print('Dane zostaną zapisane do pliku:',csv_name + '.csv')
print('\n>>>>Program rozpoczyna pobieranie danych')
# Fetching the links
i = 0
# Creates the csv file named csv_name
# writerow can take only one argument, therefore it is given the concatenation of the individual lists.
# The LINKI list has a single entry, because only data of the same type can be concatenated; a list cannot be concatenated with strings.
with open(csv_name + '.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    HEADERS_ALL = HEADERS + HEADERS3 + LINKI
    csvwriter.writerow(HEADERS_ALL)
    for i in range(len(filtered_list)):
        my_url = filtered_list[i]
        with uReq2(my_url) as uClient:
            page_soup = soup(uClient.read(), 'lxml')
        print('\t\t-----------', i+1, '-----------\n', my_url)

        # <dt> - parameter name, e.g. Kuchnia
        # <dd> - parameter value, e.g. widna
        row = ['-'] * len(HEADERS) + ['-'] * len(HEADERS3) + ['-'] * len(LINKI)

        # Basic parameters (kontrakt.szczecin.pl)
        for dt, dd in zip(page_soup.select('section#quick-summary dt'), page_soup.select('section#quick-summary dd')):
            if dt.text.strip() not in HEADERS:
                print("\n 1(dt,dd):UWAGA!, kolumna [{}] nie istnieje w nagłówkach! (stała: HEADERS)\n".format(dt.text.strip()))
                continue
            row[HEADERS.index(dt.text.strip())] = dd.text.strip()

        # Additional parameters
        for span, li in zip(page_soup.select('section#property-features span'), page_soup.select('section#property-features li')):
            if span.text.strip() not in HEADERS:
                print("\n 2:UWAGA(span,li), kolumna [{}] nie istnieje w nagłówkach (stała HEADERS)!\n".format(span.text.strip()))
                continue
            row[HEADERS.index(span.text.strip())] = li.text.strip()
        #csvwriter.writerow(row)
        print(row)

        # Now the fun begins...................................
        # the variable j refers to the HEADERS3 index; it is j, not i, because i is still in use
        # in the loop above
        for p in page_soup.select('section#description'):
            p = str(p)
            p = p.lower()
            for j in range(len(HEADERS3)):
                #print('j:', j)
                # find_p finds all keywords from HEADERS3 in the paragraph on the kontrakt page
                find_p = re.findall(HEADERS3[j], p)
                # the lists holding the start positions of the individual words must start as '-' or 0?,
                # because when a given word is not found the lists are empty in the first iteration of the loop,
                # which in consequence leads to an out-of-range error
                m_start = []
                m_end = []
                lista_j = []
                for m in re.finditer(HEADERS3[j], p):
                    #print((m.start(), m.end()), m.group())
                    m_start.append(m.start())
                    m_end.append(m.end())
                #print(h)
                for k in range(len(m_start)):
                    # actually I no longer know what this is for..
                    try:
                        x = m_start[k]
                        y = m_end[k]
                    except IndexError:
                        x = m_start[0]
                        y = m_end[0]
                    #print('xy:', x, y)
                    #print(find_p)
                    #print(HEADERS3[j])
                    z = (HEADERS3[j] + ':', p[-60+x:y+60] + ' ++-NNN-++')
                    lista_j.append(z)
                print(lista_j)
                print(str(lista_j))
                row[HEADERS.index(span.text.strip())] = str(lista_j)
        csvwriter.writerow(row)
        #print(row)
This code snippet will parse the quick-summary table of the property URL and save it to a csv file:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import csv
# my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165'
my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149'
with uReq(my_url) as uClient:
    page_soup = soup(uClient.read(), 'lxml')

with open('data.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    for dt, dd in zip(page_soup.select('section#quick-summary dt'), page_soup.select('section#quick-summary dd')):
        csvwriter.writerow([dt.text.strip(), dd.text.strip()])
The result is in data.csv (screenshot from my LibreOffice omitted here).
To get the table transposed, you can use this code:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import csv
# my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165'
my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149'
with uReq(my_url) as uClient:
    page_soup = soup(uClient.read(), 'lxml')
headers = ['Numer oferty',
'Liczba pokoi',
'Cena',
'Cena za m2',
'Powierzchnia',
'Piętro',
'Liczba pięter',
'Typ kuchni',
'Balkon',
'Czynsz administracyjny',
'Rodzaj ogrzewania',
'Gorąca woda',
'Rodzaj budynku',
'Materiał',
'Rok budowy',
'Stan nieruchomości',
'Rynek',
'Dach:',
'Liczba balkonów:',
'Piwnica:',
'Kształt działki:',
'Szerokość działki (mb.):',
'Długość działki (mb.):',
'Droga dojazdowa:',
'Gaz:',
'Prąd:',
'Siła:']
with open('data.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    csvwriter.writerow(headers)
    row = ['-'] * len(headers)
    for dt, dd in zip(page_soup.select('section#quick-summary dt'), page_soup.select('section#quick-summary dd')):
        if dt.text.strip() not in headers:
            print("Warning, column [{}] doesn't exist in headers!".format(dt.text.strip()))
            continue
        row[headers.index(dt.text.strip())] = dd.text.strip()
    csvwriter.writerow(row)
The result will be in the csv file; values not present on the page are substituted with '-'.
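To extend this to the full range of offers (one csv row per offer), a minimal sketch, assuming you already have a list of offer URLs and reusing the imports and the headers list from the snippet above, could wrap the same logic in a loop:
# assumption: offer_urls is a list of kontrakt.szczecin.pl offer pages collected elsewhere
offer_urls = [
    'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149',
]

with open('data.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    csvwriter.writerow(headers)                          # same headers list as above
    for my_url in offer_urls:
        with uReq(my_url) as uClient:
            page_soup = soup(uClient.read(), 'lxml')
        row = ['-'] * len(headers)
        for dt, dd in zip(page_soup.select('section#quick-summary dt'),
                          page_soup.select('section#quick-summary dd')):
            if dt.text.strip() in headers:
                row[headers.index(dt.text.strip())] = dd.text.strip()
        csvwriter.writerow(row)                          # one row per offer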
