What can I do to scrape 10,000 pages without getting captchas? - python

Hi there, I've been trying to collect all the information from 10,000 pages of this site for a school project. I thought everything was fine until, on page 4, I got an error. I checked the page manually and found that the site now asks me for a captcha.
What can I do to avoid it? Maybe set a timer between the requests?
Here is my code.
import bs4, requests, csv

g_page = requests.get("http://www.usbizs.com/NY/New_York.html")
m_page = bs4.BeautifulSoup(g_page.text, "lxml")
get_Pnum = m_page.select('div[class="pageNav"]')
MAX_PAGE = int(get_Pnum[0].text[9:16])

print("Recolectando información de la página 1 de {}.".format(MAX_PAGE))
contador = 0
information_list = []

for k in range(1, MAX_PAGE):
    c_items = m_page.select('div[itemtype="http://schema.org/Corporation"] a')
    c_links = []
    i = 0
    for link in c_items:
        c_links.append(link.get("href"))
        i += 1
    for j in range(len(c_links)):
        temp = []
        s_page = requests.get(c_links[j])
        i_page = bs4.BeautifulSoup(s_page.text, "lxml")
        print("Ingresando a: {}".format(c_links[j]))
        info_t = i_page.select('div[class="infolist"]')
        info_1 = info_t[0].text
        info_2 = info_t[1].text
        temp = [info_1, info_2]
        information_list.append(temp)
        contador += 1
        with open("list_information.cv", "w") as file:
            writer = csv.writer(file)
            for row in information_list:
                writer.writerow(row)
        print("Información de {} clientes recolectada y guardada correctamente.".format(j+1))
    g_page = requests.get("http://www.usbizs.com/NY/New_York-{}.html".format(k+1))
    m_page = bs4.BeautifulSoup(g_page.text, "lxml")
    print("Recolectando información de la página {} de {}.".format(k+1, MAX_PAGE))

print("Programa finalizado. Información recolectada de {} clientes.".format(contador))


Updating a dictionary within a dictionary dynamically returns the same character count for different parameters

I'm trying to retrieve the character count of Wikipedia pages for articles in different languages. I'm using a dictionary with the page name as key and, as value, a dictionary with the language as key and the count as value.
The code is:
pages = ["L'arte della gioia", "Il nome della rosa"]
langs = ["it", "en"]
dicty = {}
dicto = {}
numz = 0
for x in langs:
    wikipedia.set_lang(x)
    for y in pages:
        pagelang = wikipedia.page(y)
        splittedpage = pagelang.content
        dicto[y] = dicty
        for char in splittedpage:
            numz += 1
            dicty[x] = numz
If I print dicto, I get
{"L'arte della gioia": {'it': 72226, 'en': 111647}, 'Il nome della rosa': {'it': 72226, 'en': 111647}}
The count should be different for the two pages.
Please try this code. I didn't run it because I don't have the wikipedia module installed.
Notes:
The reason your counts come out identical is that you reuse the same dicty object for every page and never reset numz, so every page key ends up pointing at the same inner dictionary with the same cumulative counts.
Since your expected result is dict[page, dict[lang, cnt]], I think it is more natural to iterate over pages first, then over languages. If you want to iterate over languages first for performance reasons, see the update below.
The character count of a text is simply len(text); there is no need to iterate and count characters one by one.
Use descriptive variable names; you will soon get lost with names like x and y.
pages = ["L'arte della gioia", "Il nome della rosa"]
langs = ["it", "en"]
dicto = {}
for page in pages:
    lang_cnt_dict = {}
    for lang in langs:
        wikipedia.set_lang(lang)
        page_lang = wikipedia.page(page)
        chars_cnt = len(page_lang.content)
        lang_cnt_dict[lang] = chars_cnt
    dicto[page] = lang_cnt_dict
print(dicto)
Update
If you want to iterate over langs first:
pages = ["L'arte della gioia", "Il nome della rosa"]
langs = ["it", "en"]
dicto = {}
for lang in langs:
    wikipedia.set_lang(lang)
    for page in pages:
        page_lang = wikipedia.page(page)
        chars_cnt = len(page_lang.content)
        if page in dicto:
            dicto[page][lang] = chars_cnt
        else:
            dicto[page] = {lang: chars_cnt}
print(dicto)
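A slightly more compact way to handle creating the nested dictionary in this second variant, just as a sketch of the same logic using dict.setdefault:

import wikipedia

pages = ["L'arte della gioia", "Il nome della rosa"]
langs = ["it", "en"]
dicto = {}
for lang in langs:
    wikipedia.set_lang(lang)
    for page in pages:
        # setdefault creates the inner dict on first access, so no if/else is needed
        dicto.setdefault(page, {})[lang] = len(wikipedia.page(page).content)
print(dicto)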

How to set a value for an empty list

I am starting to learn programming using BeautifulSoup. What I want to achieve with this code is to save prices from different pages. To do this I store the prices of each page in a list, and all those lists in another list. The problem is that some pages have no prices, so some of the inner lists are completely empty. What I am looking for is to assign the elements of "ListaR" to those empty lists, so that I don't run into problems later. Here's my code:
from bs4 import BeautifulSoup
import requests
import pandas as pd
from decimal import Decimal
from typing import List

AppID = ['495570', '540190', '607210', '575780', '338840', '585830', '637330', '514360', '575760', '530540', '361890', '543170', '346500', '555930', '575700', '595780', '362400', '562360', '745670', '763360', '689360', '363610', '575770', '467310', '380560']
ListaPrecios = list()
ListaUrl = list()  # <------- LIST
Blanco = [""]
ListaR = ["$0.00 USD", "$0.00 USD"]

for x in AppID:  # <--------- For each of the AppIDs...
    #STR#
    url = "https://steamcommunity.com/market/search?category_753_Game%5B%5D=tag_app_"+x+"&category_753_cardborder%5B%5D=tag_cardborder_0&category_753_item_class%5B%5D=tag_item_class_2#p1_price_asc"  # <------ Uses the AppID to build its market link
    ListaUrl += [url]  # <---------- ADDS EACH LINK TO A LIST

PageCromos = [requests.get(x) for x in ListaUrl]
SoupCromos = [BeautifulSoup(x.content, "html.parser") for x in PageCromos]
PrecioCromos = [x.find_all("span", {"data-price": True}) for x in SoupCromos]  # <--------- SAVES LISTS OF PRICES INSIDE A LIST

min_CromoList = []
for item in PrecioCromos:
    CromoList = [float(i.text.strip('USD$')) for i in item]
    min_CromoList.append(min(CromoList))  # <---------------- List with the minimum card price of each game

print(min_CromoList)
Output:
ValueError: min() arg is an empty sequence
You can change this line
min_CromoList.append(min(CromoList))
to:
if not CromoList:  # this will evaluate to True if the list is empty
    min_CromoList.append(min(ListaR))
else:
    min_CromoList.append(min(CromoList))
A neat feature of Python is that empty lists evaluate to False and non-empty lists evaluate to True.
Since min(ListaR) will always evaluate to '$0.00 USD', it is probably neater to write this as:
if not CromoList:
    min_CromoList.append('$0.00 USD')
else:
    min_CromoList.append(min(CromoList))
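As an aside, on Python 3.4+ min() also accepts a default= keyword, so the same idea can be written without the if/else (a sketch; the fallback string is the same placeholder value used above):

for item in PrecioCromos:
    CromoList = [float(i.text.strip('USD$')) for i in item]
    # default= is returned only when CromoList is empty
    min_CromoList.append(min(CromoList, default='$0.00 USD'))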

Generating a table with docx from a dataframe in python

Hello,
Currently I'm working on a project in which I have to generate some documents with the docx library in Python. I want to know how to generate a docx table from a dataframe, so that the output has all the columns and rows from the dataframe I've created. Here is my code, but it's not working correctly because I can't reach the final output:
table = doc.add_table(rows=len(detalle_operaciones_total1), cols=5)
table.style = 'Table Grid'
table.rows[0].cells[0].text = 'Nombre'
table.rows[0].cells[1].text = 'Operacion Nro'
table.rows[0].cells[2].text = 'Producto'
table.rows[0].cells[3].text = 'Monto en moneda de origen'
table.rows[0].cells[4].text = 'Monto en moneda local'
for y in range(1, len(detalle_operaciones_total1)):
    Nombre = str(detalle_operaciones_total1.iloc[y, 0])
    Operacion = str(detalle_operaciones_total1.iloc[y, 1])
    Producto = str(detalle_operaciones_total1.iloc[y, 2])
    Monto_en_MO = str(detalle_operaciones_total1.iloc[y, 3])
    Monto_en_ML = str(detalle_operaciones_total1.iloc[y, 4])
    table.rows[y].cells[0].text = Nombre
    table.rows[y].cells[1].text = Operacion
    table.rows[y].cells[2].text = Producto
    table.rows[y].cells[3].text = Monto_en_MO
    table.rows[y].cells[4].text = Monto_en_ML
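A minimal self-contained sketch of one way this could work (assuming python-docx and pandas; the sample DataFrame contents are made up): the table gets one extra row for the header, so the first data row of the dataframe is neither skipped nor overwritten.

from docx import Document
import pandas as pd

# hypothetical sample data with the same five columns
detalle_operaciones_total1 = pd.DataFrame(
    [["Ana", "001", "Credito", "100.00", "350.00"]],
    columns=["Nombre", "Operacion Nro", "Producto",
             "Monto en moneda de origen", "Monto en moneda local"])

doc = Document()
# one extra row so the header does not take the place of the first data row
table = doc.add_table(rows=len(detalle_operaciones_total1) + 1, cols=5)
table.style = 'Table Grid'
for col, name in enumerate(detalle_operaciones_total1.columns):
    table.rows[0].cells[col].text = name
for y in range(len(detalle_operaciones_total1)):
    for col in range(5):
        table.rows[y + 1].cells[col].text = str(detalle_operaciones_total1.iloc[y, col])
doc.save("tabla.docx")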

Scraping using loops in Python

I am working on an exercise to scrape a web page. The relevant part of the data is route_data.
route_data = ["javascript:mostrarFotografiaHemiciclo( '/wc/htdocs/web/img/diputados/peq/215_14.jpg', '/wc/htdocs/web', 'Batet Lamaña, Meritxell (Presidenta del Congreso de los Diputados)', 'Diputada por Barcelona', 'G.P. Socialista' ,'','');",
"javascript:mostrarFotografiaHemiciclo( '/wc/htdocs/web/img/diputados/peq/168_14.jpg', '/wc/htdocs/web', 'Rodríguez Gómez de Celis, Alfonso (Vicepresidente Primero)', 'Diputado por Sevilla', 'G.P. Socialista' ,'','');",]
I create a dictionary with empty values.
dictionary_data = {"Nombre":None, "Territorio":None, "Partido":None, "url":None}
I have to save the fields of each line into dictionary_data:
url = /wc/htdocs/web/img/diputados/peq/215_14.jpg
Nombre = Batet Lamaña, Meritxell
Territorio = Diputada por Barcelona
Partido = G.P. Socialista
To do this, I loop over route_data.
for i in route_data:
    text = i.split(",")
    nombre = text[2:4]
    territorio = text[4]
    partido = text[5]
But the output is:
[" 'Batet Lamaña", " Meritxell (Presidenta del Congreso de los Diputados)'"] 'Diputada por Barcelona' 'G.P. Socialista'
[" 'Rodríguez Gómez de Celis", " Alfonso (Vicepresidente Primero)'"] 'Diputado por Sevilla' 'G.P. Socialista'
How can I put this into the dictionary correctly?
A simple solution would be:
import re

all_routes = []
for i in route_data:
    text = re.findall("'.+?'", i)
    all_routes.append(
        {"Nombre": re.sub(r'\(.*?\)', '', text[2]).strip(),
         "Territorio": text[3],
         "Partido": text[-2],
         "Url": text[0]})

Real Estate Market Scraping using Python and BeautifulSoup

I need some guidance on how to parse a real estate market site using Python. I've read up on parsing websites, and I even did this in VBA, but I would like to do it in Python.
This is the page that will be parsed (it's only one offer for now, but eventually the program will work on the full range of real estate offers, multiple pages from kontrakt.szczecin.pl):
http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149
First of all, the program will use three pieces of information:
1/ The table with the main parameters:
Numer oferty 351149, Liczba pokoi 3, Cena 335 000 PLN, Cena za m2 3 350 PLN (offer number, number of rooms, price, price per square meter, etc.). However, the number of entries depends on the offer: sometimes it is 14, sometimes 12, sometimes 16, etc.
2/ The description of the property, in paragraphs (this is another part of the program and can be skipped for now): sometimes the table (1/) says there is a garage or a balcony, but a sentence in the paragraph says that the garage costs extra (which for me means the property doesn't have a garage) or that the balcony is a French balcony (which is no balcony for me).
My idea is that the program should find the right word in the paragraph (such as garage) and copy the surrounding text from the paragraph, some characters to the left and to the right (for instance 20 characters on both sides, but what if the word is right at the beginning?).
3/ Additional parameters:
Not every offer has them, but some, like this one (http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165), have information about the number of balconies. Sometimes there is information about a basement too. The code should be similar to the one for issue 1/.
So I tried something like this, using some internet sources (it is still incomplete):
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

my_url = "http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165"

# Opens a connection and grabs the page (urllib)
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

# html parsing (BeautifulSoup); html.parser -> we parse it as HTML, not e.g. XML
page_soup = soup(page_html, "html.parser")

# grabs the data about the real estate, like kitchen, offer no, etc.
containers = page_soup.findAll("section", {"class": "clearfix"}, {"id": "quick-summary"})
# print(len(containers)) - len(containers) checks how many such objects exist on the page

# There is only one such table on the page, but for the sake of learning I treat it as if there could be many.
container = containers[0]
find_dt = container.findAll("dt")
find_dd = container.findAll("dd")
print(find_dt[0].text + " " + find_dd[0].text)
It works, but it is still incomplete. I'm not continuing it right now because there is a major flaw. As you can see, the last print works by index, but not every property will have the same order (because, as I mentioned, sometimes there are 10 pieces of info, sometimes more, sometimes less). It would be a huge mess in the CSV.
My VBA program worked this way:
Copy the table to Excel (Sheet 1).
Sheet 2 held the parameters the program was looking for (such as Price).
The mechanism, in short: copy a parameter from sheet 2 (Price), go to sheet 1 (where the parsed information is), find the Price string, go one line below, copy the price value, go back to sheet 2, find Price, go below it, paste the price value. And so on.
I'm looking for help with the concept and with the coding as well.
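One way to think about the Python equivalent of that lookup-by-name mechanism, just as a sketch building on the find_dt / find_dd lists from the snippet above: pair each parameter name with its value in a dictionary, so the order and the number of rows on a given page stop mattering.

# Build {parameter name: value} from the <dt>/<dd> pairs found earlier,
# then pick out only the parameters of interest, with '-' as a fallback for missing ones.
params = {dt.text.strip(): dd.text.strip() for dt, dd in zip(find_dt, find_dd)}
wanted = ['Numer oferty', 'Liczba pokoi', 'Cena', 'Cena za m2']
row = [params.get(name, '-') for name in wanted]
print(row)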
EDIT:
PART 1 and PART 2 are ready. But I have big issues with PART 3. Here is the code:
from urllib import request as uReq
import requests
# thanks to this, the program can be stopped right away without executing the rest of the code; after this import, exit(0) is enough
from sys import exit
from urllib.request import urlopen as uReq2
from bs4 import BeautifulSoup as soup
import csv
import re
import itertools

filename = 'test.txt'
# counter, needed to calculate how many offer numbers the .txt file contains
num_lines = 0
# we create the data list and the URL lists; results will be appended to them, so they have to exist (as empty lists) first
list_of_lines = ['351238', '351237', '111111', '351353']
list_of_lines2 = []
list_of_URLs = []
list_of_redictered_URLs = []
KONTRAKT = 'http://www.kontrakt.szczecin.pl'

with open(filename, 'r') as file:
    for line in file:
        # add the line (offer) to the list
        list_of_lines.append(line.strip())
        # num_lines is a counter telling how many rows the list contains; it matters for building the loop over the URL addresses
        num_lines += 1

# build the URLs from the offer numbers contained in filename
for i in range(num_lines):
    nr_oferty = list_of_lines[i]
    my_url = "http://www.kontrakt.szczecin.pl/lista-ofert/?f_listingId=" + nr_oferty + "&f=&submit=Szukaj"
    list_of_URLs.append(my_url)
print(list_of_URLs)
# Part two: converting the list of links into a list of redirected links.
# The program opens a page that should redirect, but because the site uses JavaScript
# the task is harder. So the goal is to simulate a browser, download the page content
# and then extract the proper redirect link from it.
i = 0
for i in range(num_lines):
    url_redirect = list_of_URLs[i]
    my_url = url_redirect
    BROWSER = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    response = requests.get(my_url, headers=BROWSER)
    script1 = '<script>'
    script2 = '</script>'
    content_URL = str(response.content)
    find_script1 = (content_URL.find(script1))
    find_script2 = (content_URL.find(script2))
    url_ready = content_URL[find_script1:find_script2]
    print(i+1, 'z', num_lines, '-', 'oferta nr:', str(my_url[57:57+6]))
    list_of_redictered_URLs.append(url_ready)

# remove the unnecessary tags and characters to obtain a clean redirected link
list_of_redictered_URLs = [w.replace('<script>window.location=\\\'', '') for w in list_of_redictered_URLs]
list_of_redictered_URLs = [w.replace('\\\';', '') for w in list_of_redictered_URLs]
#print(list_of_redictered_URLs)

# remove empty rows from the list (offers that are no longer available come in as empty strings); item is just a variable name
filtered_list = list(filter(lambda item: item.strip(), list_of_redictered_URLs))
filtered_list = [KONTRAKT + item for item in filtered_list]
# convert to a tuple, since mutability (adding more links) will not be needed
filtered_list = tuple(filtered_list)
#print(str(filtered_list))
print('Lista linków:\n', filtered_list)

# The next part of the program downloads the relevant information (basic parameters)
# from kontrakt.szczecin.pl and then saves it to a csv file.
# CSV headers and the parameter names on the page (they must be identical to the names on the page,
# so that they can be assigned to the right columns in the .csv)
HEADERS = ['Numer oferty',
'Liczba pokoi',
'Cena',
'Cena za m2',
'Powierzchnia',
'Piętro',
'Liczba pięter',
'Typ kuchni',
'Balkon',
'Czynsz administracyjny',
'Rodzaj ogrzewania',
'Umeblowanie',
'Wyposażona kuchnia',
'Gorąca woda',
'Rodzaj budynku',
'Materiał',
'Rok budowy',
'Stan nieruchomości',
'Rynek',
'Dach:',
'Liczba balkonów:',
'Liczba tarasów:',
'Piwnica:',
'Ogród:',
'Ochrona:',
'Garaż:',
'Winda:',
'Kształt działki:',
'Szerokość działki (mb.):',
'Długość działki (mb.):',
'Droga dojazdowa:',
'Gaz:',
'Prąd:',
'Siła:','piwnica',
'komórk',
'strych',
'gospodarcze',
'postojow',
'parking',
'przynależn',
'garaż',
'ogród',
'ogrod',
'działka',
'ocieplony',
'moderniz',
'restaur',
'odnow',
'ociepl',
'remon',
'elew',
'dozór',
'dozor',
'monitoring',
'monit',
'ochron',
'alarm',
'strzeż',
'portier',
'wspólnot',
'spółdziel',
'kuchni',
'aneks',
'widna',
'ciemna',
'prześwit',
'oficyn',
'linia',
'zabudow',
'opłat',
'bezczynsz',
'poziom',
'wind',
'francuski',
'ul.',
'w cenie',
'dodatkową']
LINKI = ["Link"]
#HEADERS2 = ['Liczba balkonów:',
# 'Liczba tarasów:',
# 'Piwnica:',
# 'Ogród:',
# 'Ochrona:',
# 'Garaż:',
# 'Winda:']
HEADERS3 = ['piwnica',
'komórk',
'strych',
'gospodarcze',
'postojow',
'parking',
'przynależn',
'garaż',
'ogród',
'ogrod',
'działka',
'ocieplony',
'moderniz',
'restaur',
'odnow',
'ociepl',
'remon',
'elew',
'dozór',
'dozor',
'monitoring',
'monit',
'ochron',
'alarm',
'strzeż',
'portier',
'wspólnot',
'spółdziel',
'kuchni',
'aneks',
'widna',
'ciemna',
'prześwit',
'oficyn',
'linia',
'zabudow',
'opłat',
'bezczynsz',
'poziom',
'wind',
'francuski',
'ul.',
'w cenie',
'dodatkową',]
csv_name = 'data.csv'
print('Dane zostaną zapisane do pliku:', csv_name + '.csv')
print('\n>>>>Program rozpoczyna pobieranie danych')
# Downloading the links
i = 0
# Creates the csv file
# writerow can take only one argument, which is why it gets the concatenation of the individual lists.
# The LINKI list has a single element, because only data of one type can be concatenated; a list cannot be added to strings.
with open(csv_name + '.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    HEADERS_ALL = HEADERS + HEADERS3 + LINKI
    csvwriter.writerow(HEADERS_ALL)
    for i in range(len(filtered_list)):
        my_url = filtered_list[i]
        with uReq2(my_url) as uClient:
            page_soup = soup(uClient.read(), 'lxml')
        print('\t\t-----------', i+1, '-----------\n', my_url)
        # <dt> - parameter name, e.g. Kuchnia
        # <dd> - parameter value, e.g. widna
        row = ['-'] * len(HEADERS) + ['-'] * len(HEADERS3) + ['-'] * len(LINKI)
        # Basic parameters (kontrakt.szczecin.pl)
        for dt, dd in zip(page_soup.select('section#quick-summary dt'), page_soup.select('section#quick-summary dd')):
            if dt.text.strip() not in HEADERS:
                print("\n 1(dt,dd):UWAGA!, kolumna [{}] nie istnieje w nagłówkach! (stała: HEADERS)\n".format(dt.text.strip()))
                continue
            row[HEADERS.index(dt.text.strip())] = dd.text.strip()
        # Additional parameters
        for span, li in zip(page_soup.select('section#property-features span'), page_soup.select('section#property-features li')):
            if span.text.strip() not in HEADERS:
                print("\n 2:UWAGA(span,li), kolumna [{}] nie istnieje w nagłówkach (stała HEADERS)!\n".format(span.text.strip()))
                continue
            row[HEADERS.index(span.text.strip())] = li.text.strip()
        #csvwriter.writerow(row)
        print(row)
        # Now the fun begins...................................
        # the variable j refers to the HEADERS3 index; it is j, not i, because i is still in use
        # in the loop above
        for p in page_soup.select('section#description'):
            p = str(p)
            p = p.lower()
            for j in range(len(HEADERS3)):
                #print('j:', j)
                # find_p finds all the HEADERS3 keywords in the description paragraph of the page
                find_p = re.findall(HEADERS3[j], p)
                # the lists holding the start positions of the individual words have to start as '-' or 0 (?),
                # because when a given word is not found the lists stay empty in the first iteration of the loop,
                # which in consequence would lead to an out of range error
                m_start = []
                m_end = []
                lista_j = []
                for m in re.finditer(HEADERS3[j], p):
                    #print((m.start(), m.end()), m.group())
                    m_start.append(m.start())
                    m_end.append(m.end())
                    #print(h)
                for k in range(len(m_start)):
                    # actually I don't know any more what this is for..
                    try:
                        x = m_start[k]
                        y = m_end[k]
                    except IndexError:
                        x = m_start[0]
                        y = m_end[0]
                    #print('xy:', x, y)
                    #print(find_p)
                    #print(HEADERS3[j])
                    z = (HEADERS3[j] + ':', p[-60+x:y+60] + ' ++-NNN-++')
                    lista_j.append(z)
                print(lista_j)
                print(str(lista_j))
                row[HEADERS.index(span.text.strip())] = str(lista_j)
        csvwriter.writerow(row)
        #print(row)
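One detail that bites in PART 3: when a keyword appears near the start of the description, p[-60+x:y+60] gets a negative start index, so Python slices from the end of the string instead of from position 0. A small sketch of a fix, using the same variables as above, is to clamp the left edge:

                    # clamp the left edge so an early match does not wrap around to the end of the string
                    start = max(0, x - 60)
                    z = (HEADERS3[j] + ':', p[start:y + 60] + ' ++-NNN-++')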
This code snippet parses the quick-summary table of the property URL and saves it to a csv file:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import csv

# my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165'
my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149'

with uReq(my_url) as uClient:
    page_soup = soup(uClient.read(), 'lxml')

with open('data.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    for dt, dd in zip(page_soup.select('section#quick-summary dt'), page_soup.select('section#quick-summary dd')):
        csvwriter.writerow([dt.text.strip(), dd.text.strip()])
The result is in data.csv, screenshot from my LibreOffice:
To have the table transposed, you can use this code:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import csv

# my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-6664m2-339600pln-potulicka-nowe-miasto-szczecin-zachodniopomorskie,351165'
my_url = 'http://www.kontrakt.szczecin.pl/mieszkanie-sprzedaz-100m2-335000pln-grudziadzka-pomorzany-szczecin-zachodniopomorskie,351149'

with uReq(my_url) as uClient:
    page_soup = soup(uClient.read(), 'lxml')

headers = ['Numer oferty',
           'Liczba pokoi',
           'Cena',
           'Cena za m2',
           'Powierzchnia',
           'Piętro',
           'Liczba pięter',
           'Typ kuchni',
           'Balkon',
           'Czynsz administracyjny',
           'Rodzaj ogrzewania',
           'Gorąca woda',
           'Rodzaj budynku',
           'Materiał',
           'Rok budowy',
           'Stan nieruchomości',
           'Rynek',
           'Dach:',
           'Liczba balkonów:',
           'Piwnica:',
           'Kształt działki:',
           'Szerokość działki (mb.):',
           'Długość działki (mb.):',
           'Droga dojazdowa:',
           'Gaz:',
           'Prąd:',
           'Siła:']

with open('data.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"')
    csvwriter.writerow(headers)
    row = ['-'] * len(headers)
    for dt, dd in zip(page_soup.select('section#quick-summary dt'), page_soup.select('section#quick-summary dd')):
        if dt.text.strip() not in headers:
            print("Warning, column [{}] doesn't exist in headers!".format(dt.text.strip()))
            continue
        row[headers.index(dt.text.strip())] = dd.text.strip()
    csvwriter.writerow(row)
The result will be in a csv file like this (values not present on the page are substituted with '-'):
