Cannot get the "href" attributes via BeautifulSoup - Python

In short, I can't get the links from the "href" attribute on this site (a Turkish online seller of books and related items).
Here's my code (I know it's not the best; I've been learning Python online for a few months, so any pointers on best practices are also welcome).
I tried to get the book names, writers, prices, publishers and the links for each book; without the links it works as I expected.
import requests
import pandas as pd
from bs4 import BeautifulSoup
from time import sleep
from random import randint

yazar = []
fiyat = []
yayın = []
isim = []

for i in range(1, 10):
    url = "https://www.dr.com.tr/CokSatanlar/Kitap#/page=" + str(i)
    page = requests.get(url)
    soup = BeautifulSoup(page.text, "lxml")
    # book names
    k = soup.find_all("a", {"class": "prd-name"})
    for i in k:
        isim.append(i.text)
    # writer names
    y = soup.find_all("a", {"class": "who text-overflow"})
    for i in y:
        yazar.append(i.text)
    # prices
    f = soup.find_all("div", {"class": "prd-price"})
    for i in f:
        fiyat.append(i.text.split()[0])
    # publishers
    ye = soup.find_all("a", {"class": "prd-publisher"})
    for i in ye:
        yayın.append(i.get("title"))
    sleep(randint(2, 4))
However, when I try to get the links with
soup.find_all("a", {"class": "prd-name"}).get("href")
it returns None, and I couldn't manage to make this work whatever I tried.
Thank you all in advance, and sorry for a slightly longer than usual post.

The data you see on the page is loaded from an external location, so you need a different URL to get the correct data:
import requests
import pandas as pd
from bs4 import BeautifulSoup

url = "https://www.dr.com.tr/Catalog/CatalogProducts"
data = {
    "catalogId": "4020",
    "page": "1",
    "sortfield": "soldcount",
    "sortorder": "desc",
    "size": "60",
    "categoryid": "0",
    "parentId": "0",
    "mediatypes": "",
    "HideNotForSale": "true",
    "minPrice": "-1",
    "maxPrice": "-1",
    "writer": "",
    "minDiscount": "-1",
    "maxdiscount": "-1",
    "language": "",
}

all_data = []
for page in range(1, 3):  # <-- increase number of pages here
    print(f"Getting page {page}")
    data["page"] = page
    soup = BeautifulSoup(requests.post(url, data=data).content, "html.parser")
    for p in soup.select(".prd-content"):
        all_data.append(p.get_text(strip=True, separator="|").split("|")[:5])

df = pd.DataFrame(
    all_data, columns=["name", "autor", "price", "type", "publisher"]
)
print(df)
df.to_csv("data.csv", index=False)
Prints:
name autor price type publisher
0 Esra Ezmeci Seti 5 Kitap Takım - Defter Hediyeli Esra Ezmeci 155,45 TL İnce Kapak Destek Yayınları
1 Şimdi Onlar Düşünsün Bircan Yıldırım 36,20 TL İnce Kapak Destek Yayınları
2 İz Bıraktığın Kadar Varsın Esra Ezmeci 36,20 TL İnce Kapak Destek Yayınları
...
and saves data.csv.

I think you won't get None; you will get:
AttributeError: ResultSet object has no attribute 'get'. You're probably treating a list of elements like a single element. Did you call find_all() when you meant to call find()?
find_all() produces a ResultSet, so you have to iterate over it to get all the href values:
for a in soup.find_all("a", {"class": "prd-name"}):
    print('https://www.dr.com.tr' + a.get("href"))
Output
https://www.dr.com.tr/kitap/daha-adil-bir-dunya-mumkun/arastirma-tarih/politika-arastirma/turkiye-politika-/urunno=0001934858001
https://www.dr.com.tr/kitap/burasi-cok-onemli-enerjiden-ekonomiye-tam-bagimsiz-turkiye/arastirma-tarih/politika-arastirma/turkiye-politika-/urunno=0001966362001
https://www.dr.com.tr/kitap/iz-biraktigin-kadar-varsin/egitim-basvuru/psikoloji-bilimi/urunno=0001947472001
https://www.dr.com.tr/kitap/simdi-onlar-dusunsun/bircan-yildirim/egitim-basvuru/kisisel-gelisim/urunno=0001964436001
https://www.dr.com.tr/kitap/kadinlar-sicak-erkekler-soguk-sever/esra-ezmeci/egitim-basvuru/psikoloji-bilimi/urunno=0001904239001
https://www.dr.com.tr/kitap/dustugunde-kalkarsan-hayat-guzeldir/egitim-basvuru/psikoloji-bilimi/urunno=0001816754001
...
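If the goal is to collect the links alongside the other fields in the question's loop, a minimal sketch (reusing the question's base URL and the prd-name class; the list name linkler is chosen here just for illustration) could look like this:

linkler = []
for a in soup.find_all("a", {"class": "prd-name"}):
    href = a.get("href")
    if href:  # skip any anchor that has no href attribute
        linkler.append("https://www.dr.com.tr" + href)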


Convert Python lists to a JSON object

I have three lists emojiLink, emojiTitle, emojiDescription in my code below.
import requests
from bs4 import BeautifulSoup
import pandas as pd

r = requests.get("https://www.emojimeanings.net/list-smileys-people-whatsapp")
soup = BeautifulSoup(r.text, "lxml")

emojiLink = []
emojiTitle = []
emojiDescription = []

for tableRow in soup.find_all("tr", attrs={"class": "ugc_emoji_tr"}):
    for img in tableRow.findChildren("img"):
        emojiLink.append(img['src'])

for tableData in soup.find_all("td"):
    for boldTag in tableData.findChildren("b"):
        emojiTitle.append(boldTag.text)

for tableRow in soup.find_all("tr", attrs={"class": "ugc_emoji_tr"}):
    for tabledata in tableRow.findChildren("td"):
        if tabledata.has_attr("id"):
            k = tabledata.text.strip().split('\n')[-1]
            l = k.lstrip()
            emojiDescription.append(l)
I want to convert these lists into a JSON object that would look like:
{{"link": "emojiLink[0]", "title": "emojiTitle[0]", "desc": "emojiDescription[0]"}, {"link": "emojiLink[1]", "title": "emojiTitle[1]", "desc": "emojiDescription[1]"}} and so on.
I can't figure out how to do this.
Thanks in advance!
This returns an array of JSON objects, based on Chandella07's answer.
from bs4 import BeautifulSoup
import pandas as pd
import requests
import json

r = requests.get("https://www.emojimeanings.net/list-smileys-people-whatsapp")
soup = BeautifulSoup(r.text, "lxml")

emojiLinkList = []
emojiTitleList = []
emojiDescriptionList = []
jsonData = []

for tableRow in soup.find_all("tr", attrs={"class": "ugc_emoji_tr"}):
    for img in tableRow.findChildren("img"):
        emojiLinkList.append(img['src'])

for tableData in soup.find_all("td"):
    for boldTag in tableData.findChildren("b"):
        emojiTitleList.append(boldTag.text)

for tableRow in soup.find_all("tr", attrs={"class": "ugc_emoji_tr"}):
    for tabledata in tableRow.findChildren("td"):
        if tabledata.has_attr("id"):
            k = tabledata.text.strip().split('\n')[-1]
            l = k.lstrip()
            emojiDescriptionList.append(l)

for link, title, desc in zip(emojiLinkList, emojiTitleList, emojiDescriptionList):
    record = {"link": link, "title": title, "desc": desc}
    jsonData.append(record)

print(json.dumps(jsonData, indent=2))
Data Example:
{
    "link": "https://www.emojimeanings.net/img/emojis/purse_1f45b.png",
    "title": "Wallet",
    "desc": "After the shopping trip, the money has run out or the wallet was forgotten at home. The accessory keeps loose money but also credit cards or make-up. Can refer to shopping or money and stand for femininity and everything girlish."
},
Access the elements from the lists one by one, put them into a dict, and append each dict to a list at the end:
import json

# some example lists
em_link = ['a', 'b', 'c']
em_title = ['x', 'y', 'z']
em_desc = [1, 2, 3]

arr = []
for i, j, k in zip(em_link, em_title, em_desc):
    d = {}
    d.update({"link": i})
    d.update({"title": j})
    d.update({"desc": k})
    arr.append(d)

print(json.dumps(arr))
Output:
[{"link": "a", "title": "x", "desc": 1}, {"link": "b", "title": "y", "desc": 2}, {"link": "c", "title": "z", "desc": 3}]
There is something wrong with your dict format. {{...},{...}} is not a valid format, [{...},{...}] is valid.
Regarding the merging logic:
for i in zip([1, 2, 3], ["a", "b"], [8, 9, 10]):
    print(i)
... will output ...
(1, 'a', 8)
(2, 'b', 9)
Try something like this:
out = []
for i in zip(emojiLink, emojiTitle, emojiDescription):
    out.append({"link": i[0], ...})
You can use the json library to read/write in json format.
import json

with open('./smthn.json', 'w') as f:
    json.dump({"a": "dictionary"}, f)
https://devtut.github.io/python/json-module.html#storing-data-in-a-file
So, you want a list of dictionary records? If you're sure all of the lists are the same length, you can do:
gather = []
for l, t, d in zip(emojiLink, emojiTitle, emojiDescription):
    gather.append({"link": l, "title": t, "desc": d})
json.dump(gather, open("myrecord.json", "w"))
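Since pandas is already imported in the question, the same records can also be built without an explicit loop. A small sketch, assuming the three lists from the question's code exist and have equal length:

import pandas as pd

# emojiLink, emojiTitle, emojiDescription are the lists built in the question
df = pd.DataFrame({"link": emojiLink, "title": emojiTitle, "desc": emojiDescription})
json_string = df.to_json(orient="records")  # a JSON array of {"link", "title", "desc"} objects
print(json_string)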

How can I extract values from Tableau on this webpage

I am trying to extract the "mobility index" values for each state and county from this webpage:
https://www.cuebiq.com/visitation-insights-mobility-index/
The preferred output would be panel data of place (state/county) by date, for all available places and dates.
There is another thread (How can I scrape tooltips value from a Tableau graph embedded in a webpage) with a similar question. I tried to follow the solution there but it doesn't seem to work for my case.
Thanks a lot in advance.
(One way I have tried is to download the PDF files generated from Tableau, which contain all counties' values on a specific date. However, I still need to find a way to make a request for each date in the data. Anyway, let me know if you have a better idea than this route.)
This Tableau data URL doesn't return any data. In fact, it only renders images of the values (canvas, probably) and I'm guessing it detects clicks based on coordinates. It's probably made this way to cache the values and render quickly.
But when you click on a state, it actually returns data, though it seems it doesn't always return the result for the state (it does work for the individual counties).
The solution I've found is to use the tooltip to get the data for the state. When you click a state, it generates a request like this:
POST https://public.tableau.com/{path}/{session_id}/commands/tabsrv/render-tooltip-server
with the following form params:
worksheet: US Map - State - CMI
dashboard: CMI
tupleIds: [18]
vizRegionRect: {"r":"viz","x":496,"y":148,"w":0,"h":0,"fieldVector":null}
allowHoverActions: false
allowPromptText: true
allowWork: false
useInlineImages: true
where tupleIds: [18] refers to the 1-based index of the state in a list of states in reverse alphabetical order, like this:
stateNames = ["Wyoming","Wisconsin","West Virginia","Washington","Virginia","Vermont","Utah","Texas","Tennessee","South Dakota","South Carolina","Rhode Island","Pennsylvania","Oregon","Oklahoma","Ohio","North Dakota","North Carolina","New York","New Mexico","New Jersey","New Hampshire","Nevada","Nebraska","Montana","Missouri","Mississippi","Minnesota","Michigan","Massachusetts","Maryland","Maine","Louisiana","Kentucky","Kansas","Iowa","Indiana","Illinois","Idaho","Georgia","Florida","District of Columbia","Delaware","Connecticut","Colorado","California","Arkansas","Arizona","Alabama"]
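In other words, the tupleId for a given state is simply its 1-based position in that list (this is the same stateIndex + 1 used in the code further down); a tiny sketch:

# tupleIds are 1-based positions in the reverse-alphabetical state list above
tuple_id = stateNames.index("New York") + 1   # 19 for this ordering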
The tooltip request gives back a JSON containing the HTML of the tooltip, which has the CMI and YoY values you want to extract:
{
    "vqlCmdResponse": {
        "cmdResultList": [{
            "commandName": "tabsrv:render-tooltip-server",
            "commandReturn": {
                "tooltipText": "{\"htmlTooltip\": \"<HTML HERE WITH THE VALUES>\"}]},\"overlayAnchors\":[]}"
            }
        }]
    }
}
The only caveat is that you'll have to make one request per state:
import requests
from bs4 import BeautifulSoup
import json
import time

data_host = "https://public.tableau.com"

r = requests.get(
    f"{data_host}/views/CMI-2_0/CMI",
    params={
        ":showVizHome": "no",
    }
)
soup = BeautifulSoup(r.text, "html.parser")
tableauData = json.loads(soup.find("textarea", {"id": "tsConfigContainer"}).text)

dataUrl = f'{data_host}{tableauData["vizql_root"]}/bootstrapSession/sessions/{tableauData["sessionid"]}'

r = requests.post(dataUrl, data={
    "sheet_id": tableauData["sheetId"],
})

data = []

stateNames = ["Wyoming","Wisconsin","West Virginia","Washington","Virginia","Vermont","Utah","Texas","Tennessee","South Dakota","South Carolina","Rhode Island","Pennsylvania","Oregon","Oklahoma","Ohio","North Dakota","North Carolina","New York","New Mexico","New Jersey","New Hampshire","Nevada","Nebraska","Montana","Missouri","Mississippi","Minnesota","Michigan","Massachusetts","Maryland","Maine","Louisiana","Kentucky","Kansas","Iowa","Indiana","Illinois","Idaho","Georgia","Florida","District of Columbia","Delaware","Connecticut","Colorado","California","Arkansas","Arizona","Alabama"]

for stateIndex, state in enumerate(stateNames):
    time.sleep(0.5)  # for throttling
    r = requests.post(
        f'{data_host}{tableauData["vizql_root"]}/sessions/{tableauData["sessionid"]}/commands/tabsrv/render-tooltip-server',
        data={
            "worksheet": "US Map - State - CMI",
            "dashboard": "CMI",
            "tupleIds": f"[{stateIndex + 1}]",
            "vizRegionRect": json.dumps({"r": "viz", "x": 496, "y": 148, "w": 0, "h": 0, "fieldVector": None}),
            "allowHoverActions": "false",
            "allowPromptText": "true",
            "allowWork": "false",
            "useInlineImages": "true"
        })
    tooltip = json.loads(r.json()["vqlCmdResponse"]["cmdResultList"][0]["commandReturn"]["tooltipText"])["htmlTooltip"]
    soup = BeautifulSoup(tooltip, "html.parser")
    rows = [
        t.find("tr").find_all("td")
        for t in soup.find_all("table")
    ]
    entry = {"state": state}
    for row in rows:
        if row[0].text == "Mobility Index:":
            entry["CMI"] = "".join([t.text.strip() for t in row[1:]])
        if row[0].text == "YoY (%):":
            entry["YoY"] = "".join([t.text.strip() for t in row[1:]])
    print(entry)
    data.append(entry)

print(data)
Try this on repl.it
To get the county information, it's the same as in the post you linked: use the select endpoint, which gives you the data in the same format as described there.
The following will extract data for all counties and states:
import requests
from bs4 import BeautifulSoup
import json
import time

data_host = "https://public.tableau.com"
worksheet = "US Map - State - CMI"
dashboard = "CMI"

r = requests.get(
    f"{data_host}/views/CMI-2_0/CMI",
    params={
        ":showVizHome": "no",
    }
)
soup = BeautifulSoup(r.text, "html.parser")
tableauData = json.loads(soup.find("textarea", {"id": "tsConfigContainer"}).text)

dataUrl = f'{data_host}{tableauData["vizql_root"]}/bootstrapSession/sessions/{tableauData["sessionid"]}'

r = requests.post(dataUrl, data={
    "sheet_id": tableauData["sheetId"],
})

data = []

stateNames = ["Wyoming","Wisconsin","West Virginia","Washington","Virginia","Vermont","Utah","Texas","Tennessee","South Dakota","South Carolina","Rhode Island","Pennsylvania","Oregon","Oklahoma","Ohio","North Dakota","North Carolina","New York","New Mexico","New Jersey","New Hampshire","Nevada","Nebraska","Montana","Missouri","Mississippi","Minnesota","Michigan","Massachusetts","Maryland","Maine","Louisiana","Kentucky","Kansas","Iowa","Indiana","Illinois","Idaho","Georgia","Florida","District of Columbia","Delaware","Connecticut","Colorado","California","Arkansas","Arizona","Alabama"]

for stateIndex, state in enumerate(stateNames):
    time.sleep(0.5)  # for throttling
    r = requests.post(
        f'{data_host}{tableauData["vizql_root"]}/sessions/{tableauData["sessionid"]}/commands/tabsrv/render-tooltip-server',
        data={
            "worksheet": worksheet,
            "dashboard": dashboard,
            "tupleIds": f"[{stateIndex + 1}]",
            "vizRegionRect": json.dumps({"r": "viz", "x": 496, "y": 148, "w": 0, "h": 0, "fieldVector": None}),
            "allowHoverActions": "false",
            "allowPromptText": "true",
            "allowWork": "false",
            "useInlineImages": "true"
        })
    tooltip = json.loads(r.json()["vqlCmdResponse"]["cmdResultList"][0]["commandReturn"]["tooltipText"])["htmlTooltip"]
    soup = BeautifulSoup(tooltip, "html.parser")
    rows = [
        t.find("tr").find_all("td")
        for t in soup.find_all("table")
    ]
    entry = {"state": state}
    for row in rows:
        if row[0].text == "Mobility Index:":
            entry["CMI"] = "".join([t.text.strip() for t in row[1:]])
        if row[0].text == "YoY (%):":
            entry["YoY"] = "".join([t.text.strip() for t in row[1:]])
    r = requests.post(
        f'{data_host}{tableauData["vizql_root"]}/sessions/{tableauData["sessionid"]}/commands/tabdoc/select',
        data={
            "worksheet": worksheet,
            "dashboard": dashboard,
            "selection": json.dumps({
                "objectIds": [stateIndex + 1],
                "selectionType": "tuples"
            }),
            "selectOptions": "select-options-simple"
        })
    entry["county_data"] = r.json()["vqlCmdResponse"]["layoutStatus"]["applicationPresModel"]["dataDictionary"]["dataSegments"]
    print(entry)
    data.append(entry)

print(data)

Scrape Tables on Multiple Pages with Single URL

I am trying to scrape data from Fangraphs. The tables are split into 21 pages, but all of the pages use the same URL. I am very new to web scraping (and Python in general), but Fangraphs does not have a public API, so scraping the page seems to be my only option. I am currently using BeautifulSoup to parse the HTML code and I am able to scrape the initial table, but it only contains the first 30 players and I want the entire player pool. After two days of web searching I am stuck. The link and my current code are below. I know they have a link to download the CSV file, but that gets tedious throughout the season and I would like to expedite the data harvesting process. Any direction would be helpful, thank you.
https://www.fangraphs.com/projections.aspx?pos=all&stats=bat&type=fangraphsdc
import requests
import pandas as pd
from bs4 import BeautifulSoup

url = 'https://www.fangraphs.com/projections.aspx?pos=all&stats=bat&type=fangraphsdc&team=0&lg=all&players=0'
response = requests.get(url, verify=False)

# Use BeautifulSoup to parse the HTML code
soup = BeautifulSoup(response.content, 'html.parser')

# locate the projections grid (same table id used by pd.read_html in the answer below)
stat_table = soup.find_all('table', {'id': 'ProjectionBoard1_dg1_ctl00'})

# changes stat_table from ResultSet to a Tag
stat_table = stat_table[0]

# Convert html table to list
rows = []
for tr in stat_table.find_all('tr')[1:]:
    cells = []
    tds = tr.find_all('td')
    if len(tds) == 0:
        ths = tr.find_all('th')
        for th in ths:
            cells.append(th.text.strip())
    else:
        for td in tds:
            cells.append(td.text.strip())
    rows.append(cells)

# convert table to df
table = pd.DataFrame(rows)
import requests
from bs4 import BeautifulSoup
import pandas as pd

params = {
    "pos": "all",
    "stats": "bat",
    "type": "fangraphsdc"
}

data = {
    'RadScriptManager1_TSM': 'ProjectionBoard1$dg1',
    "__EVENTTARGET": "ProjectionBoard1$dg1",
    '__EVENTARGUMENT': 'FireCommand:ProjectionBoard1$dg1$ctl00;PageSize;1000',
    '__VIEWSTATEGENERATOR': 'C239D6F0',
    '__SCROLLPOSITIONX': '0',
    '__SCROLLPOSITIONY': '1366',
    "ProjectionBoard1_tsStats_ClientState": "{\"selectedIndexes\":[\"0\"],\"logEntries\":[],\"scrollState\":{}}",
    "ProjectionBoard1_tsPosition_ClientState": "{\"selectedIndexes\":[\"0\"],\"logEntries\":[],\"scrollState\":{}}",
    "ProjectionBoard1$rcbTeam": "All+Teams",
    "ProjectionBoard1_rcbTeam_ClientState": "",
    "ProjectionBoard1$rcbLeague": "All",
    "ProjectionBoard1_rcbLeague_ClientState": "",
    "ProjectionBoard1_tsProj_ClientState": "{\"selectedIndexes\":[\"5\"],\"logEntries\":[],\"scrollState\":{}}",
    "ProjectionBoard1_tsUpdate_ClientState": "{\"selectedIndexes\":[],\"logEntries\":[],\"scrollState\":{}}",
    "ProjectionBoard1$dg1$ctl00$ctl02$ctl00$PageSizeComboBox": "30",
    "ProjectionBoard1_dg1_ctl00_ctl02_ctl00_PageSizeComboBox_ClientState": "",
    "ProjectionBoard1$dg1$ctl00$ctl03$ctl01$PageSizeComboBox": "1000",
    "ProjectionBoard1_dg1_ctl00_ctl03_ctl01_PageSizeComboBox_ClientState": "{\"logEntries\":[],\"value\":\"1000\",\"text\":\"1000\",\"enabled\":true,\"checkedIndices\":[],\"checkedItemsTextOverflows\":false}",
    "ProjectionBoard1_dg1_ClientState": ""
}


def main(url):
    with requests.Session() as req:
        r = req.get(url, params=params)
        soup = BeautifulSoup(r.content, 'html.parser')
        data['__VIEWSTATE'] = soup.find("input", id="__VIEWSTATE").get("value")
        data['__EVENTVALIDATION'] = soup.find(
            "input", id="__EVENTVALIDATION").get("value")
        r = req.post(url, params=params, data=data)
        df = pd.read_html(r.content, attrs={
            'id': 'ProjectionBoard1_dg1_ctl00'})[0]
        df.drop(df.columns[1], axis=1, inplace=True)
        print(df)
        df.to_csv("data.csv", index=False)


main("https://www.fangraphs.com/projections.aspx")

Convert a downloaded string in a pattern like [{"t": "1", "id": "NOW.976818", ..., "cv": "1"}] into a pandas DataFrame?

I downloaded a list of news content into a pandas DataFrame. Instead of putting the info into a table, pandas put everything into a single cell. Upon inspection, the downloaded string is in this pattern:
"['[{"t": "1", "id": "NOW.976818", "dt": "2019/11/15 10:13", "h": "《美股業績》Nvidia季績勝預期 季度收入預測遜預期", "u": "",...
How do I convert this into a pandas table?
My code:
import requests
import pandas as pd
from bs4 import BeautifulSoup

urlpull = "http://www.aastocks.com/tc/resources/datafeed/getmorenews.ashx?cat=result-announcement&newstime=942660890&newsid=NOW.976800&period=0&key="
df = pd.DataFrame({'News': ['a'], 'Page': ['1']})

result = requests.get(urlpull)
result.raise_for_status()
result.encoding = "utf-8"

src = result.content
soup = BeautifulSoup(src, 'lxml')

news = []
for a_tag in soup.find_all('p'):
    news.append(a_tag.text)

df = df.append(pd.DataFrame(news, columns=['News']))
print(news)

df['num'] = df['News'].str.extract(r'(\d{5})')
df["stock_num"] = pd.to_numeric(df["num"], errors="coerce").fillna(0).astype("int64")
print(df)
df.to_excel("News.xlsx")
You can read it directly with pandas:
pd.read_table(filename_or_url)
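Since the endpoint returns JSON-like text rather than an HTML table, another option is to parse it with json and hand the records to pandas. This is a sketch, not tested against the live feed; if the body is double-encoded, as the sample string in the question suggests, a second json.loads may be needed:

import json
import requests
import pandas as pd

urlpull = "http://www.aastocks.com/tc/resources/datafeed/getmorenews.ashx?cat=result-announcement&newstime=942660890&newsid=NOW.976800&period=0&key="
result = requests.get(urlpull)
result.encoding = "utf-8"

# parse the JSON array of news records, e.g. [{"t": "1", "id": "NOW.976818", ...}]
records = json.loads(result.text)
df = pd.DataFrame(records)
print(df.head())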

Python 2.7 BeautifulSoup email scraping stops before end of full database

Hope you are all well! I'm new and using Python 2.7. I'm trying to extract emails from a publicly available directory website that does not seem to have an API. This is the site: http://www.tecomdirectory.com/companies.php?segment=&activity=&search=category&submit=Search
The code stops gathering emails at the point on the page where it says "load more".
Here is my code:
import requests
import re
from bs4 import BeautifulSoup

file_handler = open('mail.txt', 'w')
soup = BeautifulSoup(requests.get('http://www.tecomdirectory.com/companies.php?segment=&activity=&search=category&submit=Search').content)
tags = soup('a')
list_new = []

for tag in tags:
    if re.findall(r'href="mailto:([^"#]+#[^"]+)">\1</a>', '%s' % tag):
        list_new = list_new + re.findall(r'href="mailto:([^"#]+#[^"]+)">\1</a>', '%s' % tag)

for x in list_new:
    file_handler.write('%s\n' % x)

file_handler.close()
How can I make sure that the code goes to the end of the directory and does not stop where it shows "load more"?
Thanks.
Warmest regards
You just need to post some data, in particular incrementing group_no to simulate clicking the load more button:
from bs4 import BeautifulSoup
import requests

# you can set whatever here to influence the results
data = {"group_no": "1",
        "search": "category",
        "segment": "",
        "activity": "",
        "retail": "",
        "category": "",
        "Bpark": "",
        "alpha": ""}

post = "http://www.tecomdirectory.com/getautocomplete_keyword.php"

with requests.Session() as s:
    soup = BeautifulSoup(
        s.get("http://www.tecomdirectory.com/companies.php?segment=&activity=&search=category&submit=Search").content,
        "html.parser")
    print([a["href"] for a in soup.select("a[href^=mailto:]")])
    for i in range(1, 5):
        data["group_no"] = str(i)
        soup = BeautifulSoup(s.post(post, data=data).content, "html.parser")
        print([a["href"] for a in soup.select("a[href^=mailto:]")])
To go all the way to the end, you can loop until the POST returns no HTML, which signifies that there are no more pages to load:
def yield_all_mails():
    data = {"group_no": "1",
            "search": "category",
            "segment": "",
            "activity": "",
            "retail": "",
            "category": "",
            "Bpark": "",
            "alpha": ""}
    post = "http://www.tecomdirectory.com/getautocomplete_keyword.php"
    start = "http://www.tecomdirectory.com/companies.php?segment=&activity=&search=category&submit=Search"
    with requests.Session() as s:
        resp = s.get(start)
        soup = BeautifulSoup(s.get(start).content, "html.parser")
        yield (a["href"] for a in soup.select("a[href^=mailto:]"))
        i = 1
        while resp.content.strip():
            data["group_no"] = str(i)
            resp = s.post(post, data=data)
            soup = BeautifulSoup(resp.content, "html.parser")
            yield (a["href"] for a in soup.select("a[href^=mailto:]"))
            i += 1
So if we ran the function as below, setting "alpha": "Z" to iterate over just the Z's:
from itertools import chain

for mail in chain.from_iterable(yield_all_mails()):
    print(mail)
We would get:
mailto:info#10pearls.com
mailto:fady#24group.ae
mailto:pepe#2heads.tv
mailto:2interact#2interact.us
mailto:gc#worldig.com
mailto:marilyn.pais#3i-infotech.com
mailto:3mgulf#mmm.com
mailto:venkat#4gid.com
mailto:info#4power.biz
mailto:info#4sstudyabroad.com
mailto:fouad#622agency.com
mailto:sahar#7quality.com
mailto:mike.atack#8ack.com
mailto:zyara#emirates.net.ae
mailto:aokasha#zynx.com
Process finished with exit code 0
You should put a sleep in between requests so you don't hammer the server and get yourself blocked.
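For example, the POST inside the loop could go through a small helper that pauses first; a sketch (polite_post is a made-up name, not part of the code above):

import time

def polite_post(session, url, data, delay=1.0):
    # wait before each request so the directory server is not hammered
    time.sleep(delay)
    return session.post(url, data=data)

# inside the loop: resp = polite_post(s, post, data)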
