I have used following code to add screenshots in pytest-html report, it's not giving any error but .png file is blank after running the code.
#pytest.mark.hookwrapper
def pytest_runtest_makereport(item):
"""
Extends the PyTest Plugin to take and embed screenshot in html report, whenever test fails.
:param item:
"""
pytest_html = item.config.pluginmanager.getplugin('html')
outcome = yield
report = outcome.get_result()
extra = getattr(report, 'extra', [])
if report.when == 'call' or report.when == "setup":
xfail = hasattr(report, 'wasxfail')
if (report.skipped and xfail) or (report.failed and not xfail):
file_name = report.nodeid.replace("::", "_") + ".png"
_capture_screenshot(file_name)
if file_name:
html = '<div><img src="%s" alt="screenshot" style="width:304px;height:228px;" ' \
'onclick="window.open(this.src)" align="right"/></div>' % file_name
extra.append(pytest_html.extras.html(html))
report.extra = extra
def _capture_screenshot(name):
driver.get_screenshot_as_file(name)
#pytest.fixture(scope='session', autouse=True)
def browser():
global driver
if driver is None:
driver = webdriver.Chrome()
return driver
Related
I tried to save screenshot of failure testcase in selenium python project by using driver.save_screenshot() method. I specified my folder as parameter. But can't see failed screenshot in my project. Here is the code.
self.driver.save_screenshot(".//Screenshots//"+"test_homePageTitle.png")
Here is the absolute path - /Users/cherry/Documents/Selenium pj/nopcommerceApp2/Screenshots
I want to save failed testcase as screenshot in my specific folder
Here is the full code:
import time
from pageObjects.loginPage import LoginPage
from utilities.readProperties import ReadConfig
class Test_001_Login:
baseURL = ReadConfig.getApplicationURL()
username = ReadConfig.getUseremail()
password = ReadConfig.getPassword()
def test_homePageTitle(self, setup):
self.driver = setup
self.driver.get(self.baseURL)
act_title = self.driver.title
self.driver.close()
if act_title == "Your store. Login":
assert True
else:
time.sleep(5)
# self.driver.save_screenshot("./nopcommerceApp2/Screenshots" + "test_homePageTitle.png")
self.driver.save_screenshots('.//Screenshots//"+"test_homePageTitle.png')
self.driver.close()
assert False
def test_login(self, setup):
self.driver = setup
self.driver.get(self.baseURL)
time.sleep(10)
self.lp = LoginPage(self.driver)
self.lp.setUserName(self.username)
self.lp.setPassword(self.password)
self.lp.clickLogin()
act_title = self.driver.title
self.driver.close()
if act_title == "Dashboard / nopCommerce administration":
assert True
else:
time.sleep(5)
self.driver.save_screenshot(".\\Screenshots\\" + "test_login.png")
self.driver.close()
assert False
it is not a sustainable way. Instead of it, you can use such reporting plugin.
I may sugges you pytest-html
pip install pytest-html
Then add foolowing code into your conftest.py
# to add screenshot for failed steps
#mark.hookwrapper
def pytest_runtest_makereport(item, call):
pytest_html = item.config.pluginmanager.getplugin('html')
outcome = yield
report = outcome.get_result()
extra = getattr(report, 'extra', [])
if report.when == 'call':
xfail_state = hasattr(report, 'wasxfail')
if (report.skipped and xfail_state) or (report.failed and not xfail_state):
mydriver = item.funcargs['driver']
screenshot = mydriver.get_screenshot_as_base64()
extra.append(pytest_html.extras.image(screenshot, ''))
report.extra = extra
I'm trying to Scrape LinkedInn for Job Listings. Unfortunately after each run I'm getting the same line repeatedly instead of all the listings. Would anyone know why this might be? I'm fairly new to WebScrapers. I'm not sure if it's my loop that's causing the same result to repeat or if I'm exporting to CSV incorrectly.
`
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import time
import pandas as pd
import csv
job_name = "Data Analyst"
country_name = "United States"
job_url ="";
for item in job_name.split(" "):
if item != job_name.split(" ")[-1]:
job_url = job_url + item + "%20"
else:
job_url = job_url + item
country_url ="";
for item in country_name.split(" "):
if item != country_name.split(" ")[-1]:
country_url = country_url + item + "%20"
else:
country_url = country_url + item
url = "https://www.linkedin.com/jobs/search?keywords=Data%20Analyst&location=United%20States&geoId=103644278&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0"
url.format(job_url,country_url)
# Creating a webdriver instance
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
#Change the path to match the location of your "chromedriver" file
#driver = webdriver.Chrome("/home/im-admin/.scripts/Py/chromedriver")
# Opening the url we have just defined in our browser
driver.get(url)
#We find how many jobs are offered.
jobs_num = driver.find_element(By.CSS_SELECTOR,"h1>span").get_attribute("innerText")
if len(jobs_num.split(',')) > 1:
jobs_num = int(jobs_num.split(',')[0])*2
else:
jobs_num = int(jobs_num)
jobs_num = int(jobs_num)
#We create a while loop to browse all jobs.
i = 2
while i <= int(jobs_num/2)+1:
#We keep scrollind down to the end of the view.
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
i = i + 1
print("Current at: ", i, "Percentage at: ", ((i+1)/(int(jobs_num/2)+1))*100, "%",end="\r")
try:
#We try to click on the load more results buttons in case it is already displayed.
infinite_scroller_button = driver.find_element(By.XPATH, ".//button[#aria-label='Load more results']")
infinite_scroller_button.click()
time.sleep(0.1)
except:
#If there is no button, there will be an error, so we keep scrolling down.
time.sleep(0.1)
pass
#We get a list containing all jobs that we have found.
job_lists = driver.find_element(By.CLASS_NAME,"jobs-search__results-list")
jobs = job_lists.find_elements(By.TAG_NAME,"li") # return a list
#We declare void list to keep track of all obtaind data.
job_title_list = []
company_name_list = []
location_list = []
date_list = []
job_link_list = []
#We loof over every job and obtain all the wanted info.
for job in jobs:
#job_title
job_title = job.find_element(By.CSS_SELECTOR,"h3").get_attribute("innerText")
job_title_list.append(job_title)
#company_name
company_name = job.find_element(By.CSS_SELECTOR,"h4").get_attribute("innerText")
company_name_list.append(company_name)
#location
location = job.find_element(By.CSS_SELECTOR,"div>div>span").get_attribute("innerText")
location_list.append(location)
#date
date = job.find_element(By.CSS_SELECTOR,"div>div>time").get_attribute("datetime")
date_list.append(date)
#job_link
job_link = job.find_element(By.CSS_SELECTOR,"a").get_attribute("href")
job_link_list.append(job_link)
jd = [] #job_description
seniority = []
emp_type = []
job_func = []
job_ind = []
for item in range(len(jobs)):
print(item)
job_func0=[]
industries0=[]
# clicking job to view job details
#__________________________________________________________________________ JOB Link
try:
job_click_path = f'/html/body/div/div/main/section/ul/li[{item+1}]'
job_click = job.find_element(By.XPATH,job_click_path).click()
except:
pass
#job_click = job.find_element(By.XPATH,'.//a[#class="base-card_full-link"]')
#__________________________________________________________________________ JOB Description
jd_path = '/html/body/div/div/section/div/div/section/div/div/section/div'
try:
jd0 = job.find_element(By.XPATH,jd_path).get_attribute('innerText')
jd.append(jd0)
except:
jd.append(None)
pass
#__________________________________________________________________________ JOB Seniority
seniority_path='/html/body/div/div/section/div/div/section/div/ul/li[1]/span'
try:
seniority0 = job.find_element(By.XPATH,seniority_path).get_attribute('innerText')
seniority.append(seniority0)
except:
seniority.append(None)
pass
#__________________________________________________________________________ JOB Time
emp_type_path='/html/body/div/div/section/div/div/section/div/ul/li[2]/span'
try:
emp_type0 = job.find_element(By.XPATH,emp_type_path).get_attribute('innerText')
emp_type.append(emp_type0)
except:
emp_type.append(None)
pass
#__________________________________________________________________________ JOB Function
function_path='/html/body/div/div/section/div/div/section/div/ul/li[3]/span'
try:
func0 = job.find_element(By.XPATH,function_path).get_attribute('innerText')
job_func.append(func0)
except:
job_func.append(None)
pass
#__________________________________________________________________________ JOB Industry
industry_path='/html/body/div/div/section/div/div/section/div/ul/li[4]/span'
try:
ind0 = job.find_element(By.XPATH,industry_path).get_attribute('innerText')
job_ind.append(ind0)
except:
job_ind.append(None)
pass
print("Current at: ", item, "Percentage at: ", (item+1)/len(jobs)*100, "%")
job_data = pd.DataFrame({
'Date': date,
'Company': company_name,
'Title': job_title,
'Location': location,
'Description': jd,
'Level': seniority,
'Type': emp_type,
'Function': job_func,
'Industry': job_ind,
'Link': job_link
})
#Change the path to jobdata.csv if you want it to output to a different folder.
##See https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html#
job_data.to_csv('jobdata.csv',encoding='utf-8',index=False)
`
This is my output
Date,Company,Title,Location,Description,Level,Type,Function,Industry,Link
2022-10-14,LHH,Data Analyst,"McLean, VA",,,,,,https://www.linkedin.com/jobs/view/data-analyst-at-lhh-3311865718?refId=pAkR2FDOYi8W2HOa%2FLgpiw%3D%3D&trackingId=5%2FX7p1W7L0eCE4XtpbzcEQ%3D%3D&position=23&pageNum=2&trk=public_jobs_jserp-result_search-card
2022-10-14,LHH,Data Analyst,"McLean, VA",,,,,,https://www.linkedin.com/jobs/view/data-analyst-at-lhh-3311865718?refId=pAkR2FDOYi8W2HOa%2FLgpiw%3D%3D&trackingId=5%2FX7p1W7L0eCE4XtpbzcEQ%3D%3D&position=23&pageNum=2&trk=public_jobs_jserp-result_search-card
2022-10-14,LHH,Data Analyst,"McLean, VA",,,,,,https://www.linkedin.com/jobs/view/data-analyst-at-lhh-3311865718?refId=pAkR2FDOYi8W2HOa%2FLgpiw%3D%3D&trackingId=5%2FX7p1W7L0eCE4XtpbzcEQ%3D%3D&position=23&pageNum=2&trk=public_jobs_jserp-result_search-card
I've tried printing the Panda Table directly with no success.
I am using pytest.mark.parametrize for data driven testing. Now when I am generating the html report, the test case name is coming like below which includes all the parameters(data). My goal is to capture only the test case name like "test_RSA_Health" and remove all additional details from the "Test" column of the report. Is it possible?
My Code:
conftest
import time
import allure
import pytest
from allure_commons.types import AttachmentType
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from Utilities.filepath import *
#pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
setattr(item, "rep_" + rep.when, rep)
test_fn = item.obj
docstring = getattr(test_fn, '__doc__')
if docstring:
rep.nodeid = docstring
return rep
#pytest.fixture(scope="function")
def selenium_driver(request):
chrome_options = Options()
chrome_options.add_argument("--headless")
# chrome_options.add_argument("--window-size=1920,1080")
# chrome_options.add_argument('--start-maximized')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument("--disable-extensions")
# chrome_options.add_argument('disable-infobars')
s = Service("C:\\Users\\aprat\\OneDrive\\Desktop\\selenium\\chromedriver98\\chromedriver.exe")
url = "https:test.com"
driver = webdriver.Chrome(service=s, options=chrome_options)
driver.maximize_window()
driver.set_window_size(1200, 600)
driver.get(url)
driver.find_element(By.NAME, "user_name").send_keys("9998887776")
driver.find_element(By.NAME, "password_name").send_keys("qwerty123")
driver.find_element(By.XPATH, "//button[#type= 'submit']").click()
time.sleep(3)
request.cls.driver = driver
yield driver
driver.close()
#pytest.fixture()
def log_on_failure(request, selenium_driver):
yield
item = request.node
driver = selenium_driver
if item.rep_call.failed:
allure.attach(driver.get_screenshot_as_png(), name="screenshot", attachment_type=AttachmentType.PNG)
test script:
import time
import pytest
from Pages.HomePage import HomePage
from TestCases.BaseTest import BaseTest
from Utilities import dataProvider
class Test_RSA_Health(BaseTest):
#pytest.mark.parametrize("pin,sumvalue,mobileno,selfage,fullname,email,firstname,lastname,dob,income,pan,designation,add1,add2,height,weight,nomfirstname,nomlastname,nomdob", dataProvider.get_data("rsa_health"))
def test_RSA_Health(self,pin,sumvalue,mobileno,selfage,fullname,email,firstname,lastname,dob,income,pan,designation,add1,add2,height,weight,nomfirstname,nomlastname,nomdob):
home = HomePage(self.driver)
healthinsuranepage = home.SelectHealth()
self.VerifyPresence_PinCodeTextBox()
healthinsuranepage.landing_page()
healthinsuranepage.InputPin(pin)
healthinsuranepage.SelectSum(str(sumvalue))
healthinsuranepage.InputMobileNo(mobileno)
insureddetailspage = healthinsuranepage.ClickNext()
self.VerifyPresence_SelfCheckBox()
insureddetailspage.landing_page()
insureddetailspage.SelectMemberSelf()
self.VerifyPresence_SelfAgeTextBox()
insureddetailspage.InputAge(selfage)
time.sleep(2)
quotespage = insureddetailspage.ClickNext()
time.sleep(5)
quotespage.landing_page()
quotespage.ShareQuotes()
time.sleep(3)
quotespage.SelectAllQuotes()
time.sleep(2)
quotespage.ClickNext1()
self.VerifyPresence_NameTextBox()
quotespage.InputName(fullname)
quotespage.InputEmail(email)
quotespage.InputMobileNo(mobileno)
time.sleep(2)
quotespage.ClickSubmit()
time.sleep(2)
self.VerifyPresence_CloseButton()
time.sleep(2)
quotespage.ClickCloseButton()
time.sleep(2)
policydetailspage = quotespage.RSAPlanSelect()
time.sleep(3)
propdetailspage = policydetailspage.ConfirmTenure()
policydetailspage.landing_page()
self.VerifyPresence_FirstNameTextBox()
propdetailspage.landing_page()
propdetailspage.InputFirstName(firstname)
propdetailspage.InputLastName(lastname)
propdetailspage.InputDOB(dob)
propdetailspage.SelectPropGender()
propdetailspage.InputEmailId(email)
propdetailspage.InputContactNo(mobileno)
propdetailspage.InputIncome(income)
propdetailspage.InputPANCard(pan)
propdetailspage.SelectOccupationDropdown()
self.VerifyPresence_SelectOccupationOption()
propdetailspage.SelectOccupation()
propdetailspage.InputDesignation(designation)
propdetailspage.SelectMaritalStatusDropdown()
self.VerifyPresence_MaritalStatusOption()
propdetailspage.SelectMaritalStatus()
propdetailspage.SelectEducationDropdown()
self.VerifyPresence_QualificationOption()
propdetailspage.SelectQualification()
propdetailspage.SelectTPANameDropdown()
self.VerifyPresence_TPANameOption()
propdetailspage.SelectTPA()
propdetailspage.InputAdd1(add1)
propdetailspage.InputAdd2(add2)
selfdetailspage = propdetailspage.ClickNext()
self.VerifyPresence_SelfFirstNameTextBox()
selfdetailspage.landing_page()
selfdetailspage.InputSelfFirstName(firstname)
selfdetailspage.InputSelfLastName(lastname)
selfdetailspage.InputSelfDOB(dob)
selfdetailspage.SelectSelfGender()
selfdetailspage.InputSelfHeight(height)
selfdetailspage.InputSelfWeight(weight)
selfdetailspage.InputSelfDesignation(designation)
selfdetailspage.InputNomineeFName(nomfirstname)
selfdetailspage.InputNomineeLName(nomlastname)
selfdetailspage.InputNomineeDOB(nomdob)
selfdetailspage.SelectNomineeGender()
selfdetailspage.SelectNomineeRltnDropdown()
self.VerifyPresence_NomRelationOption()
selfdetailspage.SelectNomineeRelation()
questionariespage = selfdetailspage.ClickNext()
time.sleep(4)
questionariespage.landing_page()
policyreviewpage = questionariespage.ClickNext()
self.VerifyPresence_NameValidationText()
policyreviewpage.landing_page()
proposer_name = policyreviewpage.GetName()
proposer_email = policyreviewpage.GetEmail()
proposer_mobno = policyreviewpage.GetPhoneNo()
try:
assert proposer_name == fullname
assert proposer_email == email
assert int(proposer_mobno) == mobileno
except Exception as e:
raise e
policyreviewpage.FinalSubmit()
self.VerifyPresence_ShareButton()
policyreviewpage.SharePolicy()
Like this:
test_details = [{'pin': 444, 'sumvalue': 444,.....}]
def pytest_generate_tests(metafunc):
if 'test_data' in metafunc.fixturenames:
metafunc.parametrize("test_data", test_details)
def test_RSA_Health(self,test_data)
pin = test_data['pin']
Or like this:
test_details = [{'pin': 444, 'sumvalue': 444,.....}]
#pytest.mark.parametrize("test_data", [(test_details)]
def test_RSA_Health(self,test_data)
pin = test_data['pin']
I'm on a project and trying to write file in python and I'm trying to write a file in nice format. I'd try a lot but I don't know what's going wrong?
I'd try:
def generate_file(self, lyrics):
self.path()
print('We are writing file ............')
with open('filename.srt', 'w') as lrc:
for i in range(len(lyrics)):
add = ''
if lyrics[i].isnumeric():
add += '\n'
elif lyrics[i].isalpha():
add += '\n\n'
lrc.write(lyrics[i]+add)
add += ''
lrc.close()
print('We downloaded your file!')
Output:
000:00:00‚000 --> 00:00:00‚000by RentAnAdviser.com100:00:22‚608 --> 00:00:26‚607Drink from me drink fromme oh ah oh ah200:00:26‚803 --> 00:00:30‚602Then we′ll shoot across the symphony300:00:30‚808 --> 00:00:38‚807Then we′ll shoot across the sky400:00:43‚599 --> 00:00:48‚498Oh angels sent from up above500:00:48‚702 --> 00:00:53‚801You know you make my world light up600:00:54‚005 --> 00:00:59‚004When I was down when I was hurt700:00:59‚218 --> 00:01:04‚717You came to lift me up800:01:04‚911 --> 00:01:09‚610Life is a drink and love′s a ****900:01:09‚812 --> 00:01:15‚011Oh now I think I must be miles up1000:01:15‚217 --> 00:01:20‚316When I was hurt withered dried up1100:01:20‚506 --> 00:01:26‚005You came to rain a flood1200:01:26‚217 --> 00:01:28‚716So drink from me drink from me1300:01:28‚900 -
I excepted:
0
00:00:00,000 --> 00:00:00,000
by RentAnAdviser.com
1
00:00:17,842 --> 00:00:21,341
Drink from me‚ drink from me
2
00:00:21,537 --> 00:00:23,336
Then we′ll shoot across the sky
3
00:00:23,546 --> 00:00:24,545
Drink from me‚ drink from me
How can I do that?
My project:
from bs4 import BeautifulSoup
import os, requests, platform
class EpicLyricFinderApp:
def __init__(self):
self.text = '%20'.join(input('Enter song name and also include singer: ').split(' '))
self.url = 'https://www.rentanadviser.com/en/subtitles/subtitles4songs.aspx?src='+self.text
self.user = None
self.app()
def app(self):
req = requests.get(self.url).content
soup = BeautifulSoup(req, 'html.parser')
print('Please wait ...................')
tag = soup.findAll('table')
link = [('https://www.rentanadviser.com/en/subtitles/'+l.get('href'))+'&type=srt' for l in [a.find('a') for a in tag]]
blank_name = [''.join((l.get_text()).split(' ')[17:]) for l in [a.find('a') for a in tag]]
[print('No. {} ==>> {}'.format(name+1,blank_name[name])) for name in range(len(blank_name))]
# Get input form user to choice lyrics
print('='*60)
while True:
try:
self.user = int(input('Which lyrics you wanna download?: '))
except ValueError:
continue
else:
break
# Open .srt link
req1 = requests.get(link[self.user]).content
soup1 = BeautifulSoup(req1, 'html.parser')
lyrics = [c.get_text() for c in soup1.findAll('span', attrs={'id':'ctl00_ContentPlaceHolder1_lblSubtitle'})]
self.generate_file(lyrics)
#staticmethod
def path():
if platform.system()=='Linux':
linux = '/home/rohit/Desktop/lrc'
if os.path.exists(linux):
os.chdir(linux)
else:
os.mkdir(linux)
os.chdir(linux)
else:
windows = 'Cd:/Users/ABC/rohit/Desktop/lrc'
if os.path.exists(windows):
os.chdir(windows)
else:
os.mkdir(windows)
os.chdir(windows)
def generate_file(self, lyrics):
self.path()
print('We are writing file ............')
with open('_'.join(self.text.split('%20'))+'.srt', 'w') as lrc:
for i in range(len(lyrics)):
add = ''
if lyrics[i].isnumeric():
add += '\n'
elif lyrics[i].isalpha():
add += '\n\n'
lrc.write(lyrics[i]+add)
add += ''
lrc.close()
print('We downloaded your file!')
if __name__ == '__main__':
app = EpicLyricFinderApp()
I'm trying to make a program to uinterface with the Google CSE API, iterate over a list of people with companies, and pull LinkedIn profile information from the structured data. It then runs a regex to pull information from those and append them to a text file. Testing it works up to a point, but once I reach a certain name in the list, I get this error:
Traceback (most recent call last):
File "C:\Users\svillamil\Desktop\CSE2.py", line 27, in <module>
results = google_search("Gene Grochala Capital Health", my_api_key, my_cse_id, num=1)
File "C:\Users\svillamil\Desktop\CSE2.py", line 17, in google_search
return res['items']
KeyError: 'items'
Investigating it on the CSE show that the name and company yield no results. So, I put the made an exception for the KeyError.
except KeyError:
pass
This did not work, so I tried:
except Exception as e:
pass
and even:
except:
pass
with no luck.
Is there something wrong with my code that's stopping this error from passing? Or could it be an issue with the initial input?
Here is my program for reference:
from googleapiclient.discovery import build
import pprint
import csv
import re
import time
import os
os.chdir('C:\\users\\name\\Desktop')
my_api_key = "xxxxx"
my_cse_id = "xxxxx"
def google_search(search_term, api_key, cse_id, **kwargs):
service = build("customsearch","v1",developerKey=api_key)#initializes an instance of the custom search service with the build module.
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()#executes cse().list() on service to return metadata on the search performed, the specific CSE it is calling, and any other variable that might be added when calling the function as a whole
return res['items']
a = 0
with open('list.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
a+=1
name = row[1] + ' ' + row[2] + ' at ' +row[4]
print("This trial will search for", name)
results = google_search(name, my_api_key, my_cse_id, num=1)
try:
for result in results:
fn = r"fn':\s'(.+?)'"
pt = r"pagemap':.+'title.+?\s'(.*?)'"
role = r"role':\W+(.+?)'"
org = r"org\W+(.+?)'"
with open("cse_result.txt", "a+") as nameLookup:
if re.search(str(fn),str(result)) is not None:
name2 = re.search(str(fn),str(result)).group(1)
nameLookup.write("Trial "+str(a)+'\n')
nameLookup.write("The name being searched for in this trial is "+name+'.\n')
nameLookup.write("The name found is "+str(name2)+"\n")
nameLookup.write('\n')
else:
nameLookup.write("Trial "+str(a)+'\n')
nameLookup.write("We could not find a name on this trial."+'\n')
nameLookup.write('\n')
if re.search(str(pt),str(result)) is not None:
position_title = re.search(str(pt),str(result)).group(1)
nameLookup.write("The position found at this trial is " + position_title + '.\n')
nameLookup.write('\n')
else:
nameLookup.write('We could not find a position title at this trial.')
nameLookup.write('\n')
if re.search(str(role),str(result)) is not None:
role_title = re.search(str(role),str(result)).group(1)
nameLookup.write("The position found at this trial is " + role_title + '.\n')
nameLookup.write('\n')
else:
nameLookup.write('We could not return a position at this trial.')
nameLookup.write('\n')
if re.search(str(org),str(result)) is not None:
orginization = re.search(str(org),str(result)).group(1)
nameLookup.write("The orginization found at this trial is " + orginization + '.\n')
nameLookup.write('\n')
else:
nameLookup.write('We could not return an orginization at this trial.')
nameLookup.write('\n')
nameLookup.write('\n')
nameLookup.write('==========================')
nameLookup.write('\n')
except KeyError:
pass
#time.sleep(1)
This still yielded the same error
=======================================================
Here is an edited code some changes based on the comments
def google_search(search_term, api_key, cse_id, **kwargs):
service = build("customsearch","v1",developerKey=api_key)#initializes an instance of the custom search service with the build module.
res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()#executes cse().list() on service to return metadata on the search performed, the specific CSE it is calling, and any other variable that might be added when calling the function as a whole
return res.get('items', [])
a = 0
def is_empty(any_structure):
if any_structure:
return False
else:
return True
with open('list.csv', 'r') as f:
reader = csv.reader(f)
for row in reader:
a+=1
name = row[1] + ' ' + row[2] + ' at ' +row[4]
print("This trial will search for", name)
results = google_search(name, my_api_key, my_cse_id, num=1)
for result in results:
fn = r"fn':\s'(.+?)'"
pt = r"pagemap':.+'title.+?\s'(.*?)'"
role = r"role':\W+(.+?)'"
org = r"org\W+(.+?)'"
with open("cse_result.txt", "a+") as nameLookup:
if is_empty(result)==True:
nameLookup.write('We could not return any data at this trial. Please see linkedin. This is trial '+a)
nameLookup.write('\n')
if re.search(str(fn),str(result)) is not None:
name2 = re.search(str(fn),str(result)).group(1)
nameLookup.write("Trial "+str(a)+'\n')
nameLookup.write("The name being searched for in this trial is "+name+'.\n')
nameLookup.write("The name found is "+str(name2)+"\n")
nameLookup.write('\n')
else:
nameLookup.write("Trial "+str(a)+'\n')
nameLookup.write("We could not find a name on this trial."+'\n')
nameLookup.write('\n')
if re.search(str(pt),str(result)) is not None:
position_title = re.search(str(pt),str(result)).group(1)
nameLookup.write("The position found at this trial is " + position_title + '.\n')
nameLookup.write('\n')
else:
nameLookup.write('We could not find a position title at this trial.')
nameLookup.write('\n')
if re.search(str(role),str(result)) is not None:
role_title = re.search(str(role),str(result)).group(1)
nameLookup.write("The position found at this trial is " + role_title + '.\n')
nameLookup.write('\n')
else:
nameLookup.write('We could not return a position at this trial.')
nameLookup.write('\n')
if re.search(str(org),str(result)) is not None:
orginization = re.search(str(org),str(result)).group(1)
nameLookup.write("The orginization found at this trial is " + orginization + '.\n')
nameLookup.write('\n')
else:
nameLookup.write('We could not return an orginization at this trial.')
nameLookup.write('\n')
nameLookup.write('\n')
nameLookup.write('==========================')
nameLookup.write('\n')
The problem now is that it is not appending the notice that there is no data found if the dictionary object is empty.