Web scraping TimeoutException: Message - Python

I am trying to scrape an e-commerce website. I would like to scrape the product description of every product in the search results. I can successfully scrape all the product links from the search results and get the product description of a single product. However, when I loop over the product links to get the product description of every product from the search results, a TimeoutException: Message error comes up.
I have already tried increasing the WebDriverWait timeout, and it does not fix the error.
Any idea what I should do?
Here is my code:
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options  # to customize chrome display
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from collections import Counter
import json
from turtle import delay
import time

# create object for chrome options
chrome_options = Options()
# Customize chrome display
chrome_options.add_argument('start-maximized')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('disable-notifications')
chrome_options.add_argument("window-size=1365,4597.190")
chrome_options.add_argument('--disable-infobars')

# create webdriver object
path = '/Applications/chromedriver'
webdriver_service = Service(path)
driver = webdriver.Chrome(executable_path=path, options=chrome_options)

baseurl = 'https://shopee.co.id/search?keyword=obat%20kanker'

product_links = []
for page in range(0, 6):
    search_link = 'https://shopee.co.id/search?keyword=obat%20kanker&page={}'.format(page)
    driver.get(search_link)
    WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "shopee-search-item-result__item")))
    driver.execute_script("""
        var scroll = document.body.scrollHeight / 10;
        var i = 0;
        function scrollit(i) {
            window.scrollBy({top: scroll, left: 0, behavior: 'smooth'});
            i++;
            if (i < 10) {
                setTimeout(scrollit, 500, i);
            }
        }
        scrollit(i);
    """)
    sleep(5)
    html = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
    soup = BeautifulSoup(html, "html.parser")
    product_list = soup.find_all('div', class_='col-xs-2-4 shopee-search-item-result__item')
    for item in product_list:
        for link in item.find_all('a', href=True):
            product_links.append(baseurl + link['href'])

for link in product_links:
    driver.get(link)
    WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME, "_2VZg1J")))
    driver.execute_script("""
        var scroll = document.body.scrollHeight / 10;
        var i = 0;
        function scrollit(i) {
            window.scrollBy({top: scroll, left: 0, behavior: 'smooth'});
            i++;
            if (i < 10) {
                setTimeout(scrollit, 500, i);
            }
        }
        scrollit(i);
    """)
    sleep(20)
    html = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
    soup = BeautifulSoup(html, "html.parser")
    name = soup.find('div', class_='_2rQP1z').text.replace('Star+', '')
    price = soup.find('div', class_='_2Shl1j').text.replace('Rp', '')
    sold = soup.find('div', class_='HmRxgn').text.strip()
    rate = soup.find('div', class_='_3y5XOB _14izon').text.strip()
    city = soup.find('span', class_='_2fJrvA').text.strip()
    specification = soup.find('div', class_='_2jz573').text.strip()
    herbcancer = {
        'name': name,
        'price': price,
        'sold': sold,
        'rate': rate,
        'city': city,
        'specification': specification
    }
    print(herbcancer)

The base URL is incorrect; that is why you get the TimeoutException. You are joining the product hrefs onto the search URL:
https://shopee.co.id/search?keyword=obat%20kanker
The correct base URL is:
https://shopee.co.id
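For example, the hrefs on the search-result cards are site-relative paths, so they have to be joined onto the site root rather than onto the full search URL. A minimal sketch (the href below is made up for illustration):

    from urllib.parse import urljoin

    baseurl = 'https://shopee.co.id'
    href = '/contoh-produk-i.12345.67890'  # hypothetical relative href from a search-result card

    # urljoin resolves the path against the site root instead of
    # appending it to the search URL and its query string
    print(urljoin(baseurl, href))  # https://shopee.co.id/contoh-produk-i.12345.67890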
The complete code is:
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options  # to customize chrome display
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from collections import Counter
import json
from turtle import delay
import time

# create object for chrome options
chrome_options = Options()
# Customize chrome display
chrome_options.add_argument('start-maximized')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('disable-notifications')
chrome_options.add_argument("window-size=1365,4597.190")
chrome_options.add_argument('--disable-infobars')

# create webdriver object
path = ''
webdriver_service = Service(path)
driver = webdriver.Chrome(executable_path=path, options=chrome_options)

baseurl = 'https://shopee.co.id'

product_links = []
for page in range(0, 6):
    search_link = 'https://shopee.co.id/search?keyword=obat%20kanker&page={}'.format(page)
    driver.get(search_link)
    WebDriverWait(driver, 20).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "shopee-search-item-result__item")))
    driver.execute_script("""
        var scroll = document.body.scrollHeight / 10;
        var i = 0;
        function scrollit(i) {
            window.scrollBy({top: scroll, left: 0, behavior: 'smooth'});
            i++;
            if (i < 10) {
                setTimeout(scrollit, 500, i);
            }
        }
        scrollit(i);
    """)
    sleep(5)
    html = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
    soup = BeautifulSoup(html, "html.parser")
    product_list = soup.find_all('div', class_='col-xs-2-4 shopee-search-item-result__item')
    for item in product_list:
        for link in item.find_all('a', href=True):
            comp = baseurl + link['href']
            product_links.append(comp)

for link in product_links:
    driver.get(link)
    WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME, "_2VZg1J")))
    driver.execute_script("""
        var scroll = document.body.scrollHeight / 10;
        var i = 0;
        function scrollit(i) {
            window.scrollBy({top: scroll, left: 0, behavior: 'smooth'});
            i++;
            if (i < 10) {
                setTimeout(scrollit, 500, i);
            }
        }
        scrollit(i);
    """)
    sleep(3)
    html = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
    soup = BeautifulSoup(html, "html.parser")
    name = soup.find('div', class_='_2rQP1z').text.replace('Star+', '')
    price = soup.find('div', class_='_2Shl1j').text.replace('Rp', '')
    sold = soup.find('div', class_='HmRxgn').text.strip()
    rate = soup.find('div', class_='_3y5XOB _14izon').text.strip()
    try:
        city = soup.find('span', class_='_2fJrvA').text.strip()
    except:
        city = ''
    try:
        specification = soup.find('div', class_='_2jz573').text.strip()
    except:
        specification = ''
    herbcancer = {
        'name': name,
        'price': price,
        'sold': sold,
        'rate': rate,
        'city': city,
        'specification': specification
    }
    print(herbcancer)
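Even with the corrected base URL, an individual product page can still load slowly or use a different layout, and a single slow page will abort the whole loop with a TimeoutException. A minimal sketch of how the per-product wait could be guarded so such pages are skipped instead (the class names are the generated Shopee classes used above and may change over time):

    for link in product_links:
        driver.get(link)
        try:
            WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.CLASS_NAME, "_2VZg1J"))
            )
        except TimeoutException:
            # the element never appeared: note the link and move on instead of crashing
            print('skipped (timed out):', link)
            continue
        # ... scroll, parse the page, and build the herbcancer dict as above ...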

Related

I am trying to scrape bestbuy.com and I am able to scrape just one page rather than multiple pages

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
import configparser
from datetime import datetime, timedelta, date
import time
import json
import requests

parser = configparser.RawConfigParser()
parser.read('config.ini')
page = parser['PROPERTIES']['PAGE']
url = parser['PROPERTIES']['URL']
OMIT_KEYWORDS = parser['FILTERS']['OMIT'].split(',')
INCLUDE_KEYWORDS = parser['FILTERS']['INCLUDE'].split(',')
END_DATE = datetime.strptime(parser['DATE']['END'], '%Y-%m-%d')
START_DATE = datetime.strptime(parser['DATE']['START'], '%Y-%m-%d')
minimum_comment_length = int(parser['PROPERTIES']['MIN_COMMENT_LENGTH'])
maximum_comment_length = int(parser['PROPERTIES']['MAX_COMMENT_LENGTH'])

# Setting up driver options
options = webdriver.ChromeOptions()
# Setting up path to chromedriver executable file
CHROMEDRIVER_PATH = r'C:\Users\HP\Desktop\INTERNSHIP\Target\chromedriver.exe'
# Adding options
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option("useAutomationExtension", False)
# Setting up chrome service
service = ChromeService(executable_path=CHROMEDRIVER_PATH)
# Establishing Chrome web driver using set service and options
driver = webdriver.Chrome(service=service, options=options)
wait = WebDriverWait(driver, 20)
driver.implicitly_wait(10)
time.sleep(2)
driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")

item_list = []
driver.get(url)
reviews = wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".review-item")))
time.sleep(2)
for review in reviews:
    this_review_date_string = review.find_element_by_xpath(".//time[contains(@class,'submission-date')]")
    this_review_date_string_ago = this_review_date_string.text
    date_today = date.today()
    if "month" in this_review_date_string_ago:
        date_period_string = this_review_date_string_ago.split("month")[0]
        date_period_int = int(date_period_string) * 30
        temp_review_date = date_today - timedelta(days=date_period_int)
    elif "day" in this_review_date_string_ago:
        date_period_string = this_review_date_string_ago.split("day")[0]
        date_period_int = int(date_period_string)
        temp_review_date = date_today - timedelta(days=date_period_int)
    elif "hour" in this_review_date_string_ago:
        date_period_string = this_review_date_string_ago.split("hour")[0]
        date_period_int = int(date_period_string)
        temp_review_date = date_today - timedelta(hours=date_period_int)
    elif "year" in this_review_date_string_ago:
        date_period_string = this_review_date_string_ago.split("year")[0]
        date_period_int = int(date_period_string) * 365
        temp_review_date = date_today - timedelta(days=date_period_int)
    this_review_datetime = temp_review_date.strftime('%d %B %Y')
    current_date = datetime.strptime(this_review_datetime, '%d %B %Y')
    if START_DATE < current_date < END_DATE:
        item = {
            'stars': review.find_element_by_xpath(".//p[contains(@class,'visually-hidden')]").text.replace("out of 5 stars", "").replace("Rated", ""),
            'username': review.find_element_by_xpath(".//div[contains(@class,'ugc-author v-fw-medium body-copy-lg')]").text,
            'userurl': "NA",
            'title': review.find_element_by_xpath(".//h4[contains(@class,'c-section-title review-title heading-5 v-fw-medium')]").text,
            'review_text': review.find_element_by_xpath(".//div[contains(@class,'ugc-review-body')]//p[contains(@class,'pre-white-space')]").text,
            'permalink': "NA",
            'reviewlocation': "NA",
            'reviewdate': this_review_datetime,
            'subproductname': "NA",
            'subproductlink': "NA",
        }
        item_list.append(item)
print(item_list)
with open("output.json", "r+") as outfile:
    json.dump(item_list, outfile)
I want to scrape the reviews from all pages, but for now I am only getting the reviews from one page. The link I am using for scraping is https://www.bestbuy.com/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331&page=1. I want to paginate. Please tell me how to run the loop so that I can scrape all pages.
page = 2
while True:
    try:
        # your code
        driver.find_element(By.XPATH, f"//a[text()='{page}']").click()
        page += 1
    except:
        break
This should be a simple way to click the a tags with the new page number:
<a aria-label="Page 1" class="" data-track="Page 1" href="/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331&page=8&page=1&pageSize=20&sku=6412331&sort=BEST_REVIEW&variant=A">1</a>
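Putting the two together, the click can drive a scrape-then-paginate loop. A minimal sketch, assuming the driver/wait setup from the question above and a hypothetical scrape_current_page() helper that holds the review-parsing code; the pager links are assumed to carry the page number as their text, as in the anchor tag shown above:

    from selenium.common.exceptions import NoSuchElementException

    def scrape_current_page():
        # placeholder: run the review-parsing loop from the question on the
        # currently visible reviews and append the results to item_list
        reviews = wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".review-item")))
        print(f"found {len(reviews)} reviews on this page")

    driver.get(url)
    page = 2
    while True:
        scrape_current_page()
        try:
            driver.find_element(By.XPATH, f"//a[text()='{page}']").click()
        except NoSuchElementException:
            break  # no link with the next page number: assume this was the last page
        time.sleep(2)  # crude wait for the next batch of reviews to render
        page += 1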
I don't write Python that often, so this is not going to be super clear. You just need to request the page with a dynamic page number (start with a range of 200-300), and whenever no review elements are found on a page, assume that is the end of the comments and move on to the next product.
...
item_list = []

def getPage():
    for i in range(arbitrary_number):
        time.sleep(2)
        driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        url = f"https://www.bestbuy.com/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331&page={i}"
        driver.get(url)
        reviews = wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".review-item")))
        time.sleep(2)
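The stop condition this answer describes (no review elements on a page means the end of the comments) can be expressed by catching the wait timeout instead of relying on an arbitrary upper bound. A minimal sketch, reusing the driver and wait from the question above:

    from selenium.common.exceptions import TimeoutException

    def get_pages(max_pages=300):
        for i in range(1, max_pages + 1):
            driver.get(f"https://www.bestbuy.com/site/reviews/bella-pro-series-12-6-qt-digital-air-fryer-oven-stainless-steel/6412331?variant=A&skuId=6412331&page={i}")
            try:
                reviews = wait.until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, ".review-item")))
            except TimeoutException:
                break  # no reviews rendered on this page: assume we are past the last page
            # parse `reviews` here with the same field extraction as in the question
            print(f"page {i}: {len(reviews)} reviews")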

Showing null or nothing by fetching data from website with Selenium Python

I want to fetch the validator's active status and uptime from the website, but all I can get is the active status; the uptime comes back as null. I am targeting the exact class on the page, but it still returns nothing. Can you please look at my code and tell me where I am going wrong? Please help!
Python code:
from bs4 import BeautifulSoup
import time
from selenium import webdriver
import soupsieve
from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
time.sleep(5)
cosmos = "https://www.mintscan.io/cosmos/validators/cosmosvaloper1we6knm8qartmmh2r0qfpsz6pq0s7emv3e0meuw"
driver.get(cosmos)
time.sleep(8)
soup = BeautifulSoup(driver.page_source, 'lxml')
driver.close()
uptime = soup.find('li', {'class': "InfoRow_container__2xTzg"})
uptime_dep = uptime.find('div', {'class': "InfoRow_value__1CHna"}).string
print(uptime_dep)
acitve = soup.find('div', {'class': "ValidatorInfo_statusBadge__PBIGr"})
para = acitve.find('p').string
print(para)
Personally, I'd skip Selenium and get the data from the API.
import requests

status_dict = {
    2: 'Inactive',
    3: 'Active'}

url = 'https://api.cosmostation.io/v1/staking/validator/cosmosvaloper1we6knm8qartmmh2r0qfpsz6pq0s7emv3e0meuw'
jsonData = requests.get(url).json()

uptime = 1 - (jsonData['uptime']['missed_blocks'] / jsonData['uptime']['over_blocks'])
status = jsonData['status']

print(uptime)
print(status_dict[status])
Output:
0.91
Active
Here's the json response:
print(jsonData)
{'rank': 38, 'account_address': 'cosmos1we6knm8qartmmh2r0qfpsz6pq0s7emv3um0vsa', 'operator_address': 'cosmosvaloper1we6knm8qartmmh2r0qfpsz6pq0s7emv3e0meuw', 'consensus_pubkey': 'cosmosvalconspub16adydsk7nw3d63qtn30t5rexhfg56pq44sw4l9ld0tcj6jvnx30sm7h6lm', 'bonded_height': 0, 'bonded_time': '0001-01-01T00:00:00Z', 'jailed': False, 'status': 3, 'tokens': '1034297369481', 'delegator_shares': '1034297369481.000000000000000000', 'moniker': 'Staked', 'identity': 'E7BFA6515FB02B3B', 'website': 'https://staked.us/', 'details': 'Staked operates highly available and highly secure, institutional grade staking infrastructure for leading proof-of-stake (PoS) protocols.', 'unbonding_height': '0', 'unbonding_time': '1970-01-01T00:00:00Z', 'rate': '0.100000000000000000', 'max_rate': '0.200000000000000000', 'max_change_rate': '0.020000000000000000', 'update_time': '2019-03-13T23:00:00Z', 'uptime': {'address': '7B3A2EFE5B3FCDF819FCF52607314CEFE4754BB6', 'missed_blocks': 9, 'over_blocks': 100}, 'min_self_delegation': '1', 'keybase_url': ''}
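If you want the uptime shown as a percentage, the same ratio the API gives can be formatted directly; a small addition to the snippet above:

    print(f"{uptime:.0%}")  # prints 91% for the response shown here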
Selenium-based solution:
You can use the below XPath:
//div[text()='Uptime']//following::div
to retrieve the Uptime value from the HTML DOM.
Code:
wait = WebDriverWait(driver, 30)
cosmos = "https://www.mintscan.io/cosmos/validators/cosmosvaloper1we6knm8qartmmh2r0qfpsz6pq0s7emv3e0meuw"
driver.get(cosmos)
time.sleep(8)
soup = BeautifulSoup(driver.page_source, 'lxml')
#driver.close()
# uptime = soup.find('li', {'class': "InfoRow_container__2xTzg"})
# uptime_dep = uptime.find('div', {'class': "InfoRow_value__1CHna"}).string
# print(uptime_dep)
ele = wait.until(EC.visibility_of_element_located((By.XPATH, "//div[text()='Uptime']//following::div")))
print(ele.text)
acitve = soup.find('div', {'class': "ValidatorInfo_statusBadge__PBIGr"})
para = acitve.find('p').string
print(para)
Imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
Output:
97%
Active

Crawl table data from 2 dropdown menus

I have this website: https://www.adbc.gov.ae/BusinessActivityInfo/BusinessActivity.aspx?culture=en-US
This website has two dropdown menus: Category and SubCategory. After choosing a Category and a SubCategory, it displays a table; a different Category and SubCategory combination displays a different table. How can I crawl this table for each Category and SubCategory?
This is what I have tried so far:
url = 'https://www.adbc.gov.ae/BusinessActivityInfo/BusinessActivity.aspx?culture=en-US'
req = requests.get(url)
soup = BeautifulSoup(req.text, "lxml")
content = soup.find("select",{"name":"ddlNatureId"})
options = content.find_all("option")
options1 = [y.text for y in options]
options1
The output:
['',
'ADVOCATE OFFICES',
'AGENCIES',
'AGRICULTURE',
'AGRICULTURE, LIVESTOCK AND FISHERIES ACTIVITIES',
'ANIMAL HUSBANDRY',
'ANIMAL SHELTERING SERVICES',
'ART GALLERY',
'AUDITING OFFICES',
'BAKERIES AND SWEETS',
...
]
Update:
This is what I have got so far. I found that Selenium can be used to select a value from the dropdown list. This is my code:
Some libraries:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support.ui import Select
import time
import sys
from bs4 import BeautifulSoup
import requests
Set up webdriver:
url = 'https://www.adbc.gov.ae/BusinessActivityInfo/BusinessActivity.aspx?culture=en-US'
chrome_driver_path = 'D:\\work\\crawl data\\selenium_project\\chromedriver.exe'
chrome_options = Options()
chrome_options.add_argument('--headless')
webdriver = webdriver.Chrome(
    executable_path=chrome_driver_path, options=chrome_options
)
Load the website and scrape data code:
with webdriver as driver:
    # Set timeout time
    wait = WebDriverWait(driver, 10)
    # retrieve url in headless browser
    driver.get(url)
    # find select box
    search = Select(driver.find_element_by_id("ddlNatureId"))
    search.select_by_value('ADVOCATE OFFICES')
    req = requests.get(url)
    soup = BeautifulSoup(req.text, "lxml")
    price = soup.find("select", {"name": "ddlSubCategId"})
    options = price.find_all("option")
    options1 = [y.text for y in options]
    driver.close()
print(options1)
Output:
[]
Expected output (It should be the list of SubCategory which Category is 'ADVOCATE OFFICES'):
['',
'Advertising Agent',
'Advocate Offices',
'Agricultural Equipment And Tools Rental',
'Air Transport',
'Agents',
...
]
My problem right now is that I cannot get the SubCategory data after I select the Category. How can I solve this problem?
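A minimal sketch of one way this could work: requests.get(url) downloads a fresh copy of the page, which never reflects the selection made in the Selenium session, so the SubCategory options have to be read from the browser's own DOM after the postback has repopulated the second dropdown. The element IDs are the ones used in the question; the wait condition is an assumption about how the page updates after a selection:

    with webdriver as driver:
        wait = WebDriverWait(driver, 10)
        driver.get(url)
        Select(driver.find_element_by_id("ddlNatureId")).select_by_value('ADVOCATE OFFICES')
        # wait until the postback has filled the SubCategory dropdown with more than the empty option
        wait.until(lambda d: len(Select(d.find_element_by_id("ddlSubCategId")).options) > 1)
        # parse the browser's current DOM, not a fresh requests.get() of the url
        soup = BeautifulSoup(driver.page_source, "lxml")
        options1 = [o.text for o in soup.find("select", {"name": "ddlSubCategId"}).find_all("option")]
    print(options1)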

print only one 'tr' tag in 'tbody' - Beautifulsoup

I am trying to print the content of only one 'tr' tag in 'tbody'.
I used this code to print all the 'tr' elements in 'tbody', but Python does not print the rows after Berlin. I used this URL: https://interaktiv.morgenpost.de/corona-virus-karte-infektionen-deutschland-weltweit/?fbclid=IwAR0xb7zTV0vstu-sLE3ByHZVSw89HyqjSwMhpfXT23RwcFqR57za2J_l7XQ.
This is the table I want to print completely: https://i.stack.imgur.com/i869g.png
from bs4 import BeautifulSoup
from selenium import webdriver

browser = webdriver.Chrome()
url = "https://interaktiv.morgenpost.de/corona-virus-karte-infektionen-deutschland-weltweit/?fbclid=IwAR0xb7zTV0vstu-sLE3ByHZVSw89HyqjSwMhpfXT23RwcFqR57za2J_l7XQ"
browser.get(url)
soup = BeautifulSoup(browser.page_source, "html.parser")

allStat = {}
table_body = soup.find('tbody')
table_rows = table_body.find_all('tr')
for i in table_rows:
    region = i.find('td', class_='region').get_text()
    confirmed = i.find('td', class_='confirmed').get_text()
    deaths = i.find('td', class_='deaths').get_text()
    allStat.update({region: [confirmed, deaths]})
print(allStat)
from selenium import webdriver
import pandas as pd
from selenium.webdriver.firefox.options import Options

options = Options()
options.add_argument('--headless')
driver = webdriver.Firefox(options=options)
driver.get(
    "https://interaktiv.morgenpost.de/corona-virus-karte-infektionen-deutschland-weltweit/")
btn = driver.find_element_by_css_selector(
    "button.btn.fnktable__expand").click()
df = pd.read_html(driver.page_source)[0]
df.to_csv("data.csv", index=False)
driver.quit()
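The key step in this answer is the click on button.btn.fnktable__expand: the table initially renders only its first few rows (which is presumably why the original loop stopped after Berlin), and only after the expand button is clicked does driver.page_source contain the full table, which pandas.read_html can then parse in a single call.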

How to find the nth child heading and print the text using beautifulsoup in python

With my code I am able to get the first heading of the project, and I also want the subheading (FSI Details) to be printed. I am not able to get the second heading using BeautifulSoup. I tried the reference for :nth-child.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
import urllib.request
from bs4 import BeautifulSoup
import time
import pandas as pd
import os

url = 'https://maharerait.mahaonline.gov.in'
chrome_path = r'C:/Users/User/AppData/Local/Programs/Python/Python36/Scripts/chromedriver.exe'

driver = webdriver.Chrome(executable_path=chrome_path)
driver.get(url)

WebDriverWait(driver, 20).until(EC.element_to_be_clickable(
    (By.XPATH, "//div[@class='search-pro-details']//a[contains(.,'Search Project Details')]"))).click()
Registered_Project_radio = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.ID, "Promoter")))
driver.execute_script("arguments[0].click();", Registered_Project_radio)
Application = driver.find_element_by_id("CertiNo")
Application.send_keys("P50500000005")
Search = WebDriverWait(driver, 10).until(
    EC.element_to_be_clickable((By.ID, "btnSearch")))
driver.execute_script("arguments[0].click();", Search)

View = [item.get_attribute('href') for item in
        driver.find_elements_by_tag_name("a") if
        item.get_attribute('href') is not None]
View = View[0]
driver.get(View)

request = urllib.request.Request(View)
html = urllib.request.urlopen(request).read()
soup = BeautifulSoup(html, 'html.parser')

divPInfo2 = soup.find("div", {"id": "DivProject"})
Project_title = divPInfo2.find("div", {'class': 'x_panel'}, recursive=False).find(
    "div", {'class': 'x_title'}).find("h2").text.strip()
print(Project_title)

Project_title1 = divPInfo2.find("div", {'class': 'x_panel'}, recursive=False).find(
    "div", {'class': 'x_title'}).find_all("h2")[1].text.strip()
print(Project_title1)  # the (FSI Details) heading should be printed here
You can try the CSS selector :contains("FSI Details"), which selects an element containing the string "FSI Details". This code prints the labels and values of the "FSI Details" section:
import requests
from bs4 import BeautifulSoup

url = 'https://maharerait.mahaonline.gov.in/PrintPreview/PrintPreview?q=BPUvrrjIzYs%2f2hwYj1YIOfflh9NisZW6zTns2KLjHBZn6cbQ008s91nzlFrDxVvLwR1vAeLID0%2bo%2bD0H0Z6o2t%2b5P%2b%2fbBOcHCbMQHU8gkwdNZJnbbfu6N7mWSpgKXt4AiQyzuEpoDE7FX6HZypqsGXz4ObYD4KpyRzCsFJaWTgA%3d'
soup = BeautifulSoup(requests.get(url).text, 'lxml')

fsi_content = soup.select_one('.x_title:contains("FSI Details") + .x_content')
print('{: <160}{: <8}'.format('Label', 'Value'))
print('-' * 168)
for label, text in zip(fsi_content.select('label'), fsi_content.select('div:has(> label) + div')):
    print('{: <160}{: <8}'.format(label.get_text(strip=True), text.get_text(strip=True)))
Prints:
Label Value
------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Built-up-Area as per Proposed FSI (In sqmts) ( Proposed but not sanctioned) ( As soon as approved, should be immediately updated in Approved FSI) 0
Built-up-Area as per Approved FSI (In sqmts) 11566.50
TotalFSI 11566.50
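:contains() is a non-standard pseudo-class, and newer versions of Soup Sieve (the selector engine BeautifulSoup uses for .select()) prefer an explicitly prefixed spelling; if you see a deprecation warning, the same selector can be written as:

    fsi_content = soup.select_one('.x_title:-soup-contains("FSI Details") + .x_content')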
Further reading:
CSS Selectors Reference
