Openpyxl Copy Time, returns -1 - python

I am trying to create an Excel file that is the combination of multiple Excel files. However, when I copy a cell with the value 00:00 and append it to the master Excel file, Excel thinks the time is for the year 1899?
Here is my code:
from openpyxl import Workbook, load_workbook
from openpyxl.chart import ScatterChart, Reference, Series

def excel_graphs_all(day, users):
    chart_wb = Workbook(write_only=True)
    graph_ws = chart_wb.create_sheet(day + ' Graphs', 0)
    chart_wb_filename = 'graphs_' + day + '.xlsx'
    columnNum = ['A', 'H']
    rowNum = 1
    i = 0
    for user in users:
        filename = user[1] + '_' + day + '.xlsx'
        iter_wb = load_workbook(filename=filename, read_only=True)
        ws = iter_wb.active
        chart_ws = chart_wb.create_sheet(user[1])
        for row in ws.rows:
            chart_ws.append([row[0].value, row[1].value])
        chart = ScatterChart()
        chart.title = user[1] + ' ' + day + ' Heartrate Data'
        chart.x_axis.title = 'Time'
        chart.y_axis.title = 'Heartrate'
        chart.x_axis.scaling.min = 0
        chart.x_axis.scaling.max = 1
        xvalues = Reference(chart_ws, min_col=1, min_row=1, max_row=ws.max_row)
        yvalues = Reference(chart_ws, min_col=2, min_row=1, max_row=ws.max_row)
        series = Series(yvalues, xvalues, title='Heartrate')
        chart.series.append(series)
        spot = columnNum[i % 2] + str(rowNum)
        graph_ws.add_chart(chart, spot)
        if ((i + 1) % 2) == 0:
            rowNum += 16
        i += 1
    chart_wb.save(chart_wb_filename)
    return chart_wb_filename
Thanks!

What do you mean by the value 00:00? Excel uses number formatting rather than distinct data types for dates and times. From the specification:
When using the 1900 date system, which has a base date of 30th December 1899, a serial date-time of 1.5 represents midday on the 31st December 1899.
It sounds like you just need to check the formatting for the relevant cells.
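As a quick way to inspect this, here is a minimal sketch (the file and sheet names below are placeholders, not from the question) that prints a cell's stored value next to its number format and then applies a time format:
from openpyxl import load_workbook

# Placeholder file/sheet names, for illustration only
wb = load_workbook('graphs_Monday.xlsx')
ws = wb['user1']

cell = ws['A2']
# The number format decides whether the stored value is rendered as a clock
# time or as a full date (which, for a bare time, lands on the 1899/1900 base date)
print(cell.value, cell.number_format)

cell.number_format = 'hh:mm'   # display the value as a time of day
wb.save('graphs_Monday.xlsx')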

Related

Miscalculation of change value from scraped data

I'm doing a scraping project against an API URL, scraping values (datetime, open, close, high, low) and writing them to a MySQL database.
Code:
import json
import time

import requests
import mysql.connector

# Initial request to find the first available kline's open time
startTime_source = "https://api.binance.com/api/v1/klines?startTime=0&symbol=btcusdt&interval=1h&limit=1000"
startTime_source_get = requests.get(startTime_source)
startTime_source_json = json.loads(startTime_source_get.text)

# timestamp now (in milliseconds)
tsnw = int(time.time() * 1000.0)

# scrape data from start to end (coin, x and mycursor are defined elsewhere in the script)
for y in range(startTime_source_json[0][0], tsnw, 3600000000):
    data = "https://api.binance.com/api/v1/klines?startTime=" + str(y) + "&symbol=" + coin[x] + "&interval=1h&limit=1000"
    data_get = requests.get(data)
    # convert the response body to a JSON list
    data_list = json.loads(data_get.text)
    # loop to convert each timestamp to a datetime
    for z in range(0, len(data_list)):
        # delete every field after index 5 (open time, open, high, low, close are kept)
        del data_list[z][5:]
        # the timestamp is the first field
        timestamp = data_list[z][0]
        # convert msec to sec
        timestamp = time.localtime(timestamp / 1000.0)
        # convert timestamp to a datetime string
        dt = time.strftime("%Y-%m-%d %H:%M:%S", timestamp)
        # replace the raw timestamp with the datetime string
        data_list[z][0] = dt
        # separate each feature
        dt_ = [data_list[z][0]]     # datetime
        open_ = [data_list[z][1]]   # open price
        close_ = [data_list[z][4]]  # close price
        high_ = [data_list[z][2]]   # high price
        low_ = [data_list[z][3]]    # low price
        # calculate the change value and write it to the MySQL database
        num_1 = float(data_list[z - 1][4])
        num_2 = float(data_list[z][4])
        # percentage change formula
        cal_change = ((num_2 - num_1) / num_1) * 100
        # round to 2 decimal places, e.g. 0.00
        change_ = [round(cal_change, 2)]
        data_to_insert = [dt_ + open_ + close_ + high_ + low_ + change_]
        try:
            # insert the row
            write_data = "INSERT INTO " + coin[x] + " (dt_,open_,close_,high_,low_,change_) VALUES (%s,%s,%s,%s,%s,%s)"
            mycursor.executemany(write_data, data_to_insert)
        # this shows an error multiple times
        except mysql.connector.Error as err:
            if err.sqlstate == "23000":
                print('error')
                pass
The problem is that multiple rows have a wrong calculation. For example, at datetime 2020-03-10 18:00:00 the change value should be 0.33, not -18.21. Why is that? Help please, thank you.
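One thing worth double-checking (an observation about the posted loop, not a confirmed explanation of the -18.21 figure): when z == 0, data_list[z - 1] is data_list[-1], which in Python is the last candle of the same 1000-row batch, so the first row of every batch has its change computed against a close price from the far end of that batch. A minimal sketch that carries the previous close across iterations instead (the sample values below are made up):
# Hypothetical candles: [open time, open, high, low, close], as returned by the klines endpoint
data_list = [
    ["2020-03-10 17:00:00", "7900", "7950", "7880", "7920"],
    ["2020-03-10 18:00:00", "7920", "7960", "7900", "7946"],
]

prev_close = None  # close of the previous candle, carried across rows and batches

for z in range(len(data_list)):
    close = float(data_list[z][4])
    # avoid data_list[z - 1] silently wrapping around to the last row when z == 0
    change = 0.0 if prev_close is None else round((close - prev_close) / prev_close * 100, 2)
    prev_close = close
    print(data_list[z][0], change)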

truncate, copy from one xlsx and paste to another with openpyxl: messes up all text boxes, charts etc

I am trying to automate some steps I have to do each week in Python 3.7.3.
Basically, it's all about
truncating xlsx-1
copying from xlsx-2 to the truncated xlsx-1
saving xlsx-1
It all goes well, but after these steps my xlsx-1 is all messed up. All formulas still work, but all the text boxes (with and without cell references in them) are gone, and all charts have a different format (e.g. new borders).
Why does this happen, and what can I do to avoid these issues?
print("Please type in the date first (format: YYMMDD) > ")
last_wk = dt.date.today().isocalendar()[1] - 1
curr_wk = last_wk + 1
prefix = f"{input()}_CW{str(last_wk)}"
prefix_final = f"{input()}_CW{str(curr_wk)}"
p_basic = 'C:\\Users\\Don_Andrej\\OneDrive - Shared Services GmbH\\Profile\\Desktop\\180816_Start\\Reports'
p_export = os.path.join(p_basic, '_process_BE_NL_BRE_exports', f"{prefix}")
p_final = os.path.join(p_basic, '_process_BE_NL_BRE_exports', f"{prefix_final}")
files = ['Offline vs Online sales week',
'Gefactureerd week',
'BE_onlineshopSales_oldOrder']
def truncate():
for file in files:
p = p_export + f"_{file}.xlsm"
wb = opx.load_workbook(filename = p)
ws = wb['rawData']
for all_row in range(1, ws.max_row + 1):
for all_col in range(1, ws.max_column + 1):
ws.cell(row = all_row, column = all_col).value = None
wb.save(p)
wb.close()
print(f"{file} has been truncated!")
def copy_paste():
for file in files:
p_from = p_export + f"_EXPORT_{file}.xlsx"
p_to = p_export + f"_{file}.xlsx"
wb_from = opx.load_workbook(filename = p_from)
ws_from = wb_from['Sheet1']
wb_to = opx.load_workbook(filename = p_to)
ws_to = wb_to['rawData']
for all_row in range(1, ws_from.max_row + 1):
for all_col in range(1, ws_from.max_column + 1):
ws_to.cell(row = all_row, column = all_col).value = ws_from.cell(row = all_row, column = all_col).value
wb_to.save(p_to)
wb_to.close()
wb_from.close()
print(f"The export were copied into {file}")
truncate()
copy_paste()

Python - parse csv data - algorithm error

I need to parse a big CSV file (1 GB) that contains weather data. The file itself is here:
ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/2014.csv.gz
Additional info (station codes and file format):
ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-stations.txt
ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily/by_year/readme.txt
I need to find the information on Kiev and Dnipropetrovsk there and visualise monthly averages.
I have written an algorithm for finding the data and its averages, but it doesn't give me data for the last month.
import csv
import matplotlib.pyplot as plt

f = open('2014.csv', 'rb')
try:
    reader = csv.reader(f)
    avgK = 0
    avgD = 0
    date = 0
    mon = 1
    avergK = []
    avergD = []
    count_date = 1
    for row in reader:
        if row[2] == 'TAVG':
            count_date += 1
            date = (int(row[1]) % 10000)
            if row[0] == 'UPM00033345':
                avgK += float(row[3]) / 10.0
            elif row[0] == 'UPM00034504':
                avgD += float(row[3]) / 10.0
            if (date // 100 > mon):
                print date // 100, mon, date % 100, avgK, avgD
                avergK.append(avgK / count_date)
                avergD.append(avgD / count_date)
                mon += 1
                avgK = 0
                avgD = 0
                count_date = 1
                continue
finally:
    f.close()

plt.subplot(2, 1, 1)
plt.plot(avergK)
plt.xlabel('Month')
plt.ylabel('Average Temperature')
plt.title('AVG in Kiev 2014')
plt.grid(True)

plt.subplot(2, 1, 2)
plt.plot(avergD)
plt.xlabel('Month')
plt.ylabel('Average Temperature')
plt.title('AVG in DNIPROPETROVSK 2014')
plt.grid(True)
plt.show()
Is it possible to solve it using pandas?
Maybe you could use pandas here, but you do not need it to solve the current problem. What happens is that you store a monthly average only when you find a line with a new month, but when you reach the end of the file you should also process the last month.
Your loop should be:
for row in reader:
    if row[2] == 'TAVG':
        count_date += 1
        date = (int(row[1]) % 10000)
        if row[0] == 'UPM00033345':
            avgK += float(row[3]) / 10.0
        elif row[0] == 'UPM00034504':
            avgD += float(row[3]) / 10.0
        if (date // 100 > mon):
            print date // 100, mon, date % 100, avgK, avgD
            avergK.append(avgK / count_date)
            avergD.append(avgD / count_date)
            mon += 1
            avgK = 0
            avgD = 0
            count_date = 1
            continue

# store values for the last month
avergK.append(avgK / count_date)
avergD.append(avgD / count_date)
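Since the question asks whether pandas could be used, here is a minimal sketch. It assumes the by_year file has no header row and its first columns are station ID, date as YYYYMMDD, element code, and value in tenths of a degree (per the readme linked above); the column names below are made up for readability:
import pandas as pd

# Column names are assumptions based on the readme's description of the by_year files
cols = ['station', 'date', 'element', 'value', 'mflag', 'qflag', 'sflag', 'obstime']
df = pd.read_csv('2014.csv', header=None, names=cols)

stations = ['UPM00033345', 'UPM00034504']  # Kiev, Dnipropetrovsk
tavg = df[(df['element'] == 'TAVG') & (df['station'].isin(stations))].copy()

tavg['month'] = tavg['date'] // 100 % 100   # MM out of YYYYMMDD
tavg['temp_c'] = tavg['value'] / 10.0       # values are stored in tenths of a degree

# Monthly mean per station, with one column per station
monthly = tavg.groupby(['station', 'month'])['temp_c'].mean().unstack(0)
print(monthly)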

How to interpret duplicate hourly records - Wunderground API

For a specific historical query: http://api.wunderground.com/api/MY_API_KEY/history_20131231/geolookup/q/Beijing/Beijing.json
I get periodic, duplicate data for a given hour. For example, on January 1st, 2014 at 2:00 pm there will be two rows with distinct data, such as:
"Year", "Month", "Day", "Hour", "Min", "Humidity", "Temperature_F", "Windspeed_mph", "Wind_direction_deg", "Air_Pressure_mb"
"2014","01","01","14","00","7","53","6.7","250","1014"
"2014","01","01","14","00","13","51.8","17.9","300","1014"
I don't think it's a problem with my code, but rather the API. How do I explain this? Anyway, here is my code:
'''
Takes a date and file name, returns an hourly print out of historical weather
data, including humidity, temperature, wind speed, wind direction, and air pressure
'''
import calendar
import csv
import json
import time
import urllib2

def print_column_headings(headers, file_name):
    with open(file_name, 'wb') as initial_file:
        w = csv.writer(initial_file, quoting=csv.QUOTE_ALL)
        w.writerow(headers)
    initial_file.close()

def print_one_day_of_weather_data(year, month, day, max_rows, file_to_write):
    # get data
    url = 'http://api.wunderground.com/api/<MY_API_KEY>/history_' + year + month + day + '/geolookup/q/Beijing/Beijing.json'
    f = urllib2.urlopen(url)
    print url
    json_string = f.read()
    parsed_json = json.loads(json_string)
    # parameterize data
    all_obs = parsed_json['history']['observations']
    # if we reach the end of the observations
    for n in range(0, len(all_obs)):
        # or we exceed the max rows desired
        if n > max_rows:
            return 0
        else:
            base_path = all_obs[n]
            date_path = base_path['date']
            year = date_path['year']
            month = date_path['mon']
            day = date_path['mday']
            hour = date_path['hour']
            min = date_path['min']
            humidity = base_path['hum']
            temp_f = base_path['tempi']
            windspeed_mph = base_path['wspdi']
            winddir_deg = base_path['wdird']
            air_pressure_mb = base_path['pressurem']
            # utc time
            params = [year, month, day, hour, min, humidity, temp_f, windspeed_mph, winddir_deg, air_pressure_mb]
            print params
            with open(file_to_write, 'a') as csvfile:
                w = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
                w.writerow(params)
            csvfile.close()

headers = ("Year", "Month", "Day", "Hour", "Min", "Humidity", "Temperature_F", "Windspeed_mph", "Wind_direction_deg", "Air_Pressure_mb")

def append_leading_zeroes(num):
    return "%02d" % (num,)

def days_in_a_month(year, month_num):
    return calendar.monthrange(year, month_num)[1]

def file_namer(city, month_num, year):
    return "raw weather - local time " + city + "-" + calendar.month_name[month_num] + "-" + str(year) + ".csv"

def gen_entire_month(city, month, year, should_print_headers, start_at_day=1):
    file_name = file_namer(city, month, year)
    if should_print_headers:
        print_column_headings(headers, file_name)
    for day in range(start_at_day, days_in_a_month(year, month) + 1):
        print_one_day_of_weather_data(str(year), append_leading_zeroes(month), append_leading_zeroes(day), 100, file_name)
        # if we make too many calls in a short period of time, the API refuses our calls
        time.sleep(60)

gen_entire_month("Beijing", 1, 2014, should_print_headers=True)
We get some obs from two sources, NWS or the World Meteorological Organization. Look at the field "metar" and only use readings that start with "METAR ...", because those are from the obs station. The ones that begin with anything else, like AAXX, are special readings. They are also legit, but blending the two sources is confusing.
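Applied to the loop above, a minimal filtering sketch along those lines (the "metar" field name comes from this answer; the sample entries and their values are made up) keeps only the station readings before writing rows:
# Hypothetical, trimmed-down entries from parsed_json['history']['observations'];
# the real response contains many more keys per observation.
all_obs = [
    {'metar': 'METAR ZBAA 011400Z ...', 'hum': '7', 'tempi': '53'},
    {'metar': 'AAXX 01141 54511 ...', 'hum': '13', 'tempi': '51.8'},
]

# Keep only readings whose "metar" field starts with "METAR"; the AAXX entries
# are the "special" readings from the second source described above.
station_obs = [obs for obs in all_obs if obs.get('metar', '').startswith('METAR')]

for obs in station_obs:
    print obs['tempi'], obs['hum']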

Fatal Error (INFADI) Missing Directory - Multiprocessing python arcgis script error

I have written a script that uses pool.map to process multiple netCDF files and store the information in a table. Each process runs a function to process one year. Each year has its own individual file geodatabase, table within that geodatabase, and mxd. I also set the default workspace and scratch workspace to that geodatabase. For example, when the function loads the year 1979 it accesses the 1979 geodatabase, the 1979 table within that geodatabase, and the 1979 mxd; 1980 would access the 1980 geodatabase, the 1980 table, and the 1980 mxd.
If I run 1 process everything works fine. If I try to run 2 or more I get Fatal Error (INFADI) Missing Directory. Right now I'm running 6 processes: 4 crash and the other 2 keep going without a problem.
Here is the code:
# Multiprocessing netCDF data into a table
######################################
import arcpy, calendar, datetime, numpy, multiprocessing, sys, re, timeit, os
from arcpy.sa import *

# Receive day and year and return the date str in MM/DD/YYYY
def getDate(day, year):
    date = datetime.datetime(year, 1, 1) + datetime.timedelta(day)
    date = date.timetuple()
    date = str(date[1]) + '/' + str(date[2]) + '/' + str(date[0])
    return date

# Main loop
# Receive a year int and process all dates within "good" months
def doCalc(year):
    yearstr = str(year)
    print('Starting doCalc: ' + yearstr)
    ############################################
    ####        CHANGE THIS INPUT           ####
    Species = 'Mallard'
    Regiondb = 'North_America'  # Spaces not allowed in filename to map
    Region = 'Duck Zone'        # Spaces allowed in DB
    regionField = 'ZONE_NAME'
    ############################################
    defaultGDB = "D:\\GIS\\projects\\LCC_WSI_Climate\\year" + yearstr + ".gdb"
    # Setting environment variables
    arcpy.env.workspace = defaultGDB
    arcpy.env.scratchWorkspace = defaultGDB
    arcpy.env.overwriteOutput = True
    # desired months
    goodmonth = (1, 2, 3, 9, 10, 11, 12)
    # Acquire necessary extensions and exit if they can't be acquired
    # Spatial Extension
    try:
        if arcpy.CheckExtension("Spatial") == "Available":
            arcpy.CheckOutExtension("Spatial")
            print("Acquired Spatial license")
        else:
            sys.exit("No Spatial Analyst license available")
    except:
        sys.exit("No Spatial Analyst license available")
    # Geostats Extension
    try:
        if arcpy.CheckExtension("GeoStats") == "Available":
            arcpy.CheckOutExtension("GeoStats")
            print("Acquired GeoStats license")
        else:
            sys.exit("No GeoStats license available")
    except:
        sys.exit("No GeoStats license available")
    # Try and except statements currently used for debugging and that is why the excepts are not specific.
    try:
        # Select map document and set up layers. Using a map document because NetCDFRasters aren't
        # playing nice if not "living" in a document
        print('Starting :' + yearstr)
        start = timeit.default_timer()
        mxd = arcpy.mapping.MapDocument("D:/GIS/projects/LCC_WSI_Climate/python code/WSI_maps" + yearstr + ".mxd")
        df = arcpy.mapping.ListDataFrames(mxd)[0]
        # Set the table to write to according to the year received
        for table in arcpy.mapping.ListTableViews(mxd):
            if table.name == 'T' + yearstr:
                WSITable = table
        # Set the Clip layer according to the Region specified above
        for dflayer in arcpy.mapping.ListLayers(mxd, "", df):
            if dflayer.name == Region:
                WSIClip = dflayer
            if dflayer.name == 'wsi_Layer':
                WSILayer = dflayer
        # Set directory where netCDF files reside
        direct = "D:/GIS/projects/LCC_WSI_Climate/python code/wsi/"
        # Set netCDF file according to the year received
        inputLayer = direct + 'wsi.' + yearstr + '.nc'
        # If it's 1979 it starts in September.
        if year == 1979:
            startday = 243
        else:
            startday = 0
        # Make sure the wsi_Layer is the correct file.
        arcpy.MakeNetCDFRasterLayer_md(inputLayer, "wsi", "x", "y", "wsi_Layer")
        # Check if the current year is a leap year
        if calendar.isleap(year):
            maxday = 366
        else:
            maxday = 365
        # Cycle through every day within the year
        for daycnt in range(startday, maxday):
            day = 0
            sendday = daycnt + 1
            date = getDate(daycnt, year)
            newdate = datetime.datetime(year, 1, 1) + datetime.timedelta(daycnt)
            newdate = newdate.timetuple()
            month = newdate[1]
            day = newdate[2]
            # If the month is not desired it will skip the day and continue with the next day
            if month not in goodmonth:
                continue
            datestr = str(month) + '/' + str(day) + '/' + str(year)
            print(datestr)
            # Use the Select by Dimension tool to change the netCDF layer to the current date
            WSILayerRas = Raster("wsi_Layer")
            arcpy.SelectByDimension_md(WSILayerRas, [["time", date]], "BY_VALUE")
            # Save the file in defaultGDB. Processing didn't work without saving.
            WSILayerRas.save("Temp" + yearstr)
            ##########################################
            ## Regions
            ##
            wsikm = 0
            datalist = []
            # Calculate time
            time = 'time ' + str(date)
            # Set up the cursor to write to the output Table defined above (taken from mxd).
            cursorout = arcpy.da.InsertCursor(WSITable, ("CATEGORY", "STATE", "SUBCATEGORY", "DATE", "SQKM", "SPECIES"))
            # Set up search cursor to go through the input dataset and clip raster to the shape of each feature.
            # Copy data to the output table
            with arcpy.da.SearchCursor(WSIClip, (regionField, "SHAPE@", "STATE_NAME")) as cursorin:
                for row in cursorin:
                    AOIname = row[0]
                    AOIshape = row[1]
                    AOIextent = AOIshape.extent
                    AOIstate = row[2]
                    # dealing with odd characters and spaces
                    AOIname = re.sub("\s+", "", AOIname)
                    AOIname = AOIname.strip()
                    AOIname = AOIname.replace("'", "")
                    AOIname = AOIname.replace("/", "_")
                    AOIstatea = re.sub("\s+", "", AOIstate)
                    # print('State: ' + AOIstate + ', AOI: ' + AOIname)
                    savetemp = AOIstatea + '_' + AOIname + '_' + yearstr
                    # Process crashes running this try/except. The except doesn't catch it.
                    try:
                        deleteme = Raster(arcpy.gp.ExtractByMask_sa(WSILayerRas, AOIshape))
                    except:
                        continue
                    deleteme.save(savetemp)
                    # Add raster to an array for deletion later
                    datalist.append(deleteme)
                    # Convert the extracted raster to a NumPy array and extract desired values
                    # by incrementing a counter and calculating area.
                    my_array = arcpy.RasterToNumPyArray(deleteme)
                    rows, cols = my_array.shape
                    countAOI = 0
                    wsikm = 0
                    # time calculation
                    for rowNum in xrange(rows):
                        for colNum in xrange(cols):
                            value = my_array.item(rowNum, colNum)
                            if value >= 7.2:
                                countAOI += 1
                    wsikm = countAOI * 1024
                    # write to the output Table
                    cursorout.insertRow((Region, AOIstate, AOIname, datestr, wsikm, Species))
                    # Clean up the geodatabase
                    ## print('Cleaning up')
                    arcpy.Delete_management(savetemp)
                    datasetList = arcpy.ListDatasets("Extract_W*", "Raster")
                    try:
                        for dataset in datasetList:
                            arcpy.Delete_management(dataset)
                    except:
                        continue
            # attempts at fixing the error
            deleteme = None
            del cursorout
            del cursorin
        # Finish calculating the time to process 1 entire year
        stop = timeit.default_timer()
        print stop - start
    except Exception as e:
        # print sys.exc_traceback.tb_lineno
        return e

####
# MAIN
####
if __name__ == '__main__':
    print('Starting script')
    # Start timing the entire process
    start = timeit.default_timer()
    # Year Range
    # Entire dataset
    # yearlist = list(range(1979, 2013))
    # Sample
    yearlist = list(range(1979, 1986))
    # Create pool
    print("Creating pool")
    pool = multiprocessing.Pool(7)
    # Call doCalc and pass the year list
    pool.map(doCalc, yearlist)
    ## e = doCalc(1979)
    print("Closing pool")
    pool.close()
    print("Joining pool")
    pool.join()
    # print(e[0])
    stop = timeit.default_timer()
    print stop - start
    print("Complete")
The fix was found and posted here: http://forums.arcgis.com/threads/109606-Multiprocessing-script-errors-on-geoprocessing-line-of-code-INFADI-(Missing-Dir)?p=387987&posted=1#post387987
The trick is to set os.environ["TEMP"], as well as TMP, uniquely within each process.
def doCalc(year):
    yearstr = str(year)
    import time
    time.sleep(1.1)
    newTempDir = r"C:\temp\gptmpenvr_" + time.strftime('%Y%m%d%H%M%S') + yearstr
    os.mkdir(newTempDir)
    os.environ["TEMP"] = newTempDir
    os.environ["TMP"] = newTempDir
    print('Starting doCalc: ' + yearstr)
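A variation on the same idea (not from the linked post): tempfile.mkdtemp creates a uniquely named directory, which avoids relying on the time.sleep(1.1) call to keep concurrent processes from colliding on the same path:
import os
import tempfile

def doCalc(year):
    yearstr = str(year)
    # mkdtemp guarantees a fresh directory name, so parallel workers cannot
    # end up sharing (or fighting over) the same scratch directory
    newTempDir = tempfile.mkdtemp(prefix='gptmpenvr_' + yearstr + '_', dir=r"C:\temp")
    os.environ["TEMP"] = newTempDir
    os.environ["TMP"] = newTempDir
    print('Starting doCalc: ' + yearstr)
    # ... rest of doCalc unchanged ...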
