Check two Excel files for common products with Python pandas and pick the product with the lowest price

I have two Excel files from two different wholesalers with product and stock quantity information.
Some of the products are common, so they exist in both files.
The number of products differs, e.g. the first file has 65000 products and the second has 9000.
I need to iterate through the products of the first file based on the common column 'EAN CODE' and check whether each product also exists in the EAN column of the second file.
Then, for each match, pick the product with the lower price (and stock > 0) and write that product's row to an output Excel file.
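For reference, the core of this task needs no explicit iteration: concatenate the two files, drop out-of-stock rows, and keep the cheapest row per EAN. A minimal sketch, assuming the files share columns named 'EAN Code', 'Price' and 'Stock' (the file names and headers here are placeholders; adjust to your real ones):
import pandas as pd

df1 = pd.read_excel('wholesaler_a.xlsx')  # hypothetical file names
df2 = pd.read_excel('wholesaler_b.xlsx')
combined = pd.concat([df1, df2], ignore_index=True)

# Only in-stock offers can win
combined = combined[combined['Stock'] > 0]

# idxmin gives the row label of the cheapest offer within each EAN group
cheapest = combined.loc[combined.groupby('EAN Code')['Price'].idxmin()]
cheapest.to_excel('output.xlsx', index=False)
The full script below does the same thing, plus the per-wholesaler column normalisation.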

import os
from datetime import datetime

import pandas

from utils import recognize_excel_type

dataframes = []
input_directory = 'in'
for file in os.listdir(input_directory):
    file_path = os.path.join(input_directory, file)
    if file.lower().endswith(('xlsx', 'xls')):
        dataframes.append(pandas.read_excel(file_path))
    elif file.lower().endswith('csv'):
        dataframes.append(pandas.read_csv(file_path, delimiter=';'))

combined_dataframe = pandas.DataFrame(columns=['Price', 'Stock', 'EAN Code'])
for dataframe in dataframes:
    this_type = recognize_excel_type(dataframe)
    if this_type == 'DIFOX':
        dataframe.rename(columns={
            'retail price': 'Price',
            'availability (steps)': 'Stock',
            'EAN number 1': 'EAN Code',
        }, inplace=True)
    elif this_type == 'ECOM_VGA':
        # In this layout the real headers sit in the third row
        headers = dataframe.iloc[2]
        dataframe = dataframe[3:]
        dataframe.columns = headers
        dataframe.rename(columns={'Price (€)': 'Price'}, inplace=True)
    elif this_type == 'MAXCOM':
        dataframe.rename(columns={
            'VK-Preis': 'Price',
            'Verfügbar': 'Stock',
            'EAN-Code': 'EAN Code',
        }, inplace=True)
    else:
        continue
    tuned_dataframe = dataframe[combined_dataframe.columns]
    # DataFrame.append was removed in pandas 2.0; concat replaces it
    combined_dataframe = pandas.concat(
        [combined_dataframe, tuned_dataframe], ignore_index=True
    )

combined_dataframe.dropna(inplace=True)
# Strip '>' prefixes such as '> 10' so the columns cast cleanly
combined_dataframe['Stock'] = combined_dataframe['Stock'].replace('> ?', '', regex=True)
combined_dataframe['Price'] = combined_dataframe['Price'].replace('> ?', '', regex=True)
combined_dataframe = combined_dataframe.astype({'Stock': 'int32', 'Price': 'float32'})

# Keep only in-stock products, then the cheapest offer per EAN
combined_dataframe = combined_dataframe[combined_dataframe['Stock'] > 0]
combined_dataframe = combined_dataframe.loc[
    combined_dataframe.groupby('EAN Code')['Price'].idxmin()
]

combined_dataframe.to_excel(
    'output_backup/output-{}.xlsx'.format(datetime.now().strftime('%Y-%m-%d')),
    index=False,
)
if os.path.exists('output/output.xlsx'):
    os.remove('output/output.xlsx')
combined_dataframe.to_excel('output/output.xlsx', index=False)
print('Output saved to output directory')

for file in os.listdir(input_directory):
    os.remove(os.path.join(input_directory, file))
print('All input files removed')
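Note that recognize_excel_type comes from a local utils module that isn't shown. Purely as an assumption about its contract, such a detector might just look for each wholesaler's characteristic column names:
def recognize_excel_type(dataframe):
    # Hypothetical sketch: guess the wholesaler layout from its columns.
    # ECOM_VGA files keep their headers in the third data row, so their
    # raw columns match neither pattern and fall through to the default.
    columns = set(map(str, dataframe.columns))
    if 'EAN number 1' in columns:
        return 'DIFOX'
    if 'EAN-Code' in columns:
        return 'MAXCOM'
    return 'ECOM_VGA'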

Related

Python using loop to update the cells in excel

The dataframe is created with Join_Date and Name columns:
data = {'Join_Date': ['2023-01', '2023-01', '2023-02', '2023-03'],
        'Name': ['Tom', 'Amy', 'Peter', 'Nick']}
df = pd.DataFrame(data)
I have split the df by Join_Date; can each date's rows be written to Excel one after another using a for loop?
df_split = [df[df['Join_Date'] == i] for i in df['Join_Date'].unique()]
Expected result:
You can use pandas' ExcelWriter:
import pandas as pd
import xlsxwriter
data = {'Join_Date': ['2023-01', '2023-01', '2023-02', '2023-03'],
        'Name': ['Tom', 'Amy', 'Peter', 'Nick']}
df = pd.DataFrame(data)
df_split = [df[df['Join_Date'] == i] for i in df['Join_Date'].unique()]
writer = pd.ExcelWriter("example.xlsx", engine='xlsxwriter')
skip_rows = 0
for df in df_split:
    df.to_excel(writer, sheet_name='Sheet1', startcol=2, startrow=2 + skip_rows, index=False)
    skip_rows += df.shape[0] + 2
writer.close()
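The same idea reads a little more safely with ExcelWriter as a context manager, which closes the workbook even if a write raises partway through (a sketch using the same data):
import pandas as pd

data = {'Join_Date': ['2023-01', '2023-01', '2023-02', '2023-03'],
        'Name': ['Tom', 'Amy', 'Peter', 'Nick']}
df = pd.DataFrame(data)
df_split = [df[df['Join_Date'] == i] for i in df['Join_Date'].unique()]

with pd.ExcelWriter('example.xlsx', engine='xlsxwriter') as writer:
    skip_rows = 0
    for part in df_split:
        # Each group starts two rows below the end of the previous one
        part.to_excel(writer, sheet_name='Sheet1',
                      startcol=2, startrow=2 + skip_rows, index=False)
        skip_rows += part.shape[0] + 2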
You can do it with plain pandas methods, like this (you can add an empty line if you really need it):
import pandas as pd
data = {'Join_Date': ['2023-01', '2023-01', '2023-02', '2023-03'],
        'Name': ['Tom', 'Amy', 'Peter', 'Nick']}
df = pd.DataFrame(data)
def add_header(x):
    x.loc[-1] = 'Join_date', 'Name'
    return x.sort_index().reset_index(drop=True)
df_split = df.groupby(['Join_Date'], group_keys=False)
df_group = df_split.apply(add_header)
df_group.to_excel('output.xlsx', index=False, header=False)
You can add the empty line by editing the add_header function like this:
def add_header(x):
    x.loc[-1] = ' ', ' '
    x = x.sort_index().reset_index(drop=True)
    x.loc[0.5] = 'Join_date', 'Name'
    x = x.sort_index().reset_index(drop=True)
    return x

Removing None from the list generated using excel file, opened using openpyxl

I have used openpyxl to read an Excel file. I want a list of the names from the 4th column, across all rows. The problem is that after a certain row the cells have no values, so None gets stored in the list, but I don't want None in my list, just the names.
#Loading excel file
file_path = '/Volumes/DATA/Project/NLP/Web-scraping/Client_Dataset.xlsx'
data = openpyxl.load_workbook(filename=file_path)
# ws = data.get_sheet_by_name('Data - Sheet1 - Data - Sheet1')
data_excel = data.active
#print(data)
list_ = []
for cell in data_excel['D']:
    if 'Company' == cell.value:
        continue
    list_.append(cell.value)
print(list_)
Output of the above code:
['Addepar', 'ADDI', 'Agicap', 'Airbase', 'Airwallex', 'Alan', 'Albert', 'Alchemy', 'Alloy', 'AlphaSense', 'Alto IRA', 'Amber Group', 'Amount', 'Anchorage Digital', 'Arturo', 'At-Bay', 'Atom Finance', 'Autobooks', 'AvidXchange', 'Balance', 'Belvo', 'Bestow', 'Betterment', 'BharatPe', 'Bitcoin Suisse', 'Bitpanda', 'Bitso', 'Bitwise Asset Management', 'Blockchain.com', 'Blockdaemon', 'BlockFi', 'BlueVine', 'Bolt', 'Borrowell', 'Bought By Many\nManyPets (new name)', 'Brex', 'Brightflag', 'C2FO', 'Cambridge Mobile Telematics', 'Capchase', 'Capital Float',None, None, None, None, None, None, None, None, None, None, None, None]
The expected outcome I need is:
['Addepar', 'ADDI', 'Agicap', 'Airbase', 'Airwallex', 'Alan', 'Albert', 'Alchemy', 'Alloy', 'AlphaSense', 'Alto IRA', 'Amber Group', 'Amount', 'Anchorage Digital', 'Arturo', 'At-Bay', 'Atom Finance', 'Autobooks', 'AvidXchange', 'Balance', 'Belvo', 'Bestow', 'Betterment', 'BharatPe', 'Bitcoin Suisse', 'Bitpanda', 'Bitso', 'Bitwise Asset Management', 'Blockchain.com', 'Blockdaemon', 'BlockFi', 'BlueVine', 'Bolt', 'Borrowell', 'Bought By Many\nManyPets (new name)', 'Brex', 'Brightflag', 'C2FO', 'Cambridge Mobile Telematics', 'Capchase', 'Capital Float']
Really simple: check if cell.value is not None, and it will only append the values that are not None:
#Loading excel file
file_path = '/Volumes/DATA/Project/NLP/Web-scraping/Client_Dataset.xlsx'
data = openpyxl.load_workbook(filename=file_path)
# ws = data.get_sheet_by_name('Data - Sheet1 - Data - Sheet1')
data_excel = data.active
#print(data)
list_ = []
for cell in data_excel['D']:
    if 'Company' == cell.value:
        continue
    if cell.value is not None:  ## <- Et voila
        list_.append(cell.value)
print(list_)
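Equivalently, the whole column can be collected in one comprehension; passing values_only=True to iter_rows yields plain cell values instead of Cell objects (a sketch assuming the same workbook layout):
import openpyxl

file_path = '/Volumes/DATA/Project/NLP/Web-scraping/Client_Dataset.xlsx'
data = openpyxl.load_workbook(filename=file_path)
data_excel = data.active

# min_col=4 / max_col=4 restricts iteration to column D
names = [
    row[0]
    for row in data_excel.iter_rows(min_col=4, max_col=4, values_only=True)
    if row[0] is not None and row[0] != 'Company'
]
print(names)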

How to write to excel sheet only those rows which match the condition using Python pandas

I have a data frame which contains 3 columns (Issue id, Creator, Versions). I need to extract the rows which do not contain the value "<JIRA Version" in the "versions" column (the third and fifth rows in my case; similarly there could be multiple such rows in the data frame).
Below is the code I'm trying, but it actually prints all the rows from the data frame. Any help/suggestions are appreciated.
allissues = []
for i in issues:
    d = {
        'Issue id': i.id,
        'creator': i.fields.creator,
        'resolution': i.fields.resolution,
        'status.name': i.fields.status.name,
        'versions': i.fields.versions,
    }
    allissues.append(d)
df = pd.DataFrame(allissues, columns=['Issue id', 'creator', 'versions'])
matchers = ['<JIRA Version']
for ind in df.values:
    if matchers not in df.values:
        print(df['versions'][ind], df['Issue id'][ind])
Some minor changes in your code:
allissues = []
for i in issues:
    d = {
        'Issue id': i.id,
        'creator': i.fields.creator,
        'resolution': i.fields.resolution,
        'status.name': i.fields.status.name,
        'versions': i.fields.versions,
    }
    allissues.append(d)
df = pd.DataFrame(allissues, columns=['Issue id', 'creator', 'versions'])
matchers = '<JIRA Version'
for ind, row in df.iterrows():
    # Cast to str so the substring check works even when versions holds objects
    if matchers not in str(row.versions):
        print(row['versions'], row['Issue id'])
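Since the versions field may hold JIRA objects rather than plain strings, a vectorized variant could cast the column to str and filter with a boolean mask instead of iterating, then write the matching rows straight to Excel (a sketch; 'filtered_issues.xlsx' is a made-up file name):
# Keep rows whose string form does not contain the marker
mask = ~df['versions'].astype(str).str.contains('<JIRA Version', regex=False)
filtered = df[mask]
print(filtered[['versions', 'Issue id']])
filtered.to_excel('filtered_issues.xlsx', index=False)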

Python Exec not passing full variables to exec shell - with working errors

Python "Exec" command is not passing local values in exec shell. I thought this should be a simple question but all seem stumped. Here is a repeatable working version of the problem ... it took me a bit to recreate a working problem (my files are much larger than examples shown here, there are up to 10-dfs per loop, often 1800 items per df )
EXEC was only passing "PRODUCT" (as opposed to "PRODUCT.AREA" before I added "["{ind_id}"]" and then also it also shows an error "<string> in <module>".
import pandas as pd

datum_0 = {'Products': ['Stocks', 'Bonds', 'Notes'], 'PRODUCT.AREA': ['10200', '50291', '50988']}
df_0 = pd.DataFrame(datum_0, columns=['Products', 'PRODUCT.AREA'])
datum_1 = {'Products': ['Stocks', 'Bonds', 'Notes'], 'PRODUCT.CODE': ['66', '55', '22']}
df_1 = pd.DataFrame(datum_1, columns=['Products', 'PRODUCT.CODE'])
df_0
summary = {'Prodinfo': ['PRODUCT.AREA', 'PRODUCT.CODE']}
df_list = pd.DataFrame(summary, columns=['Prodinfo'])
df_list
# Create a rankings column for the Prodinfo tables
for rows in df_list.itertuples():
    row = rows.Index
    ind_id = df_list.loc[row]['Prodinfo']
    print(row, ind_id)
    exec(f'df_{row}["rank"] = df_{row}["{ind_id}"].rank(ascending=True)')
Of course it's this last line that is throwing the exec errors. Any ideas? Have you got a working global or local variable assignment that fixes it? etc... thanks!
I would use a list to keep all DataFrames:
all_df = [] # list
all_df.append(df_1)
all_df.append(df_2)
and then I wouldn't need exec:
for rows in df_list.itertuples():
    row = rows.Index
    ind_id = df_list.loc[row]['Prodinfo']
    print(row, ind_id)
    all_df[row]["rank"] = all_df[row][ind_id].rank(ascending=True)
Alternatively, I would use a dictionary:
all_df = {} # dict
all_df['PRODUCT.AREA'] = df_1
all_df['PRODUCT.CODE'] = df_2
and then I need neither exec nor df_list:
for key, df in all_df.items():
    df["rank"] = df[key].rank(ascending=True)
Minimal working code with a list:
import pandas as pd
all_df = [] # list
datum = {
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.AREA': ['10200', '50291', '50988']
}
all_df.append( pd.DataFrame(datum) )
datum = {
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.CODE': ['66', '55', '22']
}
all_df.append( pd.DataFrame(datum) )
#print( all_df[0] )
#print( all_df[1] )
print('--- before ---')
for df in all_df:
    print(df)
summary = {'Prodinfo': ['PRODUCT.AREA', 'PRODUCT.CODE']}
df_list = pd.DataFrame(summary, columns=['Prodinfo'])
#print(df_list)
for rows in df_list.itertuples():
    row = rows.Index
    ind_id = df_list.loc[row]['Prodinfo']
    #print(row, ind_id)
    all_df[row]["rank"] = all_df[row][ind_id].rank(ascending=True)
print('--- after ---')
for df in all_df:
    print(df)
Minimal working code with a dict:
import pandas as pd
all_df = {} # dict
datum = {
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.AREA': ['10200', '50291', '50988']
}
all_df['PRODUCT.AREA'] = pd.DataFrame(datum)
datum = {
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.CODE': ['66', '55', '22']
}
all_df['PRODUCT.CODE'] = pd.DataFrame(datum)
print('--- before ---')
for df in all_df.values():
    print(df)
for key, df in all_df.items():
    df["rank"] = df[key].rank(ascending=True)
print('--- after ---')
for df in all_df.values():
    print(df)
Frankly, for two dataframes I wouldn't waste time on df_list and a for-loop:
import pandas as pd
datum = {
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.AREA': ['10200', '50291', '50988']
}
df_0 = pd.DataFrame(datum)
datum = {
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.CODE': ['66', '55', '22']
}
df_1 = pd.DataFrame(datum)
print('--- before ---')
print( df_0 )
print( df_1 )
df_0["rank"] = df_0['PRODUCT.AREA'].rank(ascending=True)
df_1["rank"] = df_1['PRODUCT.CODE'].rank(ascending=True)
print('--- after ---')
print( df_0 )
print( df_1 )
And I would probably even put everything in one dataframe:
import pandas as pd
df = pd.DataFrame({
    'Products': ['Stocks', 'Bonds', 'Notes'],
    'PRODUCT.AREA': ['10200', '50291', '50988'],
    'PRODUCT.CODE': ['66', '55', '22'],
})
print('--- before ---')
print( df )
#df["rank PRODUCT.AREA"] = df['PRODUCT.AREA'].rank(ascending=True)
#df["rank PRODUCT.CODE"] = df['PRODUCT.CODE'].rank(ascending=True)
for name in ['PRODUCT.AREA', 'PRODUCT.CODE']:
    df[f"rank {name}"] = df[name].rank(ascending=True)
print('--- after ---')
print( df )
Result:
--- before ---
  Products PRODUCT.AREA PRODUCT.CODE
0   Stocks        10200           66
1    Bonds        50291           55
2    Notes        50988           22
--- after ---
  Products PRODUCT.AREA PRODUCT.CODE  rank PRODUCT.AREA  rank PRODUCT.CODE
0   Stocks        10200           66                1.0                3.0
1    Bonds        50291           55                2.0                2.0
2    Notes        50988           22                3.0                1.0
As expected, this was an easy fix. Thanks to answerers who gave much to think about ...
Kudos to #holdenweb and his answer at ... Create multiple dataframes in loop
dfnew = {}  # CREATE A DICTIONARY!!! - THIS WAS THE TRICK I WAS MISSING
df_ = {}
for rows in df_list.itertuples():
    row = rows.Index
    ind_id = df_list.loc[row]['Prodinfo']
    dfnew[row] = df_[row]  # or pd.read_csv(csv_file) or database_query or ...
    dfnew[row].dropna(inplace=True)
    dfnew[row]["rank"] = dfnew[row][ind_id].rank(ascending=True)
Works well and is very simple...

No output file created

Very simply, this code should output a summary file to a specified output directory, but it doesn't, and I can't figure out why.
I have tried editing the configuration as well as changing directories.
import os
import pandas as pd

def summarise(indir, outfile):
    os.chdir(indir)
    filelist = ""
    dflist = []
    colnames = ["DSP Code", "Report Date", "Initial Date", "End Date", "Transaction Type", "Sale Type",
                "Distribution Channel", "Products Origin ID", "Product ID", "Artist", "Title", "Units Sold",
                "Retail Price", "Dealer Price", "Additional Revenue", "Warner Share", "Entity to be billed",
                "E retailer name", "E retailer Country", "End Consumer Country", "Price Code", "Currency Code"]
    for filename in filelist:
        print(filename)
        df = pd.read_csv('SYB_M_20171001_20171031.txt', header=None, encoding='utf-8', sep='\t',
                         names=colnames, skiprows=1,
                         usecols=['Units Sold', 'Dealer Price', 'End Consumer Country', 'Currency Code'])
        # Multiplying units by dealer price will give you sum of file
        df['Sum of Revenue'] = df['Units Sold'] * df['Dealer Price']
        # Get those first two columns
        d = {'Sum of Revenue': 'Total Revenue', 'Units Sold': 'Total Units'}
        for col, newcol in d.items():
            df.loc[df.index[0], newcol] = df[col].sum()
        # Add the rest for every country:
        s = df.groupby('End Consumer Country')['Units Sold'].sum().to_frame().T.add_suffix(' Total')
        s.index = [df.index[0]]
        df = pd.concat([df, s], 1, sort=False)
        df.to_csv(outfile + r"\output.csv", index=None)
        dflist.append(filename)

summarise(r"O:\James Upson\Sound Track Your Brand Testing\SYB Test",
          r"O:\James Upson\Sound Track Your Brand Testing\SYB Test Formatted")
I am expecting an output file called 'output.csv'
Hmm, ok, I see filelist = "" and then for filename in filelist:
You're trying to loop over an empty string, so the loop body never runs and no output file is ever written.
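The fix is to actually build the file list from the input directory, e.g. with glob, and to read each filename rather than the hard-coded one (a sketch of the start of the loop inside summarise; the *.txt pattern is an assumption about the file naming):
import glob

# Collect the input reports instead of looping over an empty string
filelist = glob.glob(os.path.join(indir, '*.txt'))
for filename in filelist:
    print(filename)
    # Read the current file, not a fixed name
    df = pd.read_csv(filename, header=None, encoding='utf-8', sep='\t',
                     names=colnames, skiprows=1,
                     usecols=['Units Sold', 'Dealer Price',
                              'End Consumer Country', 'Currency Code'])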
