Calculating row and column totals from CSV files - Python

I have the following CSV file about family expenses:
Family, Medical, Travel, Education
Smith, 346, 566, 45
Taylor, 56,837,848
I want to be able to calculate the row totals and column totals. For example:
Smith = 346+566+45
Taylor = 56+837+848
Medical = 346+56
Travel = 566+837
Education = 45+848
I have the following so far:
import csv

file = open('Family expenses.csv', newline='')
reader = csv.reader(file)
header = next(reader)
data = [row for row in header]
ndata = []
x = 0
for x in range(0, 3):
    for i in data[x]:
        i.split(',')
        x += 1
        ndata.append(i)
rdata = [int(s) if s.isdecimal() else s for s in ndata]

There's no need for pandas for this; using DictReader makes it easy:
import csv

file = open("Family expenses.csv", newline="")
reader = csv.DictReader(file, skipinitialspace=True)
results = {}
for row in reader:
    results[row["Family"]] = 0  # initialize result for each family name
    for key, value in row.items():
        if key == "Family":
            continue
        if key not in results:  # initialize result for each category
            results[key] = 0
        results[key] += float(value)  # add value for category
        results[row["Family"]] += float(value)  # add value for family name
for key, result in results.items():
    print(key, result)
I used skipinitialspace because there is stray whitespace after the delimiters in your CSV data.
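With the sample data above, this should print something like the following (plain dict insertion order, so the first family name appears before the category totals):

Smith 957.0
Medical 402.0
Travel 1403.0
Education 893.0
Taylor 1741.0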

Using a list in Python, here you go:
import csv

file = open('Family expenses.csv', newline='')
reader = csv.reader(file)
header = next(reader)          # read and skip the first row (header)
header.pop(0)                  # drop the 'Family' label so only category names remain
num_of_cols = len(header)      # number of numeric columns
sum_col = [0] * num_of_cols    # running column-wise sums
for row in reader:
    sum_row = 0
    print(row[0])
    for i in range(1, len(row)):
        sum_row += int(row[i])
        sum_col[i-1] += int(row[i])
    print(sum_row)
print(header)
print(sum_col)
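For the sample file this prints something like the following (any stray spaces from the header survive, since skipinitialspace isn't used here):

Smith
957
Taylor
1741
[' Medical', ' Travel', ' Education']
[402, 1403, 893]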

Related

How to add a value obtained from a dictionary to a new column in a CSV file?

I have a CSV file list1.csv which looks like this:
MVA,REG
8144801,KYB765ZP
I am trying to add a column to list1.csv so that it will look like this:
MVA,REG,NO
8144801,KYB765ZP,4598
The code a=dict1.get(row[1]) evaluates to 4598, so how do I add this value to the CSV file?
import csv

dict1 = {}
dict2 = {}
list = []
list2 = []
counta = 0
with open('/Users/list2.csv') as file_handler:
    reader = csv.reader(file_handler)
    next(reader, None)  # skip header
    for row in reader:
        key = row[1]
        counta += 1
        #print(key)
        if key in dict1.keys():
            list.append(key)
        else:
            dict1[row[1]] = counta
with open('/Users/list1.csv') as file_handler:
    reader = csv.reader(file_handler)
    next(reader, None)  # skip header
    for row in reader:
        key = row[1]
        #print(key)
        if key in dict1.keys():
            a = dict1.get(row[1])
        else:
            list2.append(key)
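Since a CSV file can't be edited in place, one way to finish this is to read list1.csv, append the looked-up value to each row, and write everything out to a new file. A minimal sketch, assuming dict1 is populated as above (the output name list1_new.csv is just a placeholder):

import csv

with open('/Users/list1.csv', newline='') as infile, \
        open('/Users/list1_new.csv', 'w', newline='') as outfile:
    reader = csv.reader(infile)
    writer = csv.writer(outfile)
    header = next(reader)
    writer.writerow(header + ['NO'])  # extend the header with the new column
    for row in reader:
        # append the value looked up by REG; blank if the key is missing
        writer.writerow(row + [dict1.get(row[1], '')])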

Checking a CSV for the existence of a similar value in Python

Consider the following CSV:
date,description,amount
14/02/2020,march contract,-99.00
15/02/2020,april contract,340.00
16/02/2020,march contract,150.00
17/02/2020,april contract,-100.00
What I'd like to do is:
Iterate through all of the rows
Total the amounts of lines which have the same description
Return the last line which has that newly-calculated amount
Applied to the above example, the CSV would look like this:
16/02/2020,march contract,51.00
17/02/2020,april contract,240.00
So far, I've tried nesting csv.reader()s inside each other, and I'm not getting the result I want.
I'd like to achieve this without any libraries and/or modules.
Here is the code I have so far, where first_row is each row of the CSV and second_row is the inner iteration that looks for matching descriptions:
csv_reader = csv.reader(report_file)
for first_row in csv_reader:
    description_index = 5
    amount_index = 13
    print(first_row)
    for second_row in csv_reader:
        if second_row is not first_row:
            print(first_row[description_index] == second_row[description_index])
            if first_row[description_index] == second_row[description_index]:
                first_row[amount_index] = float(first_row[amount_index]) + float(second_row[amount_index])
This will work (note why the nested readers fail: both loops share the same csv_reader iterator, so the inner loop exhausts the file during the first outer row):
import csv

uniques = {}  # dictionary keyed by description, storing [running total, last date]
with open(report_file, newline='') as f:
    reader = csv.reader(f, delimiter=',')
    next(reader, None)  # skip header row
    for data in reader:
        date = data[0]
        description = data[1]
        if description in uniques:
            cumulative_total = uniques[description][0]
            uniques[description] = [cumulative_total + float(data[2]), date]
        else:
            uniques[description] = [float(data[2]), date]

# print output as date, description, total
for desc, val in uniques.items():
    print(f'{val[1]}, {desc}, {val[0]}')
I know that you've asked for a solution without pandas, but you'll save yourself a lot of time if you use it:
import pandas as pd

df = pd.read_csv(report_file)
totals = df.groupby(df['description']).sum()
print(totals)
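With the sample data, the grouped totals come out as roughly the following (exact formatting varies across pandas versions, as does the handling of the non-numeric date column):

                amount
description
april contract   240.0
march contract    51.0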
I suggest you use pandas; it'll be more efficient. But if you still want to stick with your approach, this will help:
import csv

with open('mycsv.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    value_dict = {}
    line_no = 0
    for row in csv_reader:
        if line_no == 0:  # skip the header row
            line_no += 1
            continue
        cur_date = row[0]
        cur_mon = row[1]
        cur_val = float(row[2])
        if row[1] not in value_dict.keys():
            value_dict[cur_mon] = [cur_date, cur_val]
        else:
            old_date, old_val = value_dict[cur_mon]
            value_dict[cur_mon] = [cur_date, old_val + cur_val]
        line_no += 1
    for key, val_list in value_dict.items():
        print(f"{val_list[0]},{key},{val_list[1]}")
Output:
16/02/2020,march contract,51.0
17/02/2020,april contract,240.0
Working with a dictionary makes it easy to access the values:
import csv
from datetime import datetime

_dict = {}
with open("test.csv", "r") as f:
    reader = csv.reader(f, delimiter=",")
    for i, line in enumerate(reader):
        if i == 0:
            headings = [line]
        else:
            if _dict.get(line[1], None) is None:
                _dict[line[1]] = {
                    'date': line[0],
                    'amount': float(line[2])
                }
            else:
                # keep the most recent date for this description
                if datetime.strptime(_dict.get(line[1]).get('date'), '%d/%m/%Y') < datetime.strptime(line[0], '%d/%m/%Y'):
                    _dict[line[1]]['date'] = line[0]
                _dict[line[1]]['amount'] = _dict[line[1]]['amount'] + float(line[2])
Now _dict contains the unique descriptions and their values:
>>> print(_dict)
{'march contract': {'date': '16/02/2020', 'amount': 51.0},
'april contract': {'date': '17/02/2020', 'amount': 240.0}}
Convert it to a list and add the headings:
headings.extend([[value['date'],key,value['amount']] for key,value in _dict.items()])
>>>print(headings)
[['date', 'description', 'amount'],['16/02/2020', 'march contract', 51.0], ['17/02/2020', 'april contract', 240.0]]
Save the list to a CSV file:
with open("out.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(headings)
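out.csv should then contain:

date,description,amount
16/02/2020,march contract,51.0
17/02/2020,april contract,240.0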
You can also use itertools.groupby and sum() for this if you don't mind outputting in sorted form.
from datetime import datetime
from itertools import groupby
import csv

with open(report_file, 'r') as f:
    reader = csv.reader(f)
    lst = list(reader)[1:]
sorted_input = sorted(lst, key=lambda x: (x[1], datetime.strptime(x[0], '%d/%m/%Y')))  # sort by description and date
groups = groupby(sorted_input, key=lambda x: x[1])
for k, g in groups:
    rows = list(g)
    total = sum(float(row[2]) for row in rows)
    print(f'{rows[-1][0]},{k},{total}')  # print last date, description, total
Output:
17/02/2020,april contract,240.0
16/02/2020,march contract,51.0

Search for strings in CSV files using Python and write the results

#!/usr/bin/python
import csv
import re

string_1 = ('OneTouch AT')
string_2 = ('LinkRunner AT')
string_3 = ('AirCheck')
#searched = ['OneTouch AT', 'LinkRunner AT', 'AirCheck']
print "hello Pythong! "
#def does_match(string):
#    stringl = string.lower()
#    return any(s in stringl for s in searched)
inFile = open('data.csv', "rb")
reader = csv.reader(inFile)
outFile = open('data2.csv', "wb")
writer = csv.writer(outFile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
for row in reader:
    found = False
    for col in row:
        if col in [string_1, string_2, string_3] and not found:
            writer.writerow(row)
            found = True
#for row in reader:
#    if any(does_match(col) for col in row):
#        writer.writerow(row[:2])  # write only the first 2 columns
inFile.close()
outFile.close()
I'm trying to figure out how to search a CSV file for three items. If those items exist, print the row. Ideally I would like only columns 1 and 3 printed to a new file.
Sample Data File
LinkRunner AT Video,10,20
Wireless Performance Video OneTouch AT,1,2
Wired OneTouch AT,200,300
LinkRunner AT,200,300
AirCheck,200,300
Try this:
import csv

search_for = ['OneTouch AT', 'LinkRunner AT', 'AirCheck']
with open('in.csv') as inf, open('out.csv', 'w') as outf:
    reader = csv.reader(inf)
    writer = csv.writer(outf, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for row in reader:
        if row[0] in search_for:
            print('Found: {}'.format(row))
            writer.writerow(row)
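Against the sample data, only rows whose first column exactly equals one of the search terms match (the 'Wired OneTouch AT' and 'Video' rows don't), so this should print something like:

Found: ['LinkRunner AT', '200', '300']
Found: ['AirCheck', '200', '300']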
#!/usr/bin/python
import csv
import numpy as np

class search_csv(object):
    def __init__(self, infile, outfile):
        infile = open(infile, 'rb')
        read_infile = [i for i in csv.reader(infile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)]
        self.non_numpy_data = read_infile
        self.data = np.array(read_infile, dtype=None)
        self.outfile = open(outfile, 'wb')
        self.writer_ = csv.writer(self.outfile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)

    def write_to(self, matched_values):
        self.writer_.writerows(matched_values)
        print ' Matched Values Written '
        return True

    def searcher(self, items, return_cols=[0, 2]):  # items should be passed as a list -> ['OneTouch AT', 'LinkRunner AT', 'AirCheck']
        find_these = np.array(items, dtype=None)
        matching_y = np.in1d(self.data, find_these).reshape(self.data.shape).nonzero()[0]
        matching_data = self.data[matching_y][:, return_cols]
        self.write_to(matching_data)
        self.outfile.close()
        return True

    def non_numpy_search(self, items, return_cols=[0, 2]):
        lst = []
        for i in self.non_numpy_data:
            for ii in items:
                if ii in i:
                    z = []
                    for idx in return_cols:
                        z.append(i[idx])
                    lst.append(z)
                    break
        self.write_to(lst)
        return True

### now use the class ###
SEARCHING_FOR = ['OneTouch AT', 'LinkRunner AT', 'AirCheck']
IN_FILE = 'in_file.csv'
OUT_FILE = 'out_file.csv'
search_csv(IN_FILE, OUT_FILE).non_numpy_search(SEARCHING_FOR)
By the phrasing of your question I'm assuming you just want to complete the task at hand and don't really care how. So copy and paste this in, use your data file as the IN_FILE value and the file you want to write to as the OUT_FILE value, place the values you want to search for in the SEARCHING_FOR list, and you're done.
Things to note:
SEARCHING_FOR should be a list.
The values in SEARCHING_FOR are matched EXACTLY, so 'A' will not match 'a'. If you want to use a regex or something more complex, let me know.
The 'non_numpy_search' method has a 'return_cols' parameter. It defaults to the first and third columns.
If you don't have numpy, let me know.
#!/usr/bin/python
import csv
import re
import sys
import gdata.docs.service

#string_1 = ('OneTouch AT')
#string_2 = ('LinkRunner AT')
#string_3 = ('AirCheck')
searched = ['aircheck', 'linkrunner at', 'onetouch at']

def find_group(row):
    """Return the group index of a row:
    0 if the row contains searched[0],
    1 if the row contains searched[1],
    etc.
    -1 if not found.
    """
    for col in row:
        col = col.lower()
        for j, s in enumerate(searched):
            if s in col:
                return j
    return -1

def does_match(string):
    stringl = string.lower()
    return any(s in stringl for s in searched)

# Open the input file for reading and the output file for writing.
inFile = open('data.csv', "rb")
reader = csv.reader(inFile)
outFile = open('data2.csv', "wb")
writer = csv.writer(outFile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)

#for row in reader:
#    found = False
#    for col in row:
#        if col in [string_1, string_2, string_3] and not found:
#            writer.writerow(row)
#            found = True

"""Build a list of items to sort. If row 12 contains 'LinkRunner AT' (group 1),
store a triple (1, 12, row).
When the triples are sorted later, all rows in group 0 will come first, then
all rows in group 1, etc.
"""
stored = []
for i, row in enumerate(reader):
    g = find_group(row)
    if g >= 0:
        stored.append((g, i, row))
stored.sort()

for g, i, row in stored:
    writer.writerow(tuple(row[k] for k in (0, 2)))  # output columns 1 & 3

#for row in reader:
#    if any(does_match(col) for col in row):
#        writer.writerow(row[:2])  # write only the first 2 columns

# Close the input and output files.
inFile.close()
outFile.close()

Python: Replace one cell in a CSV file

I have a CSV file that has a single cell that I want to edit.
I can write a pretty simple function that, for instance, can look up an ID field in the file and return the row of the ID in question:
import csv
import os

id = 3  # column number of the ID field
csvfile = open(os.path.join(LOCAL_FOLDER, "csvfile.csv"), "rU")  # LOCAL_FOLDER defined elsewhere
csvFile = csv.reader(csvfile, delimiter=",")

def lookup(ID):
    rowNo = 1
    for row in csvFile:
        if row[id] == ID:
            return rowNo
        else:
            rowNo += 1
    return 0
What I want to do is to write a corresponding replace function that will take in an ID, a column variable and a data variable:
def replace(ID, col, data):
    row = lookup(ID)
    # use a CSV writer to replace the item at row, col with data
I have no idea how to do this; all of the examples I can find of how to use the writer only show how to rewrite an entire CSV file, which is not what I'm looking to do. I want the equivalent of a PUT rather than a POST.
fwiw, per inspectorG4dget's suggestion, I rewrote my code as follows:
import csv
import os

LOCAL_FOLDER = os.getcwd()
CSV_FILE = "csvfile.csv"

def lookup(ID):
    csvfile = open(os.path.join(LOCAL_FOLDER, CSV_FILE), "rU")
    csvFile = csv.reader(csvfile, delimiter=",")
    rowNo = 1
    for row in csvFile:
        if row[id] == ID:
            csvfile.close()
            return rowNo
        else:
            rowNo += 1
    csvfile.close()
    return 0

def replace(ID, col, data):
    index = 1
    row = lookup(ID)
    if row == 0:
        return 0
    csvwritefile = open(os.path.join(LOCAL_FOLDER, "temp.csv"), "w")
    csvWriteFile = csv.writer(csvwritefile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)  # obviously change this if you want a different quoting format
    csvreadfile = open(os.path.join(LOCAL_FOLDER, CSV_FILE), "rU")
    csvReadFile = csv.reader(csvreadfile, delimiter=",")
    for readrow in csvReadFile:
        if index == row:
            temp = readrow
            temp[col] = data
            csvWriteFile.writerow(temp)
            index += 1
        else:
            index += 1
            csvWriteFile.writerow(readrow)
    csvwritefile.close()
    csvreadfile.close()
    os.rename(os.path.join(LOCAL_FOLDER, "temp.csv"), os.path.join(LOCAL_FOLDER, CSV_FILE))
    return 1
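For reference, the same PUT-style edit can be sketched with pandas, assuming the file fits in memory; note that here col is a column name rather than a numeric index, and the "ID" header name is an assumption:

import pandas as pd

def replace_cell(ID, col, data, path="csvfile.csv"):
    # CSV rows are variable-length text, so a true in-place edit isn't
    # possible; read everything, change the cell, write everything back.
    df = pd.read_csv(path)
    df.loc[df["ID"] == ID, col] = data
    df.to_csv(path, index=False)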

Python Duplicate Removal

I have a question about removing duplicates in Python. I've read a bunch of posts but have not yet been able to solve it. I have the following CSV file:
EDIT
Input:
ID, Source, 1.A, 1.B, 1.C, 1.D
1, ESPN, 5,7,,,M
1, NY Times,,10,12,W
1, ESPN, 10,,Q,,M
Output should be:
ID, Source, 1.A, 1.B, 1.C, 1.D, duplicate_flag
1, ESPN, 5,7,,,M, duplicate
1, NY Times,,10,12,W, duplicate
1, ESPN, 10,,Q,,M, duplicate
1, NY Times, 5 (or 10 doesn't matter which one),7, 10, 12, W, not_duplicate
In words: if the ID is the same, take values from the row with source "NY Times"; if the "NY Times" row has a blank value in a cell and the duplicate "ESPN" row has a value for that cell, take the value from the "ESPN" row. For output, flag the original lines as duplicates and create a new merged line.
To clarify a bit further, since I need to run this script on many different csv files with different column headers, I can't do something like:
def main():
    with open(input_csv, "rb") as infile:
        input_fields = ("ID", "Source", "1.A", "1.B", "1.C", "1.D")
        reader = csv.DictReader(infile, fieldnames=input_fields)
        with open(output_csv, "wb") as outfile:
            output_fields = ("ID", "Source", "1.A", "1.B", "1.C", "1.D", "d_flag")
            writer = csv.DictWriter(outfile, fieldnames=output_fields)
            writer.writerow(dict((h, h) for h in output_fields))
            next(reader)
            first_row = next(reader)
            for next_row in reader:
                #stuff
Because I want the program to run on the first two columns independently of whatever other columns are in the table. In other words, "ID" and "Source" will be in every input file, but the rest of the columns will change depending on the file.
Would greatly appreciate any help you can provide! FYI, "Source" can only be: NY Times, ESPN, or Wall Street Journal and the order of priority for duplicates is: take NY Times if available, otherwise take ESPN, otherwise take Wall Street Journal. This holds for every input file.
The below code reads all of the records into a big dictionary whose keys are their identifiers and whose values are dictionaries mapping source names to entire data rows. Then it iterates through the dictionary and gives you the output you asked for.
import csv

header = None
idfld = None
sourcefld = None
record_table = {}

with open('input.csv', 'rb') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        row = [x.strip() for x in row]
        if header is None:
            header = row
            for i, fld in enumerate(header):
                if fld == 'ID':
                    idfld = i
                elif fld == 'Source':
                    sourcefld = i
            continue
        key = row[idfld]
        sourcename = row[sourcefld]
        if key not in record_table:
            record_table[key] = {sourcename: row, "all_rows": [row]}
        else:
            if sourcename in record_table[key]:
                # same source seen twice: fill in any blanks
                cur_row = record_table[key][sourcename]
                for i, fld in enumerate(row):
                    if cur_row[i] == '':
                        record_table[key][sourcename][i] = fld
            else:
                record_table[key][sourcename] = row
            record_table[key]["all_rows"].append(row)

print ', '.join(header) + ', duplicate_flag'
for recordid in record_table:
    rowdict = record_table[recordid]
    final_row = [''] * len(header)
    rowcount = len(rowdict["all_rows"])  # number of raw rows seen for this ID
    for sourcetype in ['NY Times', 'ESPN', 'Wall Street Journal']:
        if sourcetype in rowdict:
            row = rowdict[sourcetype]
            for i, fld in enumerate(row):
                if final_row[i] != '':
                    continue
                if fld != '':
                    final_row[i] = fld
    if rowcount > 1:
        for row in rowdict["all_rows"]:
            print ', '.join(row) + ', duplicate'
    print ', '.join(final_row) + ', not_duplicate'
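For reference, a Python 3 sketch of the same merge using the stated source priority; it assumes ID is the first column and Source the second (as in the sample), and it guards against ragged rows:

import csv

PRIORITY = ['NY Times', 'ESPN', 'Wall Street Journal']

with open('input.csv', newline='') as f:
    rows = [[cell.strip() for cell in row] for row in csv.reader(f)]
header, body = rows[0], rows[1:]

groups = {}
for row in body:
    groups.setdefault(row[0], []).append(row)  # group data rows by ID

print(', '.join(header) + ', duplicate_flag')
for rid, group in groups.items():
    # order each group by source priority, then take the first
    # non-blank value in every column
    ordered = sorted(group, key=lambda r: PRIORITY.index(r[1]))
    merged = [next((r[i] for r in ordered if i < len(r) and r[i] != ''), '')
              for i in range(len(header))]
    if len(group) > 1:
        for row in group:
            print(', '.join(row) + ', duplicate')
    print(', '.join(merged) + ', not_duplicate')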
