Beginner deleting columns from CSV (no pandas) - python

I've just started coding. I'm trying to remove certain columns from a CSV for a project, and we aren't supposed to use pandas. For instance, one of the fields I have to delete is called DwTm, but there are about 15 columns I have to get rid of; I only want the first few. Here's what I've got:
import csv
FTemp = "D:/tempfile.csv"
FOut = "D:/NewFile.csv"
with open(FTemp, 'r') as csv_file:
    csv_reader = csv.reader(csv_file)
    with open(FOut, 'w') as new_file:
        fieldnames = ['Stn_Name', 'Lat', 'Long', 'Prov', 'Tm']
        csv_writer = csv.DictWriter(new_file, fieldnames=fieldnames)
        for line in csv_reader:
            del line['DwTm']
            csv_writer.writerow(line)
When I run this, I get the error
del line['DwTm']
TypeError: list indices must be integers or slices, not str
This is the only method I've found to almost work without using pandas. Any ideas?

The easiest way around this is to use a DictReader to read the file. Like DictWriter, which you are using to write the file, DictReader uses dictionaries for rows, so your approach of deleting keys from the old row then writing to the new file will work as you expect.
import csv
FTemp = "D:/tempfile.csv"
FOut = "D:/NewFile.csv"
with open(FTemp, 'r') as csv_file:
    # Adjust the list to have the correct order
    old_fieldnames = ['Stn_Name', 'Lat', 'Long', 'Prov', 'Tm', 'DwTm']
    csv_reader = csv.DictReader(csv_file, fieldnames=old_fieldnames)
    with open(FOut, 'w') as new_file:
        fieldnames = ['Stn_Name', 'Lat', 'Long', 'Prov', 'Tm']
        csv_writer = csv.DictWriter(new_file, fieldnames=fieldnames)
        for line in csv_reader:
            del line['DwTm']
            csv_writer.writerow(line)
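If there are many columns to drop (about 15 in this case), deleting each key by hand gets tedious. DictWriter also accepts extrasaction='ignore', which silently skips any keys not listed in fieldnames, so no del calls are needed at all. A minimal sketch, assuming the input file has a header row with the field names:
import csv
FTemp = "D:/tempfile.csv"
FOut = "D:/NewFile.csv"
with open(FTemp, 'r', newline='') as csv_file:
    # DictReader picks the field names up from the file's header row
    csv_reader = csv.DictReader(csv_file)
    with open(FOut, 'w', newline='') as new_file:
        fieldnames = ['Stn_Name', 'Lat', 'Long', 'Prov', 'Tm']
        # extrasaction='ignore' drops every column not named in fieldnames
        csv_writer = csv.DictWriter(new_file, fieldnames=fieldnames,
                                    extrasaction='ignore')
        csv_writer.writeheader()
        for line in csv_reader:
            csv_writer.writerow(line)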

Below is another approach that deletes columns by index instead of by name:
import csv
# We only want to keep the 'department' field
# We are not interested in 'name' and 'birthday month'
# Keep the list items in descending order so earlier deletions
# don't shift the later indices
NON_INTERESTING_FIELDS_IDX = [2, 0]
rows = []
with open('example.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        for idx in NON_INTERESTING_FIELDS_IDX:
            del row[idx]
        rows.append(','.join(row))
with open('example_out.csv', 'w') as out:
    for row in rows:
        out.write(row + '\n')
example.csv
name,department,birthday month
John Smith,Accounting,November
Erica Meyers,IT,March
example_out.csv
department
Accounting
IT

It's possible to simultaneously open the file to read from and the file to write to. Let's say you know the indices of the columns you want to keep, say 0, 2, and 4:
good_cols = (0, 2, 4)
with open(Ftemp, 'r') as fin, open(Fout, 'w') as fout:
    for line in fin:
        line = line.rstrip()  # clean up newlines
        temp = line.split(',')  # make a list from the line
        data = [temp[x] for x in range(len(temp)) if x in good_cols]
        fout.write(','.join(data) + '\n')
The list comprehension (data) pulls only the columns you want to keep out of each row, and each filtered row is immediately written line-by-line to the new file using the join method (plus a newline for each row).
If you only know the names of the fields you want to keep/remove, it's a bit more involved: you have to extract the indices from the first line of the csv file, but it's not much more difficult, as sketched below.
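A minimal sketch of that name-based variant, assuming the file has a header row and reusing the Ftemp/Fout names from above (the keep_names values are just illustrative):
keep_names = {'Stn_Name', 'Lat', 'Long', 'Prov', 'Tm'}  # columns to keep
with open(Ftemp, 'r') as fin, open(Fout, 'w') as fout:
    header = fin.readline().rstrip().split(',')
    # map the wanted names to their positions in this particular file
    good_cols = [i for i, name in enumerate(header) if name in keep_names]
    fout.write(','.join(header[i] for i in good_cols) + '\n')
    for line in fin:
        temp = line.rstrip().split(',')
        fout.write(','.join(temp[i] for i in good_cols) + '\n')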

Related

Compare two CSV files and write difference in the same file as an extra column in python

Hey intelligent community,
I need a little bit of help because I think I don't see the wood for the trees.
I have two CSV files that look like this:
Name,Number
AAC;2.2.3
AAF;2.4.4
ZCX;3.5.2
Name,Number
AAC;2.2.3
AAF;2.4.4
ZCX;3.5.5
I would like to compare both files and then write any changes like this:
Name,Number,Changes
AAC;2.2.3
AAF;2.4.4
ZCX;3.5.5;change: 3.5.2
So on every line where there is a difference in the number, I want to add the old value as a new column at the end of the line.
The files are formatted the same but sometimes have a new row, so that's why I think I have to map the keys.
I've come this far, but now I'm lost in my thoughts:
Python 3.10.9
import csv
# Reading the first CSV and setting up the mapping
with open('test1.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    rows = list(reader)
    file1_dict = {row[1]: row[0] for row in rows}
# Reading the second CSV and setting up the mapping
with open('test2.csv', 'r') as csvfile:
    reader = csv.reader(csvfile)
    rows = list(reader)
    file2_dict = {row[1]: row[0] for row in rows}
# Comparing the keys and finding the diff
for k in file1_dict:
    if file1_dict[k] != file2_dict[k]:
        file1_dict[k] = file2_dict[k]
        for row in rows:
            if row[1] == k:
                row.append(file2_dict[k])
# write the csv (not sure how to add the word "change:")
with open('test1.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(rows)
If I try this, I don't get a new column; it just "updates" the CSV file with the same columns.
For example, this code gives me the diff rows, but I'm not able to just add them to the existing file and rows.
with open('test1.csv') as fin1:
    with open('test2.csv') as fin2:
        read1 = csv.reader(fin1)
        read2 = csv.reader(fin2)
        diff_rows = (row1 for row1, row2 in zip(read1, read2) if row1 != row2)
        with open('test3.csv', 'w') as fout:
            writer = csv.writer(fout)
            writer.writerows(diff_rows)
Does someone have any tips or help for my problem? I've read many answers on here but can't figure it out.
Thanks a lot.
@bigkeefer
Thanks for your answer. I tried to change it for the ';' delimiter, but it gives a "list index out of range" error.
with open('test3.csv', 'r') as file1:
    reader = csv.reader(file1, delimiter=';')
    rows = list(reader)[1:]
    file1_dict = {row[0]: row[1] for row in rows}
with open('test4.csv', 'r') as file2:
    reader = csv.reader(file2, delimiter=';')
    rows = list(reader)[1:]
    file2_dict = {row[0]: row[1] for row in rows}
new_file = ["Name;Number;Changes\n"]
with open('output.csv', 'w') as nf:
    for key, value in file1_dict.items():
        if value != file2_dict[key]:
            new_file.append(f"{key};{file2_dict[key]};change: {value}\n")
        else:
            new_file.append(f"{key};{value}\n")
    nf.writelines(new_file)
You will need to adapt this to overwrite your first file etcetera, as you mentioned above, but I've left it like this for your testing purposes. Hopefully this will help you in some way.
I've assumed you've actually got the headers above in each file. If not, remove the slicing on the list creations, and change the new_file variable assignment to an empty list ([]).
with open('f1.csv', 'r') as file1:
    reader = csv.reader(file1, delimiter=";")
    rows = list(reader)[1:]
    file1_dict = {row[0]: row[1] for row in rows if row}
with open('f2.csv', 'r') as file2:
    reader = csv.reader(file2, delimiter=";")
    rows = list(reader)[1:]
    file2_dict = {row[0]: row[1] for row in rows if row}
new_file = ["Name;Number;Changes\n"]
for key, value in file1_dict.items():
    if value != file2_dict[key]:
        new_file.append(f"{key};{file2_dict[key]};change: {value}\n")
    else:
        new_file.append(f"{key};{value}\n")
with open('new.csv', 'w') as nf:
    nf.writelines(new_file)
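One caveat, since the files sometimes gain a new row: file2_dict[key] raises a KeyError for any name that exists only in the first file. A variant of the comparison loop using dict.get to tolerate that (the "missing in file 2" label is just illustrative):
for key, value in file1_dict.items():
    other = file2_dict.get(key)  # None if the name is absent from file 2
    if other is None:
        new_file.append(f"{key};{value};missing in file 2\n")
    elif value != other:
        new_file.append(f"{key};{other};change: {value}\n")
    else:
        new_file.append(f"{key};{value}\n")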

Dictwriter delete rows without enumeration or looping

Trying to read in a CSV, add a row at the bottom, and delete a row at the top. I have not been able to find a way to delete a row in the DictWriter object without converting to a list, deleting the row in the list, then writing it out using csv.writer.
There should be a better way than reading/writing twice.
python3.8, ubuntu
Thx.
stime = get_time_str()
new_dict = {'Time': stime, 'Queries': Querycounter.value}
Querycounter.value = 0
# list of column names
field_names = ['Time', 'Queries']
# Open CSV file in append mode
# append the new queries count at the end of the file
with open(AlpacaQueriesCSVfile, 'a') as f_object:
    dictwriter_object = DictWriter(f_object, fieldnames=field_names)
    dictwriter_object.writerow(new_dict)
    f_object.close()
# open using csv.reader, delete the row(s).
with open(AlpacaQueriesCSVfile, "r") as f:
    reader = csv.reader(f, delimiter=",")
    data = list(reader)  # should be a better way of doing this by deleting rows in the dictwriter_object above....later
row_count = len(data)
if row_count > 2880:
    logger.debug('Deleting row from Queries.csv ')
    to_skip = row_count - 2880
    del data[1:to_skip]  # leave first row
    with open(QueriesCSVfile, 'w') as f:
        write = csv.writer(f)
        write.writerows(data)
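One way to cap the file at the last 2880 data rows without building and slicing a full list is collections.deque with maxlen, which keeps only the newest rows as it consumes the reader. A minimal sketch under the same assumptions (file path and row cap as above); it still has to re-read the file once, which is hard to avoid with a plain-text CSV:
import csv
from collections import deque
with open(AlpacaQueriesCSVfile, 'r', newline='') as f:
    reader = csv.reader(f)
    header = next(reader)                    # keep the header row
    last_rows = deque(reader, maxlen=2880)   # only the newest 2880 rows survive
with open(AlpacaQueriesCSVfile, 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(header)
    writer.writerows(last_rows)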

CSV reading and writing; outputted CSV is blank

My program needs a function that reads data from a CSV file ("all.csv") and extracts all the data pertaining to 'Virginia' (extracts each row that has 'Virginia' in it), then writes the extracted data to another CSV file named "Virginia.csv". The program runs without error; however, when I open the "Virginia.csv" file, it is blank. My guess is that the issue is with my nested for loop, but I am not entirely sure what is causing the issue.
Here is the data within the all.csv file:
https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv
Here is my code:
import csv
input_file = 'all.csv'
output_file = 'Virginia.csv'
state = 'Virginia'
mylist = []
def extract_records_for_state(input_file, output_file, state):
    with open(input_file, 'r') as infile:
        contents = infile.readlines()
    with open(output_file, 'w') as outfile:
        writer = csv.writer(outfile)
        for row in range(len(contents)):
            contents[row] = contents[row].split(',')  # split elements
        for row in range(len(contents)):
            for word in range(len(contents[row])):
                if contents[row][2] == state:
                    writer.writerow(row)
extract_records_for_state(input_file, output_file, state)
I ran your code and it gave me an error:
Traceback (most recent call last):
  File "c:\Users\Dolimight\Desktop\Stack Overflow\Geraldo\main.py", line 27, in <module>
    extract_records_for_state(input_file, output_file, state)
  File "c:\Users\Dolimight\Desktop\Stack Overflow\Geraldo\main.py", line 24, in extract_records_for_state
    writer.writerow(row)
_csv.Error: iterable expected, not int
I fixed the error by passing the contents of the row (contents[row]) to the writerow() function and ran it again, and the data showed up in Virginia.csv. It gave me duplicates, so I also removed the word for-loop.
import csv
input_file = 'all.csv'
output_file = 'Virginia.csv'
state = 'Virginia'
mylist = []
def extract_records_for_state(input_file, output_file, state):
    with open(input_file, 'r') as infile:
        contents = infile.readlines()
    with open(output_file, 'w') as outfile:
        writer = csv.writer(outfile)
        for row in range(len(contents)):
            contents[row] = contents[row].split(',')  # split elements
        print(contents)
        for row in range(len(contents)):
            if contents[row][2] == state:
                writer.writerow(contents[row])  # this is what I changed
extract_records_for_state(input_file, output_file, state)
You have two errors. The first is that you try to write the row index at writer.writerow(row) - the row is contents[row]. The second is that you leave the newline in the final column on read but don't strip it on write. Instead you could leverage the csv module more fully. Let the reader parse the rows. And instead of reading into a list, which uses a fair amount of memory, filter and write row by row.
import csv
input_file = 'all.csv'
output_file = 'Virginia.csv'
state = 'Virginia'
mylist = []
def extract_records_for_state(input_file, output_file, state):
    with open(input_file, 'r', newline='') as infile, \
         open(output_file, 'w', newline='') as outfile:
        reader = csv.reader(infile)
        writer = csv.writer(outfile)
        # add header
        writer.writerow(next(reader))
        # filter for state
        writer.writerows(row for row in reader if row[2] == state)
extract_records_for_state(input_file, output_file, state)
Looking at your code two things jump out at me:
I see a bunch of nested statements (logic)
I see you reading a CSV as plain text, then interpreting it as CSV yourself (contents[row] = contents[row].split(',')).
I recommend two things:
break up logic into distinct chunks: all that nesting can be hard to interpret and debug; do one thing, prove that works; do another thing, prove that works; etc...
use the CSV API to its fullest: use it to both read and write your CSVs
I don't want to try and replicate/fix your code, instead I'm offering this general approach to achieve those two goals:
import csv
# Read in
all_rows = []
with open('all.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    next(reader)  # discard header (I didn't see you keep it)
    for row in reader:
        all_rows.append(row)
# Process
filtered_rows = []
for row in all_rows:
    if row[2] == 'Virginia':
        filtered_rows.append(row)
# Write out
with open('filtered.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(filtered_rows)
Once you understand both the logic and the API of those discrete steps, you can move on (advance) to composing something more complex, like the following which reads a row, decides if it should be written, and if so, writes it:
import csv
with open('filtered.csv', 'w', newline='') as f_out:
    writer = csv.writer(f_out)
    with open('all.csv', 'r', newline='') as f_in:
        reader = csv.reader(f_in)
        next(reader)  # discard header
        for row in reader:
            if row[2] == 'Virginia':
                writer.writerow(row)
Using either of those two pieces of code on this (really scaled-down) sample of all.csv:
date,county,state,fips,cases,deaths
2020-03-09,Fairfax,Virginia,51059,4,0
2020-03-09,Virginia Beach city,Virginia,51810,1,0
2020-03-09,Chelan,Washington,53007,1,1
2020-03-09,Clark,Washington,53011,1,0
gets me a filtered.csv that looks like:
2020-03-09,Fairfax,Virginia,51059,4,0
2020-03-09,Virginia Beach city,Virginia,51810,1,0
Given the size of this dataset, the second approach of write-on-demand-inside-the-read-loop is both faster (about 5x faster on my machine) and uses significantly less memory (about 40x less on my machine) because there's no intermediate storage with all_rows.
But, please take the time to run both, read them carefully, and see how each works the way it does.

How to add a header to an existing CSV file without replacing the first row?

What I want to do is actually as it is written in the title.
with open(path, "r+", newline='') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    list_of_column_names = []
    num_cols = len(next(csv_reader))
    for i in range(num_cols):
        list_of_column_names.append(i)
    fields = list_of_column_names
with open('example.csv', "r+", newline='') as writeFile:
    csvwriter = csv.DictWriter(writeFile, delimiter=',', lineterminator='\n', fieldnames=fields)
    writeFile.seek(0, 0)
    csvwriter.writeheader()
I want to enumerate the columns, which initially don't have any column names. But when I run the code, it replaces the data in the first row. For example:
example.csv:
a,b
c,d
e,f
what I want:
0,1
a,b
c,d
e,f
what happens after running the code:
0,1
c,d
e,f
Is there a way to prevent this from happening?
There's no magical way to insert a line into an existing text file.
The following is how I think of doing this, and your code is already covering steps 2-4. Also, I wouldn't mess with the DictWriter since you're not trying to convert a Python dict to CSV (I can see you using it for writing the header, but that's easy enough to do with the regular reader/writer):
1. open a new file for writing
2. read the first row of your CSV
3. interpret the column indexes as the header
4. write the header
5. write the first row
6. read/write the rest of the rows
7. move the new file back to the old file, overwrite (not shown)
Here's what that looks like in code:
import csv
with open('output.csv', 'w', newline='') as out_f:
writer = csv.writer(out_f)
with open('input.csv', newline='') as in_f:
reader = csv.reader(in_f)
# Read the first row
first_row = next(reader)
# Count the columns in first row; equivalent to your `for i in range(len(first_row)): ...`
header = [i for i, _ in enumerate(first_row)]
# Write header and first row
writer.writerow(header)
writer.writerow(first_row)
# Write rest of rows
for row in reader:
writer.writerow(row)
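For step 7, which isn't shown above, os.replace overwrites the original atomically on most platforms; a one-liner assuming the same file names:
import os
os.replace('output.csv', 'input.csv')  # replace the original with the new file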

Merge CSVs in Python with different columns

I have hundreds of large CSV files that I would like to merge into one. However, not all CSV files contain all columns. Therefore, I need to merge files based on column name, not column position.
Just to be clear: in the merged CSV, values should be empty for a cell coming from a line which did not have the column of that cell.
I cannot use the pandas module, because it makes me run out of memory.
Is there a module that can do that, or some easy code?
The csv.DictReader and csv.DictWriter classes should work well (see Python docs). Something like this:
import csv
inputs = ["in1.csv", "in2.csv"]  # etc
# First determine the field names from the top line of each input file
# Comment 1 below
fieldnames = []
for filename in inputs:
    with open(filename, "r", newline="") as f_in:
        reader = csv.reader(f_in)
        headers = next(reader)
        for h in headers:
            if h not in fieldnames:
                fieldnames.append(h)
# Then copy the data
with open("out.csv", "w", newline="") as f_out:  # Comment 2 below
    writer = csv.DictWriter(f_out, fieldnames=fieldnames)
    for filename in inputs:
        with open(filename, "r", newline="") as f_in:
            reader = csv.DictReader(f_in)  # Uses the field names in this file
            for line in reader:
                # Comment 3 below
                writer.writerow(line)
Comments from above:
You need to specify all the possible field names in advance to DictWriter, so you need to loop through all your CSV files twice: once to find all the headers, and once to read the data. There is no better solution, because all the headers need to be known before DictWriter can write the first line. This part would be more efficient using sets instead of lists (the in operator on a list is comparatively slow), but it won't make much difference for a few hundred headers. Sets would also lose the deterministic ordering of a list - your columns would come out in a different order each time you ran the code.
The above code is for Python 3, where weird things happen in the CSV module without newline="". Remove this for Python 2.
At this point, line is a dict with the field names as keys, and the column data as values. You can specify what to do with blank or unknown values in the DictReader and DictWriter constructors.
This method should not run out of memory, because it never has the whole file loaded at once.
For those of us using 2.7, this adds an extra linefeed between records in "out.csv". To resolve this, just change the file mode from "w" to "wb".
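To illustrate the constructor options mentioned in comment 3: restval sets the value written for columns a given file doesn't have, and extrasaction controls what happens to unexpected keys. Both are standard csv.DictWriter parameters; shown here as a drop-in for the writer above:
writer = csv.DictWriter(
    f_out,
    fieldnames=fieldnames,
    restval='',             # value written for columns a row doesn't have
    extrasaction='ignore',  # silently drop keys not listed in fieldnames
)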
The solution by @Aaron Lockey, which is the accepted answer, has worked well for me, except there were no headers for the file: the output had only the row data, and each column was without headings (keys). So I inserted the following:
writer.writeheader()
and it worked perfectly fine for me! So now the entire code appears like this:
import csv
inputs = ["in1.csv", "in2.csv"]  # etc
# First determine the field names from the top line of each input file
fieldnames = []
for filename in inputs:
    with open(filename, "r", newline="") as f_in:
        reader = csv.reader(f_in)
        headers = next(reader)
        for h in headers:
            if h not in fieldnames:
                fieldnames.append(h)
# Then copy the data
with open("out.csv", "w", newline="") as f_out:
    writer = csv.DictWriter(f_out, fieldnames=fieldnames)
    writer.writeheader()  # this is the addition
    for filename in inputs:
        with open(filename, "r", newline="") as f_in:
            reader = csv.DictReader(f_in)  # Uses the field names in this file
            for line in reader:
                writer.writerow(line)
You can use the pandas module to do this pretty easily. This snippet assumes all your csv files are in the current folder.
import pandas as pd
import os
all_csv = [file_name for file_name in os.listdir(os.getcwd()) if '.csv' in file_name]
li = []
for filename in all_csv:
    df = pd.read_csv(filename, index_col=None, header=0, parse_dates=True, infer_datetime_format=True)
    li.append(df)
frame = pd.concat(li, axis=0, ignore_index=True)
frame.to_csv('melted_csv.csv', index=False)
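Since the question mentions pandas running out of memory, a chunked variant may help: read_csv's chunksize parameter yields DataFrames piece by piece, each of which can be appended to the output. A sketch assuming the all_csv list from above; note that a plain append does not realign differing columns the way pd.concat does, so it only suits files sharing the same header:
import pandas as pd
first = True
for filename in all_csv:
    # Read each file in chunks of 10,000 rows instead of all at once
    for chunk in pd.read_csv(filename, chunksize=10_000):
        chunk.to_csv('melted_csv.csv', mode='w' if first else 'a',
                     header=first, index=False)
        first = False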
I've faced a situation where not only the number of columns is different, but also some column names are missing. For this kind of situation, and obviously for your case, this code snippet can help you :)
The tricky part is naming the columns which have no names and adding them to the dictionary. The read_csv_file function plays the main role here.
import csv

def read_csv_file(csv_file_path):
    headers = []
    data = []
    with open(csv_file_path, 'r') as f:
        csv_reader = csv.reader(f)
        rows = []
        for i, row in enumerate(csv_reader):
            if i == 0:
                # Name the columns; give unnamed columns a generated name
                for j in range(len(row)):
                    if row[j].strip() == "":
                        col_name = f"col-{j+1}"
                    else:
                        col_name = row[j]
                    if col_name not in headers:
                        headers.append(col_name)
            else:
                rows.append(row)
                # A data row longer than the header gets extra generated names
                if len(row) > len(headers):
                    for j in range(len(row)):
                        if j + 1 > len(headers):
                            col_name = f"col-{j+1}"
                            if col_name not in headers:
                                headers.append(col_name)
    for i, row in enumerate(rows):
        row_data = {}
        for j in range(len(headers)):
            if len(row) > j:
                row_data[headers[j]] = row[j]
            else:
                row_data[headers[j]] = ''
        data.append(row_data)
    return headers, data
def write_csv_file(file_path, rows):
    if len(rows) > 0:
        headers = list(rows[0].keys())
        with open(file_path, 'w', newline='', encoding='UTF8') as f:
            writer = csv.DictWriter(f, fieldnames=headers)
            writer.writeheader()
            writer.writerows(rows)
# The list of the csv file paths which will be merged
files_to_be_merged = [
    'file-1.csv',
    'file-2.csv',
    'file-3.csv'
]
# Read and store all the file data in new_file_data
final_headers = []
new_file_data = []
for f1 in files_to_be_merged:
    single_file_data = read_csv_file(f1)
    for h in single_file_data[0]:
        if h not in final_headers:
            final_headers.append(h)
    new_file_data += single_file_data[1]
# Add the missing keys to the dictionaries
for d in new_file_data:
    for h in final_headers:
        if d.get(h) is None:
            d[h] = ""
# Write a new file
target_file_name = 'merged_file.csv'
write_csv_file(target_file_name, new_file_data)
