Combining Multiple JSON Objects into One DataFrame in Python Pandas - python

I'm not sure what I'm missing here, but I have 2 zip files that contain JSON files, and I'm trying to combine the data I extract from them into one DataFrame, but my loop keeps giving me separate records. Here is what I have prior to constructing the DataFrame. I tried pd.concat, but I think my issue has more to do with the way I'm reading the files in the first place.
data = []
for FileZips in glob.glob('*.zip'):
    with zipfile.ZipFile(FileZips, 'r') as myzip:
        for logfile in myzip.namelist():
            with myzip.open(logfile) as f:
                contents = f.readlines()[-2]
                jfile = json.loads(contents)
                print len(jfile)
returns:
40935
40935

You can use read_json (assuming it's valid json).
I would also break this up into more functions for readability:
def zip_to_df(zip_file):
    with zipfile.ZipFile(zip_file, 'r') as myzip:
        return pd.concat((log_as_df(logfile, myzip)
                          for logfile in myzip.namelist()),
                         ignore_index=True)

def log_as_df(logfile, myzip):
    with myzip.open(logfile, 'r') as f:
        contents = f.readlines()[-2]
        return pd.read_json(contents)

df = pd.concat(map(zip_to_df, glob.glob('*.zip')), ignore_index=True)
Note: this does more concats, but I think it's worth it for readability; you could do just one concat...
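For completeness, a minimal sketch of that single-concat variant, reusing the log_as_df helper above (the zips_to_df name is just illustrative):
import glob
import zipfile
import pandas as pd

def zips_to_df(zip_files):
    # one pd.concat over a generator that yields a frame per logfile in every zip
    def frames():
        for zip_file in zip_files:
            with zipfile.ZipFile(zip_file, 'r') as myzip:
                for logfile in myzip.namelist():
                    yield log_as_df(logfile, myzip)
    return pd.concat(frames(), ignore_index=True)

df = zips_to_df(glob.glob('*.zip'))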

I was able to get what I need with a small adjustment to my indentation!
dfs = []
for FileZips in glob.glob('*.zip'):
    with zipfile.ZipFile(FileZips, 'r') as myzip:
        for logfile in myzip.namelist():
            with myzip.open(logfile, 'r') as f:
                contents = f.readlines()[-2]
                jfile = json.loads(contents)
                dfs.append(pd.DataFrame(jfile))

df = pd.concat(dfs, ignore_index=True)
print len(df)

Related

How to write the filename and rowcount in a csv in python?

I've been trying to make a CSV from a big list of other CSVs, and here's the deal: I want to get the names of these CSV files and put them in the CSV that I want to create, plus I also need the row count from the CSV files whose names I'm getting. Here's what I've tried so far:
def getRegisters(file):
    results = pd.read_csv(file, header=None, error_bad_lines=False, sep='\t', low_memory=False)
    print(len(results))
    return len(results)

path = "C:/Users/gdldieca/Desktop/TESTSFORPANW/New folder"
dirs = os.listdir(path)

with open("C:/Users/gdldieca/Desktop/TESTSFORPANW/New folder/FilesNames.csv", 'w', newline='') as f:
    writer = csv.writer(f, delimiter='\t')
    writer.writerow(("File", "Rows"))
    for names in dirs:
        sfile = getRegisters("C:/Users/gdldieca/Desktop/TESTSFORPANW/New folder/" + str(names))
        writer.writerow((names, sfile))
However, I can't seem to get the files' row counts even though pandas actually returns them. I'm getting this error:
_csv.Error: iterable expected, not int
The final result would be something like this written into the CSV
File1 90
File2 10
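For what it's worth, that _csv.Error usually means a writer method was handed a bare int where a row (an iterable) was expected, for example writer.writerows(...) called with a tuple containing an int, instead of writer.writerow(...). A small hypothetical reproduction:
import csv
import io

writer = csv.writer(io.StringIO(), delimiter='\t')
writer.writerow(("File1", 90))    # OK: one row whose second cell is an int
writer.writerows(("File1", 90))   # raises _csv.Error: iterable expected, not int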
If you are using pandas, I think you can also use it to make a CSV file with all the values that you need. Here is an alternative:
import os
import pandas as pd

directory = 'D:\\MY\\PATH\\ALLCSVFILE\\'

# create a list to collect all the rows
rows_list = []
for filename in os.listdir(directory):
    if filename.endswith(".csv"):
        file = os.path.join(directory, filename)
        df = pd.read_csv(file)
        # count rows
        rowcount = len(df.index)
        new_row = {'namefile': filename, 'count': rowcount}
        rows_list.append(new_row)

# pass the list to a dataframe
df1 = pd.DataFrame(rows_list)
print(df1)
df1.to_csv('test.csv', sep=',')
Result:

Combine two rows into one in a csv file with Python

I am trying to combine multiple rows in a CSV file together. I could easily do it in Excel, but I want to do this for hundreds of files, so I need it as code. I have tried to store the rows in arrays, but it doesn't seem to work. I am using Python to do it.
So let's say I have a CSV file like this:
1,2,3
4,5,6
7,8,9
All I want to do is to have a CSV file like this:
1,2,3,4,5,6,7,8,9
The code I have tried is this:
fin = open("C:\\1.csv", 'r+')
fout = open("C:\\2.csv", 'w')
for line in fin.xreadlines():
    new = line.replace(',', ' ', 1)
    fout.write(new)
fin.close()
fout.close()
Could you please help?
You should be using the csv module for this as splitting CSV manually on commas is very error-prone (single columns can contain strings with commas, but you would incorrectly end up splitting this into multiple columns). The CSV module uses lists of values to represent single rows.
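For illustration, here is the failure mode with a made-up line containing a quoted comma inside a field:
import csv
from io import StringIO

line = '1,"Smith, John",3'
print(line.split(','))                   # ['1', '"Smith', ' John"', '3']  -- naive split breaks the field apart
print(next(csv.reader(StringIO(line))))  # ['1', 'Smith, John', '3']       -- csv.reader keeps it intact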
import csv

def return_contents(file_name):
    with open(file_name) as infile:
        reader = csv.reader(infile)
        return list(reader)

data1 = return_contents('csv1.csv')
data2 = return_contents('csv2.csv')

print(data1)
print(data2)

combined = []
for row in data1:
    combined.extend(row)
for row in data2:
    combined.extend(row)

with open('csv_out.csv', 'w', newline='') as outfile:
    writer = csv.writer(outfile)
    writer.writerow(combined)
That code gives you the basis of the approach but it would be ugly to extend this for hundreds of files. Instead, you probably want os.listdir to pull all the files in a single directory, one by one, and add them to your output. This is the reason that I packed the reading code into the return_contents function; we can repeat the same process millions of times on different files with only one set of code to do the actual reading. Something like this:
import csv
import os

def return_contents(file_name):
    with open(file_name) as infile:
        reader = csv.reader(infile)
        return list(reader)

all_files = os.listdir('my_csvs')
combined_output = []

for file in all_files:
    data = return_contents('my_csvs/{}'.format(file))
    for row in data:
        combined_output.extend(row)

with open('csv_out.csv', 'w', newline='') as outfile:
    writer = csv.writer(outfile)
    writer.writerow(combined_output)
If you are specifically dealing with the CSV file format, I recommend you use the csv package for the file operations. If you also use the with...as statement, you don't need to worry about closing the file, etc. You just need to define PATH and the program will iterate over all .csv files.
Here is what you can do:
import os
import csv

PATH = "your folder path"

def order_list():
    data_list = []
    for filename in os.listdir(PATH):
        if filename.endswith(".csv"):
            with open(os.path.join(PATH, filename)) as csvfile:
                read_csv = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
                for row in read_csv:
                    data_list.extend(row)
    print(data_list)

if __name__ == '__main__':
    order_list()
Store your data in a pandas DataFrame:
import pandas as pd
df = pd.read_csv('file.csv')
Store the modified DataFrame in a new one:
df_2 = df.groupby('Column_Name').agg(lambda x: ' '.join(x)).reset_index()  ## write the name of your column
Write the DataFrame to a new CSV:
df_2.to_csv("file_modified.csv")
You could also do it like this:
fIn = open("test.csv", "r")
fOut = open("output.csv", "w")
fOut.write(",".join([line for line in fIn]).replace("\n",""))
fIn.close()
fOut.close()
If you now want to run it on multiple files, you can run it as a script with arguments:
import sys
fIn = open(sys.argv[1], "r")
fOut = open(sys.argv[2], "w")
fOut.write(",".join([line for line in fIn]).replace("\n",""))
fIn.close()
fOut.close()
Now, supposing you use some Linux system and the script is called csvOnliner.py, you could call it with:
for i in *.csv; do python csvOnliner.py $i changed_$i; done
On Windows you could do it like this:
FOR %i IN (*.csv) DO csvOnliner.py %i changed_%i

Concatenate all CSV files in a directory with Python [duplicate]

I have 200 separate CSV files here, named from SH (1) to SH (200). I want to merge them into a single CSV file. How can I do it?
As ghostdog74 said, but this time with headers:
with open("out.csv", "ab") as fout:
# first file:
with open("sh1.csv", "rb") as f:
fout.writelines(f)
# now the rest:
for num in range(2, 201):
with open("sh"+str(num)+".csv", "rb") as f:
next(f) # skip the header, portably
fout.writelines(f)
Why can't you just sed 1d sh*.csv > merged.csv?
Sometimes you don't even have to use python!
Use the accepted StackOverflow answer to create a list of the CSV files that you want to append, and then run this code:
import pandas as pd
combined_csv = pd.concat( [ pd.read_csv(f) for f in filenames ] )
And if you want to export it to a single csv file, use this:
combined_csv.to_csv( "combined_csv.csv", index=False )
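The filenames list above isn't defined in the snippet; a hedged sketch of building it with glob (assuming the SH naming from the question) would be:
import glob
import pandas as pd

filenames = sorted(glob.glob("SH*.csv"))  # adjust the pattern to your actual file names
combined_csv = pd.concat([pd.read_csv(f) for f in filenames])
combined_csv.to_csv("combined_csv.csv", index=False)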
fout = open("out.csv", "a")
for num in range(1, 201):
    for line in open("sh"+str(num)+".csv"):
        fout.write(line)
fout.close()
I'm just going to throw another code example into the basket:
from glob import glob

with open('singleDataFile.csv', 'a') as singleFile:
    for csvFile in glob('*.csv'):
        for line in open(csvFile, 'r'):
            singleFile.write(line)
It depends what you mean by "merging" -- do they have the same columns? Do they have headers? For example, if they all have the same columns, and no headers, simple concatenation is sufficient (open the destination file for writing, loop over the sources opening each for reading, use shutil.copyfileobj from the open-for-reading source into the open-for-writing destination, close the source, keep looping -- use the with statement to do the closing on your behalf). If they have the same columns, but also headers, you'll need a readline on each source file except the first, after you open it for reading before you copy it into the destination, to skip the headers line.
If the CSV files don't all have the same columns then you need to define in what sense you're "merging" them (like a SQL JOIN? or "horizontally" if they all have the same number of lines? etc, etc) -- it's hard for us to guess what you mean in that case.
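For the common case described above (same columns, each file with a header line), a minimal sketch using shutil.copyfileobj, assuming files named sh1.csv through sh200.csv:
import shutil

with open("merged.csv", "w") as dst:
    for num in range(1, 201):
        with open("sh{}.csv".format(num)) as src:
            if num > 1:
                src.readline()            # skip the header on every file except the first
            shutil.copyfileobj(src, dst)  # copy the remaining lines verbatim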
It's quite easy to combine all the files in a directory and merge them:
import glob
import csv

# Open result file
with open('output.txt', 'wb') as fout:
    wout = csv.writer(fout, delimiter=',')
    interesting_files = glob.glob("*.csv")
    h = True
    for filename in interesting_files:
        print 'Processing', filename
        # Open and process file
        with open(filename, 'rb') as fin:
            if h:
                h = False
            else:
                fin.next()  # skip header
            for line in csv.reader(fin, delimiter=','):
                wout.writerow(line)
A slight change to the code above as it does not actually work correctly.
It should be as follows...
from glob import glob

with open('main.csv', 'a') as singleFile:
    for csv in glob('*.csv'):
        if csv == 'main.csv':
            pass
        else:
            for line in open(csv, 'r'):
                singleFile.write(line)
If you are working on Linux/Mac, you can do this:
from subprocess import call
script="cat *.csv>merge.csv"
call(script,shell=True)
If the merged CSV is going to be used in Python then just use glob to get a list of the files to pass to fileinput.input() via the files argument, then use the csv module to read it all in one go.
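A hedged sketch of that glob + fileinput + csv combination (file names assumed):
import csv
import fileinput
import glob

files = sorted(glob.glob("SH*.csv"))          # the CSVs to merge
with fileinput.input(files=files) as stream:  # one continuous stream over all of them
    rows = list(csv.reader(stream))           # parse every line with the csv module
# note: if the files have headers, each file's header shows up as a row and may need filtering
print(len(rows))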
OR, you could just do
cat sh*.csv > merged.csv
You can simply use the in-built csv library. This solution will work even if some of your CSV files have slightly different column names or headers, unlike the other top-voted answers.
import csv
import glob

filenames = [i for i in glob.glob("SH*.csv")]

header_keys = []
merged_rows = []

for filename in filenames:
    with open(filename) as f:
        reader = csv.DictReader(f)
        merged_rows.extend(list(reader))
        header_keys.extend([key for key in reader.fieldnames if key not in header_keys])

with open("combined.csv", "w") as f:
    w = csv.DictWriter(f, fieldnames=header_keys)
    w.writeheader()
    w.writerows(merged_rows)
The merged file will contain all possible columns (header_keys) that can be found in the files. Any absent columns in a file would be rendered as blank / empty (but preserving rest of the file's data).
Note:
This won't work if your CSV files have no headers. In that case you can still use the csv library, but instead of using DictReader & DictWriter, you'll have to work with the basic reader & writer.
This may run into issues when you are dealing with massive data, since the entirety of the content is being stored in memory (the merged_rows list).
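For that no-headers case, a minimal sketch with the basic reader and writer (assuming all files share the same column layout):
import csv
import glob

rows = []
for filename in sorted(glob.glob("SH*.csv")):   # assumed naming pattern
    with open(filename, newline="") as f:
        rows.extend(csv.reader(f))              # no header row to skip

with open("combined.csv", "w", newline="") as f:
    csv.writer(f).writerows(rows)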
On top of the solution that @Adders made and @varun later improved, I also implemented a small improvement to leave the whole merged CSV with only the main header:
from glob import glob

filename = 'main.csv'
with open(filename, 'a') as singleFile:
    first_csv = True
    for csv in glob('*.csv'):
        if csv == filename:
            pass
        else:
            header = True
            for line in open(csv, 'r'):
                if first_csv and header:
                    singleFile.write(line)
                    first_csv = False
                    header = False
                elif header:
                    header = False
                else:
                    singleFile.write(line)
singleFile.close()
Best regards!!!
You could import csv then loop through all the CSV files reading them into a list. Then write the list back out to disk.
import csv

rows = []
for f in (file1, file2, ...):
    reader = csv.reader(open(f, "rb"))
    for row in reader:
        rows.append(row)

writer = csv.writer(open("some.csv", "wb"))
writer.writerows(rows)
The above is not very robust as it has no error handling nor does it close any open files.
This should work whether or not the individual files have one or more rows of CSV data in them. Also, I did not run this code, but it should give you an idea of what to do.
I modified what @wisty said to work with Python 3.x, for those of you that have an encoding problem; I also use the os module to avoid hard-coding paths:
import os

def merge_all():
    dir = os.chdir('C:\python\data\\')
    fout = open("merged_files.csv", "ab")
    # first file:
    for line in open("file_1.csv", 'rb'):
        fout.write(line)
    # now the rest:
    list = os.listdir(dir)
    number_files = len(list)
    for num in range(2, number_files):
        f = open("file_" + str(num) + ".csv", 'rb')
        f.__next__()  # skip the header
        for line in f:
            fout.write(line)
        f.close()  # not really needed
    fout.close()
Here is a script that concatenates the CSV files named SH1.csv to SH200.csv while keeping the headers:
import glob
import re

# Looking for filenames like 'SH1.csv' ... 'SH200.csv'
pattern = re.compile("^SH([1-9]|[1-9][0-9]|1[0-9][0-9]|200).csv$")
file_parts = [name for name in glob.glob('*.csv') if pattern.match(name)]

with open("file_merged.csv", "wb") as file_merged:
    for (i, name) in enumerate(file_parts):
        with open(name, "rb") as file_part:
            if i != 0:
                next(file_part)  # skip headers if not first file
            file_merged.write(file_part.read())
Updating wisty's answer for python3
fout = open("out.csv", "a")
# first file:
for line in open("sh1.csv"):
    fout.write(line)
# now the rest:
for num in range(2, 201):
    f = open("sh"+str(num)+".csv")
    next(f)  # skip the header
    for line in f:
        fout.write(line)
    f.close()  # not really needed
fout.close()
Let's say you have 2 csv files like these:
csv1.csv:
id,name
1,Armin
2,Sven
csv2.csv:
id,place,year
1,Reykjavik,2017
2,Amsterdam,2018
3,Berlin,2019
and you want the result to be like this csv3.csv:
id,name,place,year
1,Armin,Reykjavik,2017
2,Sven,Amsterdam,2018
3,,Berlin,2019
Then you can use the following snippet to do that:
import csv
import pandas as pd

# the file names
f1 = "csv1.csv"
f2 = "csv2.csv"
out_f = "csv3.csv"

# read the files
df1 = pd.read_csv(f1)
df2 = pd.read_csv(f2)

# get the keys
keys1 = list(df1)
keys2 = list(df2)

# merge both files
for idx, row in df2.iterrows():
    data = df1[df1['id'] == row['id']]
    # if a row with such an id does not exist, add the whole row
    if data.empty:
        next_idx = len(df1)
        for key in keys2:
            df1.at[next_idx, key] = df2.at[idx, key]
    # if a row with such an id exists, add only the missing keys with their values
    else:
        i = int(data.index[0])
        for key in keys2:
            if key not in keys1:
                df1.at[i, key] = df2.at[idx, key]

# save the merged files
df1.to_csv(out_f, index=False, encoding='utf-8', quotechar="", quoting=csv.QUOTE_NONE)
With the help of a loop you can achieve the same result for multiple files as it is in your case (200 csv files).
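As a rough sketch of that loop, here is one way to fold many such files into one DataFrame using pandas' outer merge on 'id' rather than the manual key-by-key copy above (this assumes every file has an 'id' column and the other column names don't overlap):
import pandas as pd

filenames = ["csv{}.csv".format(i) for i in range(1, 201)]  # assumed file names

merged = pd.read_csv(filenames[0])
for name in filenames[1:]:
    df = pd.read_csv(name)
    # outer merge keeps every id from both sides; missing values become NaN,
    # which pandas writes out as empty cells in the CSV
    merged = merged.merge(df, on="id", how="outer")

merged.to_csv("merged.csv", index=False)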
If the files aren't numbered in order, take the hassle-free approach below:
Python 3.6 on windows machine:
import pandas as pd
from glob import glob

interesting_files = glob("C:/temp/*.csv")  # grabs all the csv files from the directory you mention here

df_list = []
for filename in sorted(interesting_files):
    df_list.append(pd.read_csv(filename))
full_df = pd.concat(df_list)

# save the final file in the same/a different directory:
full_df.to_csv("C:/temp/merged_pandas.csv", index=False)
An easy-to-use function:
def csv_merge(destination_path, *source_paths):
    '''
    Merges all csv files in source_paths into destination_path.
    :param destination_path: Path of a single csv file, doesn't need to exist
    :param source_paths: Paths of csv files to be merged, need to exist
    :return: None
    '''
    with open(destination_path, "a") as dest_file:
        # copy the first file, header included
        with open(source_paths[0]) as src_file:
            for src_line in src_file:
                dest_file.write(src_line)
        # copy the remaining files, skipping their header lines
        for path in source_paths[1:]:
            with open(path) as src_file:
                next(src_file)
                for src_line in src_file:
                    dest_file.write(src_line)
import pandas as pd
import os

df = pd.read_csv("e:\\data science\\kaggle assign\\monthly sales\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data\\Sales_April_2019.csv")

files = [file for file in os.listdir("e:\\data science\\kaggle assign\\monthly sales\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data")]
for file in files:
    print(file)

all_data = pd.DataFrame()
for file in files:
    df = pd.read_csv("e:\\data science\\kaggle assign\\monthly sales\\Pandas-Data-Science-Tasks-master\\SalesAnalysis\\Sales_Data\\" + file)
    all_data = pd.concat([all_data, df])

all_data.head()
I have done it by implementing a function that expects an output file and the paths of the input files.
The function copies the content of the first file into the output file and then does the same for the rest of the input files, but without the header line.
def concat_files_with_header(output_file, *paths):
    for i, path in enumerate(paths):
        with open(path) as input_file:
            if i > 0:
                next(input_file)  # Skip header
            output_file.writelines(input_file)
Usage example of the function:
if __name__ == "__main__":
    paths = [f"sh{i}.csv" for i in range(1, 201)]
    with open("output.csv", "w") as output_file:
        concat_files_with_header(output_file, *paths)

More Efficient way to parse JSON and convert to CSV in Python

I'm not sure if this will be enough information, but I am currently trying to extract a subset of data from a very large file containing many JSON objects as lines, format it as CSV, and dump it into one CSV file. I have the implementation below. Speed isn't too bad, but I was wondering if there is a more efficient way to do this. I feel like the pandas part where I create DataFrames could be a little nicer:
for files in zip_files:
    with zipfile.ZipFile(files, 'r') as myzip:
        for logfile in myzip.namelist():
            list1 = []
            list2 = []
            f = myzip.open(logfile)
            contents = f.readlines()
            for line in contents[:]:
                try:
                    parsed = json.loads(line[:-2])
                    if "key1" in parsed.keys():
                        if "val1" in parsed['key1']['key2']:
                            if "val2" in parsed['key3']:
                                list1.append(parsed['key1'])
                                list2.append(parsed['key3'])
                except ValueError as e:
                    pass
                else:
                    pass
            df1 = pd.DataFrame(list1)
            df2 = pd.DataFrame(list2)
            df3 = df2.join(df1)
            df3['col1'] = df3['col1'].apply(lambda x: ','.join([str(i) for i in x]))
            df3 = df3.drop_duplicates()
            with open(csvout, 'a') as f2:
                df.to_csv(f2, header=None, index=False)
                f2.close()
        f.close()
I did the following:
Annotated the original version with STRANGE (= I am not sure what you are doing), EFFICIENT (= can be made more efficient), SIMPLIFY (=can be made simpler).
Created two other versions that may be more efficient, but change behavior (in terms of memory, or execution behavior). This is elaborated below.
To verify that these suggestions are actually helpful, consider using ipython %timeit. Enter the following at the IPython prompt:
In [0]: %timeit -n<N> %run script.py
Where <N> is the number of runs to average over (1000 by default, which might take too long).
Annotated Original
for files in zip_files:
    with zipfile.ZipFile(files, 'r') as myzip:
        for logfile in myzip.namelist():
            list1 = []
            list2 = []
            f = myzip.open(logfile)

            # contents = f.readlines()
            # for line in contents[:]:
            for line in f:  # EFFICIENT: does the same without making a copy
                try:
                    parsed = json.loads(line[:-2])
                    # if "key1" in parsed.keys():
                    if "key1" in parsed:  # EFFICIENT: no copy
                        # STRANGE: 'val' in dict checks for key existence by
                        # default, are you sure this is what you want?
                        if "val1" in parsed['key1']['key2']:
                            if "val2" in parsed['key3']:
                                list1.append(parsed['key1'])
                                list2.append(parsed['key3'])
                except ValueError as e:
                    pass
                # STRANGE: Why is this here?
                # else:
                #     pass

            df1 = pd.DataFrame(list1)
            df2 = pd.DataFrame(list2)
            df3 = df2.join(df1)

            # EFFICIENT: prefer generator over list comprehension
            # df3['col1'] = df3['col1'].apply(lambda x: ','.join([str(i) for i in x]))
            df3['col1'] = df3['col1'].apply(lambda x: ','.join(str(i) for i in x))
            df3.drop_duplicates(inplace=True)

            # SIMPLIFY:
            # with open(csvout, 'a') as f2:
            #     df.to_csv(f2, header=None, index=False)
            #     f2.close()
            # STRANGE: where does `df` come from? Shouldn't this be df3?
            df.to_csv(csvout, mode='a', header=None, index=False)
        # STRANGE: you open f in a loop, but close it outside of the loop?
        f.close()
Build in Memory, Write Once
If you have enough memory, the following might be faster: rather than appending to the file you concatenate all files in memory first.
This also changes the behavior slightly:
duplicates are filtered across all files
Also some stylistic changes:
for files in zip_files:
    with zipfile.ZipFile(files, 'r') as myzip:
        list1, list2 = [], []  # Notice these are outside the loop
        for logfile in myzip.namelist():
            with myzip.open(logfile) as f:
                for line in f:
                    try:
                        parsed = json.loads(line[:-2])
                    except ValueError as e:  # Presumably we only wish to catch json value errors
                        pass
                    else:
                        if ("key1" in parsed
                                and "val1" in parsed['key1']['key2']
                                and "val2" in parsed['key3']):
                            list1.append(parsed['key1'])
                            list2.append(parsed['key3'])

        # Write only once
        df = pd.DataFrame(list2).join(pd.DataFrame(list1))
        df['col1'] = df['col1'].apply(lambda x: ','.join(str(i) for i in x))
        df.drop_duplicates(inplace=True)
        df.to_csv(csvout, header=None, index=False)
Build in Memory, Write Once, Filter Duplicates Only Per File
Keeping the duplicate filtering local to each file:
for files in zip_files:
    with zipfile.ZipFile(files, 'r') as myzip:
        dfs = []
        for logfile in myzip.namelist():
            list1, list2 = [], []
            with myzip.open(logfile) as f:
                for line in f:
                    try:
                        parsed = json.loads(line[:-2])
                    except ValueError as e:  # Presumably we only wish to catch json value errors
                        pass
                    else:
                        if ("key1" in parsed
                                and "val1" in parsed['key1']['key2']
                                and "val2" in parsed['key3']):
                            list1.append(parsed['key1'])
                            list2.append(parsed['key3'])

            # Build a temporary dataframe to filter the duplicates:
            tmp = pd.DataFrame(list2).join(pd.DataFrame(list1))
            tmp['col1'] = tmp['col1'].apply(lambda x: ','.join(str(i) for i in x))
            tmp.drop_duplicates(inplace=True)
            dfs.append(tmp)

        # Write only once
        pd.concat(dfs, ignore_index=True).to_csv(csvout, header=None, index=False)
