read in csv files from a folder and create html files - python

I'm new to Python and hoping for some help reading in CSV files from a folder and converting each file to an HTML file. This is what I have so far:
import pandas as pd
import os
import glob

path = "htmlplots"
csv_files = glob.glob(os.path.join(path, "*.csv"))

for file in csv_files:
    # read the csv file
    df = pd.read_csv(file)
    # print the filename
    print('File Name:', file.split("\\")[-1])
    # show the content (display() is available in Jupyter/IPython)
    display(df)
Ideally I then need to create HTML files from the resulting CSV files, each with a 'next' and 'previous' link: one to two, two to three (next), and three to two, two to one (previous).

Use:
import pandas as pd
import os
import glob

path = ""
csv_files = glob.glob(os.path.join(path, "*.csv"))

for i, file in enumerate(csv_files):
    df = pd.read_csv(file, header=None)
    # link to the previous file (empty for the first file)
    if i > 0:
        prev = csv_files[i - 1]
        df.loc['prev', :] = f'http://{prev}'
    else:
        df.loc['prev', :] = ''
    # link to the next file (empty for the last file)
    if i != len(csv_files) - 1:
        nxt = csv_files[i + 1]
        df.loc['next', :] = f'http://{nxt}'
    else:
        df.loc['next', :] = ''
    # render_links=True turns URL-like strings into clickable anchors
    df.to_html(f"{file}.html", render_links=True)
Input csv file / output html: (screenshots not reproduced here)
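As a hedged alternative sketch (assuming all generated pages end up side by side in one folder, so relative links work), the navigation can also be written as plain HTML anchors around the rendered table rather than as extra rows:

import glob
import os
import pandas as pd

path = ""
csv_files = sorted(glob.glob(os.path.join(path, "*.csv")))  # sorted so prev/next order is stable

for i, file in enumerate(csv_files):
    df = pd.read_csv(file, header=None)
    # anchors pointing at the neighbouring generated pages; empty at the ends
    prev_link = f'<a href="{csv_files[i-1]}.html">previous</a>' if i > 0 else ''
    next_link = f'<a href="{csv_files[i+1]}.html">next</a>' if i < len(csv_files) - 1 else ''
    with open(f"{file}.html", "w") as out:
        out.write(prev_link + ' ' + next_link + '<br>')
        out.write(df.to_html())  # the table itself, without embedded link rows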

Related

How to convert all JSON files of a directory to text files in Python?

I want to convert all JSON files in a directory to text files with this command, but I get an error. How can I fix it?
import pandas as pd
df = pd.read_json(r"/media/New Volume/a3d/pdb/json_parser/ *.json ")
df.to_csv(r"/media/New Volume/a3d/pdb/json_parser/ *.txt ", index = False)
import os
import pandas as pd

# Get the list of json files in the folder:
str_address = r"/media/New Volume/a3d/pdb/json_parser/"
lst_files = [i for i in os.listdir(str_address) if i.endswith(".json")]

# Loop through the json files
for file_ in lst_files:
    df = pd.read_json(str_address + file_)
    df.to_csv(str_address + file_ + ".txt", index=False)
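An equivalent sketch using pathlib avoids the manual string concatenation (note it writes foo.txt rather than foo.json.txt, and assumes the same folder):

from pathlib import Path
import pandas as pd

folder = Path(r"/media/New Volume/a3d/pdb/json_parser")
for json_file in folder.glob("*.json"):
    df = pd.read_json(json_file)
    # foo.json -> foo.txt, written next to the source file
    df.to_csv(json_file.with_suffix(".txt"), index=False)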

Renaming multiple csv files within a folder in Python

I have a folder with 50 .csv files. The .csv files are auto-generated output from a process-based model, with long automatic names, for example sandbox_username_vetch_scaleup_IA_1.csv, sandbox_username_vetch_scaleup_IA_2.csv, and so on up to sandbox_username_vetch_scaleup_IA_50.csv.
I am trying to shorten the file names to IA_1, IA_2, ... up to IA_50, and then add the new .csv file name as a column to the data frame. Here is what I have tried so far:
# import necessary libraries
import pandas as pd
import os
import glob
import sys
from pathlib import Path
import re

data_p = "/Users/Username/Documents/HV_Scale/CWAD"
output_p = "/Users/Username/Documents/HV_Scale/CWAD"

retval = os.getcwd()
print(retval)      # see in which folder you are
os.chdir(data_p)   # move to the folder with your data
os.getcwd()

filenames = sorted(glob.glob('*.csv'))
fnames = list(filenames)  # get the names of all your files
#print(fnames)

# Loop over
for f in range(len(fnames)):
    print(f'fname: {fnames[f]}\n')
    pfile = pd.read_csv(fnames[f], delimiter=",")  # read in file
    # extract filename
    filename = fnames[f]
    parts = filename.split(".")    # gives the number in the file name and .csv
    only_id = parts[0].split("_")  # if there is a bracket included
    # get IA from your file
    filestate = pfile["IA"][0]     # assuming this is on the first row
    filestate = str(filestate)
    # get new filename
    newfilename = only_id[0] + "-" + filestate + parts[1]
    # save your file (don't put a slash at the end of your directories on top)
    pfile.to_csv(output_p + "/" + newfilename, index=False, header=True)
Here is the code for adding the csv file name as a column
import glob
import os
import shutil
import sys
import pandas as pd

path = '/Users/Username/Documents/HV_Scale/IA_CWAD/short'
all_files = glob.glob(os.path.join(path, "*.csv"))
names = [os.path.basename(x) for x in glob.glob(path + '\*.csv')]

df = pd.DataFrame()
for file_ in all_files:
    file_df = pd.read_csv(file_, sep=';', parse_dates=[0], infer_datetime_format=True, header=None)
    file_df['file_name'] = file_
    df = df.append(file_df)
# However, this adds the old csv file name and not the renamed one
In order to rename and move these files, all you need is:
import glob
import os
import shutil
import sys

SOURCE = '<Your source directory>'
TARGET = '<Your target directory>'

for file in glob.glob(os.path.join(SOURCE, '*_IA_*.csv')):
    idx = file.index('_IA_')
    filename = file[idx+1:]
    target = os.path.join(TARGET, filename)
    if os.path.exists(target):
        print(f'Target file {target} already exists', file=sys.stderr)
    else:
        shutil.copy(file, target)
As there's nothing in the OP's question that tries to handle modification of the CSV files, that is left as an exercise for the OP (a sketch of that step follows below).
Source and target directories should be different, otherwise this can lead to ambiguous results.
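Picking up that exercise, a hedged sketch (directory names are placeholders, and it assumes the CSVs use the default comma separator) that records the shortened name as a column while copying:

import glob
import os
import pandas as pd

SOURCE = '<Your source directory>'
TARGET = '<Your target directory>'

for file in glob.glob(os.path.join(SOURCE, '*_IA_*.csv')):
    short = file[file.index('_IA_') + 1:]          # e.g. 'IA_1.csv'
    df = pd.read_csv(file)
    df['file_name'] = os.path.splitext(short)[0]   # the shortened name, e.g. 'IA_1'
    df.to_csv(os.path.join(TARGET, short), index=False)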

Zip multiple Excel files then merge their content into one file using Python

I am trying to create 2 functions with Python:
the first function zips multiple Excel files that exist in the given path;
the second function reads the content of the zip file and merges all existing files into one Excel file (all files have the same structure).
The problem is that when I run the script, it crashes when it comes to reading the zip file and displays the below error:
AttributeError: 'ZipFile' object has no attribute 'seek'
code:
import pandas as pd
import numpy as np
import zipfile
import os

def get_all_file_path(directory):
    file_paths = []
    for root, directories, files in os.walk(directory):
        for filename in files:
            filepath = os.path.join(root, filename)
            file_paths.append(filepath)
    return file_paths

# Excel file merge function
def excel_file_merge(zip_file_name):
    df = pd.DataFrame()
    archive = zipfile.ZipFile(zip_file_name, 'r')
    with zipfile.ZipFile(zip_file_name, "r") as f:
        for file in f.namelist():
            xlfile = archive.open(file)
            if file.endswith('.xlsx'):
                # Add a note indicating the file name that this dataframe originates from
                df_xl = pd.read_excel(xlfile, engine='openpyxl')
                df_xl['Note'] = file
                # Appends content of each Excel file iteratively
                df = df.append(df_xl, ignore_index=True)
    return df

uploaded_file = 'F:/AIenv/test_zip'
file_paths = get_all_file_path(uploaded_file)

print("following files will be zipped: ")
for file_name in file_paths:
    print(file_name)

with zipfile.ZipFile("my _python_files.zip", "w") as f:
    for file in file_paths:
        f.write(file)
f.close()
print("All Files Zipped successfully")

df = excel_file_merge(f)
print(df)
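No answer is included here, but judging from the traceback, one likely cause is that excel_file_merge(f) receives the (already closed) ZipFile object instead of the archive's file name, so pandas tries to seek on it. A hedged sketch of that fix, which also swaps the deprecated df.append for pd.concat:

import zipfile
import pandas as pd

def excel_file_merge(zip_file_name):
    frames = []
    with zipfile.ZipFile(zip_file_name, "r") as archive:
        for name in archive.namelist():
            if name.endswith(".xlsx"):
                with archive.open(name) as xlfile:
                    df_xl = pd.read_excel(xlfile, engine="openpyxl")
                    df_xl["Note"] = name  # record which file each row came from
                    frames.append(df_xl)
    return pd.concat(frames, ignore_index=True)

df = excel_file_merge("my _python_files.zip")  # pass the name, not the ZipFile object
print(df)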

Is there a way to load data from all files in a directory using Python?

My question: Is there a way to load data from all files in a directory using Python?
Input: Get all files in a given directory of mine (wow.txt, testing.txt, etc.)
Process: I want to run all the files through a def function
Output: I want the output to be all the file names with their respective content below each. For example:
/home/file/wow.txt
"all of its content"
/home/file/www.txt
"all of its content"
Here is my code:
# Import Functions
import os
import sys

# Define the file path
path = "/home/my_files"
file_name = "wow.txt"

# Load Data Function
def load_data(path, file_name):
    """
    Input  : path and file_name
    Purpose: loading text file
    Output : list of paragraphs/documents and
             title (initial 100 words considered as title of document)
    """
    documents_list = []
    titles = []
    with open(os.path.join(path, file_name), "rt", encoding='latin-1') as fin:
        for line in fin.readlines():
            text = line.strip()
            documents_list.append(text)
    print("Total Number of Documents:", len(documents_list))
    titles.append(text[0:min(len(text), 100)])
    return documents_list, titles

# Output
load_data(path, file_name)
Here is my output: (screenshot not reproduced here)
My problem is that my output only takes one file and shows its content. Obviously, I defined the path and file name in my code to point to one file, but I am confused about how to write the path so that it loads all the files and outputs each of their contents separately. Any suggestions?
Using glob:
import glob

files = glob.glob("*.txt")  # get all the .txt files
for file in files:  # iterate over the list of files
    with open(file, "r") as fin:  # open the file
        # rest of the code
Using os.listdir():
import os

arr = os.listdir()
files = [x for x in arr if x.endswith('.txt')]
for file in files:  # iterate over the list of files
    with open(file, "r") as fin:  # open the file
        # rest of the code
Try this:
import glob

for file in glob.glob("test/*.xyz"):
    print(file)
This prints each file, assuming the directory is named "test" and contains lots of .xyz files.
You can use glob and pandas:
import pandas as pd
import glob

path = r'some_directory'  # use your path
all_files = glob.glob(path + "/*.txt")

li = []
for filename in all_files:
    # read file here
    # if you decide to use pandas you might need to use the 'sep' parameter as well
    df = pd.read_csv(filename, index_col=None, header=0)
    li.append(df)

# get it all together
frame = pd.concat(li, axis=0, ignore_index=True)
I will take advantage of the function you have already written, so use the following:
data = []
path = "/home/my_files"
dirs = os.listdir(path)
for file in dirs:
    data.append(load_data(path, file))
In this case you will have all data in the list data.
Hi, you can use a for loop on a listdir:
os.listdir(<path of your directory>)
This gives you the list of files in your directory, but it also includes the names of folders in that directory; a sketch filtering those out follows below.
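To keep only regular files (and, say, only .txt files), a small sketch along those lines, reusing the path from the question:

import os

path = "/home/my_files"
files = [f for f in os.listdir(path)
         if os.path.isfile(os.path.join(path, f)) and f.endswith(".txt")]
print(files)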
Try generating a file list first, then passing that to a modified version of your function.
def dir_recursive(dirName):
    import os
    import re
    fileList = list()
    for (dir, _, files) in os.walk(dirName):
        for f in files:
            path = os.path.join(dir, f)
            if os.path.exists(path):
                fileList.append(path)
    fList = list()
    prog = re.compile(r'\.txt$')  # match names ending in .txt (note the escaped dot)
    for k in range(len(fileList)):
        binMatch = prog.search(fileList[k])
        if binMatch:
            fList.append(binMatch.string)
    return fList

def load_data2(file_list):
    documents_list = []
    titles = []
    for file_path in file_list:
        with open(file_path, "rt", encoding='latin-1') as fin:
            for line in fin.readlines():
                text = line.strip()
                documents_list.append(text)
        print("Total Number of Documents:", len(documents_list))
        titles.append(text[0:min(len(text), 100)])
    return documents_list, titles

# Generate a file list & load the data from it
file_list = dir_recursive(path)
documents_list, titles = load_data2(file_list)

Python: Collect all xlsx. - Open - Add Column - Save in new folder

I am new to Python but am trying to write code which adds a column to multiple .xlsx files and saves the files, under their original names, to a new folder.
I have started with some code below, but am missing the parts that open all the files and save them to my DestPath. I would be pleased if anyone has a solution for this:
from os import listdir, path
import pandas as pd
import xlrd

SourcePath = 'C:\\'  # Source Path
DestPath = 'C:\\'    # Destination Path

# Listing up all .xlsx files from Source
def find_xlsx_filenames(path_to_dir, suffix=".xlsx"):
    filenames = listdir(path_to_dir)
    return [filename for filename in filenames if filename.endswith(suffix)]

filenames = find_xlsx_filenames(SourcePath)
fname = path.join(SourcePath, filenames[0])    # Take the first file in the folder.
outname = path.join(outputdata, filenames[0])  # NOTE: outputdata is not defined here

for i in range(len(filenames)):
    fname = path.join(SourcePath, filenames[i])
    df = pd.read_excel(fname)        # Read Excel file as a DataFrame
    df['new_col'] = 'Sort Data'      # Adding a new column named <Sort Data>
    # To save it back as Excel
    df.to_excel(DestPath, outname)   # Write DataFrame back as Excel file
Thanks in Advance
Check if this works:
import os
import pandas as pd

path = 'C:/'
for roots, dirs, files in os.walk(path):
    xlsfile = [_ for _ in files if _.endswith('.xlsx')]
    for xlsf in xlsfile:
        df = pd.read_excel(os.path.join(roots, xlsf))
        df['Sort Data'] = ' '
        df.to_excel(os.path.join(roots, xlsf), index=False)
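Note the snippet above overwrites each workbook in place; if the goal is to keep the originals and save the modified copies to a new folder, a hedged variant (paths are placeholders) is:

import os
import pandas as pd

SourcePath = 'C:/source'  # placeholder source folder
DestPath = 'C:/dest'      # placeholder destination folder

for name in os.listdir(SourcePath):
    if name.endswith('.xlsx'):
        df = pd.read_excel(os.path.join(SourcePath, name))
        df['new_col'] = 'Sort Data'
        # same file name, different folder
        df.to_excel(os.path.join(DestPath, name), index=False)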
