I'm pretty new to Python and trying to make a Python script to put images together into a video (.mp4) using MoviePy.
However, I have multiple files and would like to be more efficient by naming the folder and selecting all images within that folder, rather than having to select each image individually.
Here's my Code:
# Build a slideshow from ten numbered images (4 seconds each) and write it
# out as an H.264 MP4 with AAC audio settings.
from moviepy.editor import *
import os

# NOTE(review): the original hard-coded 'imagesfolder\images0.jpg' ... ten
# times.  The raw backslash only worked by luck (\i is not an escape
# sequence); os.path.join is portable and the loop removes the repetition.
clips = [
    ImageClip(os.path.join('imagesfolder', 'images%d.jpg' % i)).set_duration(4)
    for i in range(10)
]

video_clip = concatenate_videoclips(clips, method='compose')
video_clip.write_videofile("memes.mp4", fps=24, remove_temp=True,
                           codec="libx264", audio_codec="aac")
You can use a function called glob to find all files in a directory which match a pattern.
Eg
from glob import glob

# BUG FIX: the original pattern was "imagesfolder\*.gif", which would find
# nothing in a folder of .jpg frames.  glob also returns paths in arbitrary
# order, so sort them to keep the frames in numeric/name order.
clips = [ImageClip(path).set_duration(4)
         for path in sorted(glob("imagesfolder/*.jpg"))]
video_clip = concatenate_videoclips(clips, method="compose")
Related
the purpose of the following code is to copy image files from one directory to another directory and reorganize the images in a hierarchical structure based on information extracted from XML files.
from bs4 import BeautifulSoup as bs
import shutil
import os
import glob
# Campaign label that becomes one level of the output directory hierarchy.
campaign_name = "CAMPAIGN_2020"
# Folder containing the .xml metadata files (placeholder path).
xml_directory = r'XML_DIRECTORY'
# Root folder that is searched recursively for .jpg pictures (placeholder path).
picture_directory = r'PICTURE_DIRECTORY'
# Root folder the reorganized pictures are copied into (placeholder path).
output_directory = r'OUTPUT_DIRECTORY'
def copy_files(content, picture_files):
    """Copy pictures into an output hierarchy derived from XML metadata.

    content        -- raw XML text of one metadata file
    picture_files  -- paths of candidate .jpg files

    Performance fix: the original re-scanned every <img> element of the XML
    for every single picture (O(pictures * images), recomputing basenames
    each time) — the reported 2 hours for 20 pictures.  Here the <img>
    elements are indexed once by the basename of their 'src' attribute, so
    each picture costs one dict lookup.
    """
    bs_content = bs(content, "lxml")

    # Index built once: image basename -> list of matching <img> elements.
    # A list preserves the original behavior when several elements share a
    # basename (each match produced a copy).
    images_by_name = {}
    for image_group in bs_content.find_all("images"):
        for image in image_group.find_all('img'):
            key = os.path.basename(image['src'])
            images_by_name.setdefault(key, []).append(image)

    for picture in picture_files:
        for image in images_by_name.get(os.path.basename(picture), ()):
            station = image['station']
            parent = image.parent.parent  # holds the <data>/<assignment> tags
            first_field = parent.data['first_field']
            second_field = parent.data['second_field']
            start = int(parent.data['start'])
            end = int(parent.data['end'])
            length = start - end
            class_name = parent.assignment['class_name']
            number = parent.assignment['number']
            img_nr = int(image['img_nr'])
            location = parent.assignment['location']
            date = parent['date']
            # Assemble the target path; os.path.join instead of hard-coded
            # backslashes keeps it portable.
            picture_path = os.path.join(
                f'{class_name}{number}',
                f'{first_field}_{second_field}_{length}_{start}_{end}',
                adjust_date(date),
                campaign_name,
                adjust_location(location),
                adjust_img_nr(img_nr),
                f'{station.zfill(5)}.jpg',
            )
            # create new subdirectories if they do not already exist
            os.makedirs(os.path.join(output_directory,
                                     os.path.dirname(picture_path)),
                        exist_ok=True)
            shutil.copy(picture, os.path.join(output_directory, picture_path))
# Collect every .jpg under picture_directory once (recursively); adjust the
# pattern if other extensions must be considered.
picture_list = glob.glob(os.path.join(picture_directory, '**', '*.jpg'),
                         recursive=True)

for path in os.listdir(xml_directory):
    if path.endswith(".xml"):  # only consider files that end with .xml
        with open(os.path.join(xml_directory, path), "r") as file:
            # read() replaces the original readlines()+join round-trip.
            xml_content = file.read()
        copy_files(xml_content, picture_list)
I tested the code and it works for the most part. To copy 20 pictures the tool needs around 2 hours, so I have to drastically improve the execution time. How can I do that?
To give you an idea: I have around 8k xml files and around 400k pictures :D
I am adjusting a script.
I have 4427 images in a specified folder, named
(1).png
(2).png
(3).png
etc.
Along with those, I have another 14 images, named:
1.png
2.png
3.png
etc.
Basically the script should:
Take a specific image I tell it to open of the 4427
Then, open one of the 14 images at random
Merge the two and save it to a specified directory.
Code
import os
import random
from PIL import Image

# Folder holding the overlay images to pick from at random.
path = r"C:\Users\17379\Desktop\images\Low effort glasses"

random_filename = random.choice([
    x for x in os.listdir(path)
    if os.path.isfile(os.path.join(path, x))
])
print(random_filename)

# NOTE(review): "(1).png" is opened relative to the current working
# directory — presumably the script is run from the folder containing the
# 4427 images; confirm or use an absolute path.
x = Image.open(r"(1).png").convert("RGBA")
# BUG FIX: os.listdir returns bare names, so the overlay must be joined
# with its folder; it must also be RGBA, since Image.alpha_composite
# raises ValueError when the two modes differ.
y = Image.open(os.path.join(path, random_filename)).convert("RGBA")
z = Image.alpha_composite(x, y)
z.save(r"C:\Users\17379\Desktop\images\Resized punks\Resized punks\punk1.png")
How to do this to all 4427 images and then save each file to the specified directory?
The pseudo-code for your task is:
for file_name in source_image_list:
# 1. Take a specific image I tell it to open of the 4427
source_image = open_source(file_name)
# 2. Then, open one of the 14 images at random
random_image = open_random_of(random_image_list)
# 3. Merge the two and save it to a specified directory.
target_image = merge(source_image, random_image)
save(target_image, file_name, directory)
Translate this to Python:
import os
import glob
import random
from PIL import Image
def open_random(random_image_list):
    """Pick one path at random from *random_image_list* and open it as RGBA.

    Bug fixes: the original read ``random.choice(in random_image_list)``,
    which is a SyntaxError — ``random.choice`` takes the sequence directly.
    The image is also converted to RGBA because ``Image.alpha_composite``
    (used by ``merge``) requires both operands to be RGBA.
    """
    random_filename = random.choice(random_image_list)
    print(f"Random: {random_filename}")
    return Image.open(random_filename).convert("RGBA")
def open_source(file_name):
    """Open *file_name* and normalize it to RGBA for compositing."""
    image = Image.open(file_name)
    return image.convert("RGBA")
def merge(source, random):
    """Alpha-composite *random* over *source* (both must be RGBA).

    NOTE: the second parameter name shadows the ``random`` module inside
    this function's body; harmless here because the module is not used.
    """
    return Image.alpha_composite(source, random)
def save(image, original_name, directory):
    """Write *image* into *directory*, keeping the original base name."""
    destination = os.path.join(directory, os.path.basename(original_name))
    print(f"Saving: {destination}")
    image.save(destination)
if __name__ == '__main__':
    source_path = r"C:\Users\17379\Desktop\images"
    random_path = r"C:\Users\17379\Desktop\images\Low effort glasses"
    directory = r"C:\Users\17379\Desktop\images\Resized punks\Resized punks"

    # BUG FIX: os.listdir returns bare file names; join them with their
    # folder so Image.open finds them regardless of the working directory.
    random_image_list = [os.path.join(random_path, name)
                         for name in os.listdir(random_path)]

    # BUG FIX: glob matches shell wildcards, not regular expressions, so
    # the original r"\([0-9]+\).png" matched nothing.  "(*).png" matches
    # "(1).png", "(2).png", ...
    source_image_list = glob.glob(os.path.join(source_path, "(*).png"))

    for file_name in source_image_list:
        print(f"Source: {file_name}")
        # 1. Take a specific image I tell it to open of the 4427
        source_image = open_source(file_name)
        # 2. Then, open one of the 14 images at random
        # BUG FIX: the helper is named open_random, not open_random_of.
        random_image = open_random(random_image_list)
        # 3. Merge the two and save it to a specified directory.
        target_image = merge(source_image, random_image)
        save(target_image, file_name, directory)
Note: I had to replace os.listdir(source_path) with glob.glob because it accepts a wildcard pattern to filter for specific files only (glob patterns are shell-style wildcards, not regular expressions). See also
Python3 create list of image in a folder
I'm working on a machine learning (IMAGE CLASSIFICATION)
and I found a data set that has two files:
The images (20,000 images): "The images are numbered from 1 to 20,000 (not classified into classes)"
A JSON file that has the information and classification of the images (12 classes of images)
The JSON file is structured as follows:
{
"<image_number>": {
"image_filepath": "images/<image_number>.jpg",
"anomaly_class": "<class_name>"
},
...
}
So I'm trying to read the JSON file and split the data set so I can deal with each class individually..
Then take 80% of "each class" as a training set and 20% for the testing set
I tried to find a way to match the JSON file with the dataset (images) So I can classify the classes in individual folders then divide them into training and testing sets
Anyone can help me with that?
THANK YOU
Something like the following would create folders for each of the classes and then move the images into them.
import json
import os
from os import path

# Open the json file containing the classifications
with open("clasification.json", "r") as f:
    classification = json.load(f)

# Create a set which contains all the classes
classes = {entry["anomaly_class"] for entry in classification.values()}

# For each of the classes make a folder to contain them.
# exist_ok=True keeps a re-run (or a pre-existing folder) from crashing.
for c in classes:
    os.makedirs(c, exist_ok=True)

# For each image entry in the json move the image to the folder named after its class
for image_number, image_data in classification.items():
    os.rename(image_data["image_filepath"],
              path.join(image_data["anomaly_class"],
                        "{}.jpg".format(image_number)))
Something like this should work:
import json
from pathlib import Path

currDir = Path(__file__).resolve().parent

# Path where the images will be moved to
imagesDir = currDir / 'images'
testingDir = imagesDir / 'testing'
trainingDir = imagesDir / 'training'

# Load data.  This has to be the path to the file containing the data;
# it is assumed to be in the current directory.
infoFilePath = currDir / 'data.json'
with infoFilePath.open() as f:
    # json.load reads the file object directly (no read()+loads round-trip)
    infoPerImage = json.load(f)

# Separate into classes: class name -> list of image file paths
infoPerClass = {}
for imageNumber, imageInfo in infoPerImage.items():
    infoPerClass.setdefault(imageInfo['anomaly_class'], []).append(
        imageInfo['image_filepath'])

# Create directories for the classes.
# BUG FIX: exist_ok=True keeps a re-run from crashing on existing folders.
for imageClass in infoPerClass:
    (trainingDir / imageClass).mkdir(parents=True, exist_ok=True)
    (testingDir / imageClass).mkdir(parents=True, exist_ok=True)

# Separate into training (first 80%) and testing (remaining 20%) images
trainingImages = {}
testingImages = {}
for imageClass, imagePaths in infoPerClass.items():
    upperLimit = int(len(imagePaths) * 0.8)
    trainingImages[imageClass] = imagePaths[:upperLimit]
    testingImages[imageClass] = imagePaths[upperLimit:]
def moveImagesToTheirDir(imagesDict, imagesBasePath):
    """Move every image in *imagesDict* (class -> list of source paths)
    into *imagesBasePath*/<class>/<original file name>."""
    for imageClass, imagePaths in imagesDict.items():
        classDir = imagesBasePath / imageClass
        for imagePath in imagePaths:
            source = Path(imagePath)
            source.rename(classDir / source.name)

moveImagesToTheirDir(trainingImages, trainingDir)
moveImagesToTheirDir(testingImages, testingDir)
The following code combines multiple images into one PDF. I am trying to run this code on multiple folders, where each folder has several images; as a result, each folder should end up with one PDF.
import os
from PIL import Image
from fpdf import FPDF

sdir = "imageFolder/"
pdf = None  # created lazily, sized from the first image actually found
w, h = 0, 0

for i in range(1, 100):
    fname = sdir + "IMG%.3d.png" % i
    if os.path.exists(fname):
        # BUG FIX: the original only sized the PDF when i == 1, so a
        # missing IMG001.png left a default A4 document and w = h = 0 for
        # every page.  Size the document from the first existing image.
        if pdf is None:
            # 'with' closes the image file handle (the original leaked it).
            with Image.open(fname) as cover:
                w, h = cover.size
            pdf = FPDF(unit="pt", format=[w, h])
        pdf.add_page()
        pdf.image(fname, 0, 0, w, h)
    else:
        print("File not found:", fname)
    print("processed %d" % i)

# Only write a PDF if at least one image was found.
if pdf is not None:
    pdf.output("output.pdf", "F")
print("done")
print("done")
I was thinking to create another loop to bring the folder path which will come before the first loop:
For j in range(1 to 70):
folderP=sdir+folder%1
And loop in each folder
Sorry I am still learning python. Any suggestion would be great!
You can use glob to get the paths of all pdfs and add them to a list, then you just iterate through the list and you wouldn't even need to check if they exist:
from glob import glob

sDir = 'imageFolder/'
# Recursively collect every PDF under sDir.  No existence check is needed
# because glob only returns paths that actually exist.
pdfPaths = glob(f'{sDir}**/*.pdf', recursive=True)

for pdf in pdfPaths:
    # do stuff
    # BUG FIX: a loop needs a body — the original had only a comment,
    # which is a SyntaxError.
    pass
I am trying to export in x3d format OpenFOAM results using paraview-python script. When I do it via paraview graphical interface it works and results can be visualized in Blender, see the following picture
However, when I try to do the same operation using the following script
from paraview.simple import *
import fnmatch
import os
import shutil

# Recursively collect every legacy VTK file below the current directory.
vtkFiles = []
for root, dirnames, filenames in os.walk('.'):
    vtkFiles.extend(os.path.join(root, name)
                    for name in fnmatch.filter(filenames, '*.vtk'))

# OpenFOAM fields to export (one output folder per field).
vtkFilesGroups = [
    'U',
]
def ResetSession():
    """Tear down the current ParaView session and start a fresh one."""
    proxy_manager = servermanager.ProxyManager()
    proxy_manager.UnRegisterProxies()
    del proxy_manager
    Disconnect()
    Connect()
def x3dExport(output, r):
    """Render source *r* in the active view and write it to *output* as X3D.

    Resets the ParaView session afterwards so state does not leak between
    exports.
    """
    exporters_module = servermanager.createModule("exporters")
    Show(r)
    active_view = GetActiveView()
    Render()
    exporter = exporters_module.X3DExporter(FileName=output)
    exporter.SetView(active_view)
    exporter.Write()
    ResetSession()
ResetSession()
# Group VTK files by field (entries of "vtkFilesGroups"), then loop over
# all of them and save each into the different output formats.
groupedVtkFiles=[]  # NOTE(review): never used below — candidate for removal
for group in vtkFilesGroups:
    # One output folder per format for this field.
    vtkDir = os.path.join('.', group, 'vtk')
    if not os.path.exists(vtkDir):
        os.makedirs(vtkDir)
    vtuDir = os.path.join('.', group, 'vtu')
    if not os.path.exists(vtuDir):
        os.makedirs(vtuDir)
    x3dDir = os.path.join('.', group, 'x3d')
    if not os.path.exists(x3dDir):
        os.makedirs(x3dDir)
    for stepFile in vtkFiles:
        # Path layout assumed to be .../<time>/<name>.vtk — TODO confirm.
        tmp = stepFile.split(os.sep)
        oldFileName = tmp[-1].split('.')[0]
        time = tmp[-2]
        fileNameVtk = '{}_{}.vtk'.format(oldFileName, time)  # NOTE(review): unused
        fileNameVtp = '{}_{}.vtp'.format(oldFileName, time)
        fileNameX3d = '{}_{}.x3d'.format(oldFileName, time)
        r = LegacyVTKReader(FileNames=[stepFile])
        # NOTE(review): the writer gets a .vtp file name but is placed in
        # the vtu folder; XMLUnstructuredGridWriter normally writes .vtu —
        # verify the intended extension.  The writer uses the active
        # source (the reader created above) — presumably intentional.
        w = XMLUnstructuredGridWriter()
        w.FileName = os.path.join(vtuDir, fileNameVtp)
        w.UpdatePipeline()
        x3dExport(os.path.join(x3dDir, fileNameX3d), r)
the field values (velocity U) are not exported as you can see from this picture!
Can someone tell me what I am doing wrong?
Thank you!
Your problem is that the .foam file is not a scientific visualization file like VTK; the .foam file is only used by ParaView (by its extension, not by its content) to identify the OpenFOAMReader and then use it for post-processing.
I have two solutions for you:
Read the reader documentation to find a way to do this.
Convert the results into VTK files with FoamToVTK and then loop over the results.
EDIT
I used this code to do that a long time ago:
from paraview.simple import *
import fnmatch
import os
import shutil

# Walk the current directory tree and remember every legacy VTK file found.
vtkFiles = []
for root, dirnames, filenames in os.walk('.'):
    matched = fnmatch.filter(filenames, '*.vtk')
    vtkFiles.extend(os.path.join(root, name) for name in matched)

# OpenFOAM fields to export.
vtkFilesGroups = ('p', 'U')
def ResetSession():
    """Drop all registered proxies and reconnect, yielding a clean session."""
    manager = servermanager.ProxyManager()
    manager.UnRegisterProxies()
    del manager
    Disconnect()
    Connect()
def x3dExport(output, r):
    """Show source *r*, render it, and export the active view to *output*
    in X3D format; the session is reset afterwards."""
    exporters = servermanager.createModule("exporters")
    Show(r)
    view = GetActiveView()
    Render()
    writer = exporters.X3DExporter(FileName=output)
    writer.SetView(view)
    writer.Write()
    ResetSession()
# Group VTK files by field (entries of "vtkFilesGroups"), then export every
# matching time step to X3D.
for group in vtkFilesGroups:
    x3dDir = os.path.join('.', group, 'x3d')
    if not os.path.exists(x3dDir):
        os.makedirs(x3dDir)
    for stepFile in (f for f in vtkFiles if group in f):
        # Path layout assumed to be .../<time>/<name>.vtk — TODO confirm.
        tmp = stepFile.split(os.sep)
        oldFileName = tmp[-1].split('.')[0]
        time = tmp[-2]
        fileNameX3d = '{}_{}.x3d'.format(oldFileName, time)
        # BUG FIX: the original passed an undefined name `r` to x3dExport
        # (NameError) — a reader must be created for each file.
        r = LegacyVTKReader(FileNames=[stepFile])
        x3dExport(os.path.join(x3dDir, fileNameX3d), r)
You need to color your data in your script, with something like :
# BUG FIX: the original snippet had unbalanced parentheses.  ColorBy takes
# the representation and a flat (association, array name[, component]) tuple.
ColorBy(yourRep, ('POINTS', 'YourScalar', 'YourComp'))
Documentation