So far I have been able to "manually" process images by replacing 'picture' in
photo = "directory/picture.jpg"
for every image I'm processing.
This is effective, sure, but it's very slow. Any ideas?
The code I'm using:
from deepdreamer import model, load_image, recursive_optimize
import numpy as np
import PIL.Image
layer_tensor = ...
photo = "directory/picture.jpg"
img_result = load_image(filename='{}'.format(photo))
img_result = recursive_optimize(...)
img_result = np.clip(img_result, 0.0, 255.0)
img_result = img_result.astype(np.uint8)
result = PIL.Image.fromarray(img_result, mode='RGB')
result.save(photo)
Assuming all image files are in the same folder/directory, you can:
Encapsulate your image processing into a function.
Find all the filenames using os.listdir().
Loop over the filenames, passing each into the imageProcess() function, which acts on each image.
Python Code:
from deepdreamer import model, load_image, recursive_optimize
import numpy as np
import PIL.Image
import os  # for file management

layer_tensor = ...

def imageProcess(aFile):
    photo = "directory/{}".format(aFile)
    img_result = load_image(filename=photo)
    img_result = recursive_optimize(...)
    img_result = np.clip(img_result, 0.0, 255.0)
    img_result = img_result.astype(np.uint8)
    result = PIL.Image.fromarray(img_result, mode='RGB')
    result.save(photo)

for filename in os.listdir('directory'):
    imageProcess(filename)
It'll be something like this. Let me know how that works; I didn't run the code.
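One hedged follow-up: os.listdir() returns every entry in the folder, including any non-image files, so a small extension filter keeps imageProcess() from choking on them. A minimal sketch, assuming the same directory as above:

valid_ext = ('.jpg', '.jpeg', '.png')

for filename in os.listdir('directory'):
    if filename.lower().endswith(valid_ext):  # only pass image files to imageProcess()
        imageProcess(filename)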
Related
I wrote code to remove the background of 8000 images, but it takes approximately 8 hours to finish.
How can I improve its running time, since I will have to work on a larger dataset in the future?
Or do I have to write entirely new code? If so, please suggest some sample code.
from rembg import remove
import cv2
import glob

for img in glob.glob('../images/*.jpg'):
    a = img.split('../images/')
    a1 = a[1].split('.jpg')
    try:
        cv_img = cv2.imread(img)
        output = remove(cv_img)
    except:
        continue
    cv2.imwrite('../output image/' + str(a1[0]) + '.png', output)
One simple approach would be to divide the work across multiple threads. See ThreadPoolExecutor for more.
You can play around with max_workers= to see what gets the best results. max_workers defaults to min(32, os.cpu_count() + 4), but you can pass any positive integer.
This sample code is ready to run. It assumes the image files are in the same directory as your main.py and that the output_image directory exists.
import cv2
import rembg
import sys
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor

out_dir = Path("output_image")
in_dir = Path(".")

def is_image(absolute_path: Path):
    # accept common image extensions (the question uses .jpg)
    return absolute_path.is_file() and str(absolute_path).lower().endswith(('.png', '.jpg', '.jpeg'))

input_filenames = [p for p in filter(is_image, Path(in_dir).iterdir())]

def process_image(img_path):
    try:
        image = cv2.imread(str(img_path))
        if image is None or not image.data:
            raise cv2.error("read failed")
        output = rembg.remove(image)
        out_path = out_dir / img_path.with_suffix(".png").name
        cv2.imwrite(str(out_path), output)
        return out_path
    except Exception as e:
        print(f"{img_path}: {e}", file=sys.stderr)

executor = ThreadPoolExecutor(max_workers=4)
for result in executor.map(process_image, input_filenames):
    print(f"Processed image: {result}")
Check out the U^2-Net repository. As in u2net_test.py, writing your own remove function and using dataloaders can speed up the process. If alpha matting is not necessary, skip it; otherwise you can add the alpha-matting code from rembg.
def main():
    # --------- 1. get image path and name ---------
    # (SalObjDataset, RescaleT, ToTensorLab, normPRED, save_output and the
    #  loaded network `net` all come from the U^2-Net repository.)
    model_name = 'u2net'  # or 'u2netp'
    image_dir = os.path.join(os.getcwd(), 'test_data', 'test_images')
    prediction_dir = os.path.join(os.getcwd(), 'test_data', model_name + '_results' + os.sep)
    model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')

    img_name_list = glob.glob(image_dir + os.sep + '*')
    print(img_name_list)

    # 1. dataloader
    test_salobj_dataset = SalObjDataset(img_name_list=img_name_list,
                                        lbl_name_list=[],
                                        transform=transforms.Compose([RescaleT(320),
                                                                      ToTensorLab(flag=0)]))
    test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                        batch_size=1,
                                        shuffle=False,
                                        num_workers=1)

    for i_test, data_test in enumerate(test_salobj_dataloader):
        print("inferencing:", img_name_list[i_test].split(os.sep)[-1])

        inputs_test = data_test['image']
        inputs_test = inputs_test.type(torch.FloatTensor)

        if torch.cuda.is_available():
            inputs_test = Variable(inputs_test.cuda())
        else:
            inputs_test = Variable(inputs_test)

        d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)

        # normalization
        pred = d1[:, 0, :, :]
        pred = normPRED(pred)

        # save results to test_results folder
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir, exist_ok=True)
        save_output(img_name_list[i_test], pred, prediction_dir)

        del d1, d2, d3, d4, d5, d6, d7
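On the "using dataloaders can speed up the process" point, a hedged tweak (not part of the original script) is to raise batch_size and num_workers in the DataLoader; note that the per-image saving loop above would then need to iterate over each item in the batch:

# illustrative values only; adapt the saving loop for batched predictions
test_salobj_dataloader = DataLoader(test_salobj_dataset,
                                    batch_size=8,    # e.g. 4-16, depending on GPU memory
                                    shuffle=False,
                                    num_workers=4)   # parallel data-loading workers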
Try parallelization with multiprocessing, as Mark Setchell mentioned in his comment. I rewrote your code according to Method 8 from here. Multiprocessing should speed up your execution time. I did not test the code; try it and see if it works.
import glob
from multiprocessing import Pool

import cv2
from rembg import remove

def remove_background(filename):
    a = filename.split("../images/")
    a1 = a[1].split(".jpg")
    try:
        cv_img = cv2.imread(filename)
        output = remove(cv_img)
    except Exception:
        return  # `continue` is not valid inside a function; just skip this file
    cv2.imwrite("../output image/" + str(a1[0]) + ".png", output)

if __name__ == "__main__":  # guard so worker processes don't re-execute this block
    files = glob.glob("../images/*.jpg")
    pool = Pool(8)
    results = pool.map(remove_background, files)
Ah, you used the example from https://github.com/danielgatis/rembg#usage-as-a-library as a template for your code. Maybe try the other example, which uses a PIL image instead of OpenCV. PIL is usually a bit slower, but who knows; try it with maybe 10 images and compare the execution times.
Here is your code using PIL instead of OpenCV. Not tested.
import glob

from PIL import Image
from rembg import remove

for img in glob.glob("../images/*.jpg"):
    a = img.split("../images/")
    a1 = a[1].split(".jpg")
    try:
        pil_img = Image.open(img)
        output = remove(pil_img)
    except Exception:
        continue
    output.save("../output image/" + str(a1[0]) + ".png")
I tried to get an image from my GigE camera. In the camera's own software it's working just fine, but when I do it with harvesters my image has a weird grid, and I don't know why it's there or how to remove it. I need this for a stereo vision project. Any idea?
Don't mind the brightness; I tried it with higher exposure as well and it didn't change a thing. :D
import genicam.genapi as ge
import cv2
from harvesters.core import Harvester
import matplotlib.pyplot as plt
import numpy as np
# Create a Harvester object:
h = Harvester()
# Load a GenTL Producer; you can load many more if you want to:
h.add_file("C:/Program Files/MATRIX VISION/mvIMPACT Acquire/bin/x64/mvGenTLProducer.cti")
# Enumerate the available devices that GenTL Producers can handle:
h.update()
# Select a target device and create an ImageAcquire object that
# controls the device:
ia = h.create(0)
ia2 = h.create(1)
# Configure the target device; it looks very small but this is just
# for demonstration:
ia.remote_device.node_map.Width.value = 1456
ia.remote_device.node_map.Height.value = 1088
# ia.remote_device.node_map.PixelFormat.symbolics
ia.remote_device.node_map.PixelFormat.value = 'BayerRG8'
ia2.remote_device.node_map.Width.value = 1456
ia2.remote_device.node_map.Height.value = 1088
# ia2.remote_device.node_map.PixelFormat.symbolics
ia2.remote_device.node_map.PixelFormat.value = 'BayerRG8'
ia.remote_device.node_map.ChunkSelector.value = 'ExposureTime'
ia.remote_device.node_map.ExposureTime.set_value(100000.0)
ia2.remote_device.node_map.ChunkSelector.value = 'ExposureTime'
ia2.remote_device.node_map.ExposureTime.set_value(100000.0)
# Allow the ImageAcquire object to start image acquisition:
ia.start()
ia2.start()
# We are going to fetch a buffer filled up with an image:
# Note that you'll have to queue the buffer back to the
# ImageAcquire object once you consumed the buffer; the
# with statement takes care of it on behalf of you:
while True:
    with ia.fetch() as buffer:
        component = buffer.payload.components[0]
        _2d = component.data.reshape(1088, 1456)
        img = _2d
        img = cv2.resize(img, (640, 480))
        cv2.imshow('right', img)
        cv2.imwrite('test_left.png', img)
        cv2.waitKey(10)
    with ia2.fetch() as buffer:
        component = buffer.payload.components[0]
        _2d = component.data.reshape(component.height, component.width)
        img2 = _2d
        img2 = cv2.resize(img2, (640, 480))
        cv2.imshow('left', img2)
        cv2.imwrite('test_right.png', img2)
        cv2.waitKey(10)

ia.stop()
ia2.stop()
ia.destroy()
ia2.destroy()
h.reset()
I just had to convert it to Gray or RGB with cvtColor, and it's working.
Thanks anyway.
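For reference, a minimal sketch of that conversion, assuming the BayerRG8 pixel format configured above (the exact COLOR_Bayer* constant depends on the sensor's colour filter pattern):

import cv2

# `_2d` is the raw Bayer frame fetched from the camera (see the code above)
rgb = cv2.cvtColor(_2d, cv2.COLOR_BayerRG2RGB)    # demosaic to colour; removes the grid
gray = cv2.cvtColor(_2d, cv2.COLOR_BayerRG2GRAY)  # or convert straight to grayscale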
I have a stream of images and have to display it in a Google Colab notebook so that it looks like a video, but what I get is one image rendered under another ...
from google.colab import drive
drive.mount('/content/drive')
# importing cv2
import cv2
import imutils
from google.colab.patches import cv2_imshow
from IPython.display import clear_output
import os
folder = r'/content/drive/images/'
for filename in os.listdir(folder):
    VALID_FORMAT = (".jpg", ".JPG", ".jpeg", ".JPEG", ".png", ".PNG")
    if filename.upper().endswith(VALID_FORMAT):
        path = folder + filename
        image = cv2.imread(path)
        # resize image
        frame = imutils.resize(image, width=1200)
        # show the image
        cv2_imshow(frame)
        cv2.waitKey(20)
I don't know if there is a function that can display an image in the same place.
But I have code which I used with cv2 to display frames from a webcam as video.
Here is a reduced version.
imshow(name, image) creates <img id="name">, sets its src to the image converted to a base64 string, and the browser shows it as an image.
imshow() uses name to check whether <img id="name"> already exists, and if so it replaces the previous image.
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64encode
import cv2
def imshow(name, img):
    """Put frame as <img src="data:image/jpg;base64,...."> """
    js = Javascript('''
      async function showImage(name, image, width, height) {
        img = document.getElementById(name);
        if (img == null) {
          img = document.createElement('img');
          img.id = name;
          document.body.appendChild(img);
        }
        img.src = image;
        img.width = width;
        img.height = height;
      }
    ''')

    height, width = img.shape[:2]

    ret, data = cv2.imencode('.jpg', img)   # compress array of pixels to JPG data
    data = b64encode(data)                  # encode as base64
    data = data.decode()                    # convert bytes to string
    data = 'data:image/jpg;base64,' + data  # join header ("data:image/jpg;base64,") and base64 data (JPG)

    display(js)
    # `name` and `data` need quotes ("") so they are sent as text, not as variable names
    eval_js(f'showImage("{name}", "{data}", {width}, {height})')  # run JavaScript to put the image (JPG as base64 string) into <img>
And here is code which uses it to display the Lenna image from Wikipedia.
import requests
import cv2
import numpy as np
import time
url = 'https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png'
data = requests.get(url)
frame1 = cv2.imdecode(np.frombuffer( data.content, np.uint8), 1)
frame2 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
for _ in range(3):
    imshow("temp", frame1)
    time.sleep(1)
    imshow("temp", frame2)
    time.sleep(1)
EDIT
Display images in two "windows" using imshow("img1", ...) and imshow("img2", ...)
import os
import cv2
import imutils
import time
folder = r'/content/drive/images/'
VALID_FORMAT = (".JPG", ".JPEG", ".PNG")
for number, filename in enumerate(os.listdir(folder)):
    if filename.upper().endswith(VALID_FORMAT):
        path = os.path.join(folder, filename)
        image = cv2.imread(path)
        frame = imutils.resize(image, width=400)

        number = number % 2
        imshow(f"img{number}", frame)

        time.sleep(1)
Hi, I have a script to run image processing on an image, but I'm trying to find a loop or another way to read multiple images from a folder,
e.g.
C:\Users\student\Desktop\Don\program (opencv version)\Images\move1
move1 contains images named frame1.jpg, frame2.jpg, frame3.jpg...
The script I'm using to run the image processing is something like
img = cv2.imread('frame1.jpg')
mimg = cv2.medianBlur(img,15)
gimg = cv2.cvtColor(mimg,cv2.COLOR_RGB2GRAY)
ret,th1 = cv2.threshold(gimg, 160,255,cv2.THRESH_BINARY)
ret,th2 = cv2.threshold(th1, 160,255,cv2.THRESH_BINARY_INV)
cv2.imwrite('threshbinaryinv.jpg', th2)
My script above can only read an image whose name I manually key in, e.g. 'frame1.jpg'. Sorry, I'm very new to Python. Thanks!
EDIT
This is the code I edited with your help. I'm still getting an error:
Traceback (most recent call last):
File "C:\Users\student\Desktop\Don\program (opencv version)\prog.py", line 32, in
gimg = cv2.cvtColor(mimg,cv2.COLOR_RGB2GRAY) #convert RBG to Grayscale
cv2.error: D:\Build\OpenCV\opencv-3.3.1\modules\imgproc\src\color.cpp:11048: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
CODE
path_of_images = 'C:/Users/student/Desktop/Don/program (opencv version)/Images'
list_of_images = os.listdir(path_of_images)
for image in list_of_images:
    img = cv2.imread(os.path.join(path_of_images, image))
    mimg = cv2.medianBlur(img, 15)
    gimg = cv2.cvtColor(mimg, cv2.COLOR_RGB2GRAY)
    ret, th1 = cv2.threshold(gimg, 160, 255, cv2.THRESH_BINARY)
    ret, th2 = cv2.threshold(th1, 160, 255, cv2.THRESH_BINARY_INV)
    cv2.imwrite('threshbinaryinv.jpg', th2)
You can use os.listdir() to get the names of all the images in your specified path, which is "C:\Users\student\Desktop\Don\program (opencv version)\Images". Then you can loop over the image names like:
import os
import cv2
path_of_images = r"C:\Users\student\Desktop\Don\program (opencv version)\Images"
list_of_images = os.listdir(path_of_images)
for image in list_of_images:
    img = cv2.imread(os.path.join(path_of_images, image))
    """Your code here"""
It can be done with a for loop that builds each file name as a string and then processes it:
IMG_FOLDER_PREFIX = "absolute/path/to/frame"
IMG_EXTENSION = ".jpg"
NUM_IMAGES = 10

for i in range(1, NUM_IMAGES + 1):  # frames are numbered from 1 (frame1.jpg, frame2.jpg, ...)
    image_path = IMG_FOLDER_PREFIX + str(i) + IMG_EXTENSION
    img = cv2.imread(image_path)
    # Other image processing.
A better way to iterate over images would be os.listdir, glob, etc., but then you may have less control over the order in which the files are traversed.
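If the order matters (frame1.jpg, frame2.jpg, ...), here is a hedged sketch that sorts the glob results by the number embedded in each file name (the folder path is the one from the question):

import os
import re
import glob
import cv2

def frame_number(path):
    # pull the integer out of names like "frame12.jpg"; files without a number sort first
    m = re.search(r'(\d+)', os.path.basename(path))
    return int(m.group(1)) if m else -1

pattern = r'C:\Users\student\Desktop\Don\program (opencv version)\Images\move1\*.jpg'
for image_path in sorted(glob.glob(pattern), key=frame_number):
    img = cv2.imread(image_path)
    # ... image processing as above ...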
I'm trying to convert an UploadedFile to a PIL Image object to thumbnail it, and then convert the PIL Image object that my thumbnail function returns back into a File object. How can I do this?
The way to do this without having to write back to the filesystem, and then bring the file back into memory via an open call, is to make use of StringIO (io.BytesIO on Python 3, as in the later answers) and Django's InMemoryUploadedFile. Here is a quick sample of how you might do this. This assumes that you already have a thumbnailed image named 'thumb':
import StringIO
from django.core.files.uploadedfile import InMemoryUploadedFile
# Create a file-like object to write thumb data (thumb data previously created
# using PIL, and stored in variable 'thumb')
thumb_io = StringIO.StringIO()
thumb.save(thumb_io, format='JPEG')
# Create a new Django file-like object to be used in models as ImageField using
# InMemoryUploadedFile. If you look at the source in Django, a
# SimpleUploadedFile is essentially instantiated similarly to what is shown here
thumb_file = InMemoryUploadedFile(thumb_io, None, 'foo.jpg', 'image/jpeg',
thumb_io.len, None)
# Once you have a Django file-like object, you may assign it to your ImageField
# and save.
...
Let me know if you need more clarification. I have this working in my project right now, uploading to S3 using django-storages. This took me the better part of a day to properly find the solution here.
I've had to do this in a few steps; imagejpeg() in PHP requires a similar process. Not to say there's no way to keep things in memory, but this method gives you a file reference to both the original image and the thumb (usually a good idea in case you have to go back and change your thumb size).
save the file
open it from filesystem with PIL,
save to a temp directory with PIL,
then open as a Django file for this to work.
Model:
class YourModel(Model):
    img = models.ImageField(upload_to='photos')
    thumb = models.ImageField(upload_to='thumbs')
Usage:
#in upload code
uploaded = request.FILES['photo']
from django.core.files.base import ContentFile
file_content = ContentFile(uploaded.read())
new_file = YourModel()
#1 - get it into the DB and file system so we know the real path
new_file.img.save(str(new_file.id) + '.jpg', file_content)
new_file.save()
from PIL import Image
import os.path
#2, open it from the location django stuck it
thumb = Image.open(new_file.img.path)
thumb.thumbnail((100, 100))  # thumbnail() takes a (width, height) tuple
#make tmp filename based on id of the model
filename = str(new_file.id)
#3. save the thumbnail to a temp dir
temp_image = open(os.path.join('/tmp', filename), 'wb')  # JPEG data must be written in binary mode
thumb.save(temp_image, 'JPEG')
#4. read the temp file back into a File
from django.core.files import File
thumb_data = open(os.path.join('/tmp', filename), 'rb')  # read back in binary mode
thumb_file = File(thumb_data)
new_file.thumb.save(str(new_file.id) + '.jpg', thumb_file)
This is an actual working example for Python 3.5 and Django 1.10.
in views.py:
from io import BytesIO

from PIL import Image, ImageOps
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import InMemoryUploadedFile

def pill(image_io):
    im = Image.open(image_io)
    ltrb_border = (0, 0, 0, 10)
    im_with_border = ImageOps.expand(im, border=ltrb_border, fill='white')

    buffer = BytesIO()
    im_with_border.save(fp=buffer, format='JPEG')
    buff_val = buffer.getvalue()
    return ContentFile(buff_val)

def save_img(request):
    if request.POST:
        new_record = AddNewRecordForm(request.POST, request.FILES)
        pillow_image = pill(request.FILES['image'])
        image_file = InMemoryUploadedFile(pillow_image, None, 'foo.jpg', 'image/jpeg',
                                          pillow_image.size, None)
        request.FILES['image'] = image_file  # rewrite the image in POST so form validation succeeds
        new_record.image = request.FILES['image']
        new_record.save()
        return redirect(...)
Putting together comments and updates for Python 3+
from io import BytesIO

import requests
from PIL import Image
from django.core.files.base import ContentFile

# Read a file in
r = requests.get(image_url)
image = r.content
scr = Image.open(BytesIO(image))
# Perform an image operation like resize:
width, height = scr.size
new_width = 320
new_height = int(new_width * height / width)
img = scr.resize((new_width, new_height))
# Get the Django file object
thumb_io = BytesIO()
img.save(thumb_io, format='JPEG')
photo_smaller = ContentFile(thumb_io.getvalue())
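If you then want to attach photo_smaller to a model, the ImageField's save() accepts the ContentFile directly; the model and field names below are placeholders:

# hypothetical model with an ImageField named `thumbnail`
instance = MyModel.objects.get(pk=1)
instance.thumbnail.save('thumb.jpg', photo_smaller, save=True)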
To complete, for those who, like me, want to couple it with Django's FileSystemStorage: what I do here is upload an image, resize it to two sizes, and save both files.
utils.py
# (imports assumed: BytesIO, PIL.Image, InMemoryUploadedFile, FileSystemStorage,
#  plus the MEDIA_ROOT / THUMBNAIL_ROOT paths from your settings)
def resize_and_save(file):
    size = 1024, 1024
    thumbnail_size = 300, 300
    uploaded_file_url = getURLforFile(file, size, MEDIA_ROOT)
    uploaded_thumbnail_url = getURLforFile(file, thumbnail_size, THUMBNAIL_ROOT)
    return [uploaded_file_url, uploaded_thumbnail_url]

def getURLforFile(file, size, location):
    img = Image.open(file)
    img.thumbnail(size, Image.ANTIALIAS)  # Image.LANCZOS in newer Pillow versions
    thumb_io = BytesIO()
    img.save(thumb_io, format='JPEG')
    thumb_file = InMemoryUploadedFile(thumb_io, None, file.name, 'image/jpeg',
                                      thumb_io.getbuffer().nbytes, None)
    fs = FileSystemStorage(location=location)
    filename = fs.save(file.name, thumb_file)
    return fs.url(filename)
In views.py
if request.FILES:
    fl, thumbnail = resize_and_save(request.FILES['avatar'])
    # delete the old profile picture before saving the new one
    try:
        os.remove(BASE_DIR + user.userprofile.avatarURL)
    except Exception as e:
        pass
    user.userprofile.avatarURL = fl
    user.userprofile.thumbnailURL = thumbnail
    user.userprofile.save()
Here is an app that can do that: django-smartfields
from django.db import models
from smartfields import fields
from smartfields.dependencies import FileDependency
from smartfields.processors import ImageProcessor
class ImageModel(models.Model):
    image = fields.ImageField(dependencies=[
        FileDependency(processor=ImageProcessor(
            scale={'max_width': 150, 'max_height': 150}))
    ])
Make sure to pass keep_orphans=True to the field if you want to keep old files; otherwise they are cleaned up upon replacement.
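For example, an untested sketch based on the description above:

image = fields.ImageField(keep_orphans=True, dependencies=[
    FileDependency(processor=ImageProcessor(
        scale={'max_width': 150, 'max_height': 150}))
])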
For those using django-storages/-redux to store the image file on S3, here's the path I took (the example below creates a thumbnail of an existing image):
from PIL import Image
import StringIO
from django.core.files.storage import default_storage
try:
    # example 1: use a local file
    image = Image.open('my_image.jpg')
    # example 2: use a model's ImageField
    image = Image.open(my_model_instance.image_field)
    image.thumbnail((300, 200))
except IOError:
    pass  # handle exception

thumb_buffer = StringIO.StringIO()
image.save(thumb_buffer, format=image.format)
s3_thumb = default_storage.open('my_new_300x200_image.jpg', 'w')
s3_thumb.write(thumb_buffer.getvalue())
s3_thumb.close()