Python FTP Upload calling variable

I'm uploading a file to an FTP server. The connection settings are correct, but the upload doesn't use the right filename: the file is stored on the server as literally filename instead of capture-......
#!/usr/bin/python
#
# Lightweight Motion Detection using python picamera libraries
# based on code from raspberry pi forum by user utpalc
# modified by Claude Pageau for this working example
# ------------------------------------------------------------
# original code on github https://github.com/pageauc/picamera-motion
# This is sample code that can be used for further development
verbose = True
if verbose:
    print "Loading python libraries ....."
else:
    print "verbose output has been disabled verbose=False"
import os       # needed by checkImagePath() below; missing from the original post
import picamera
import picamera.array
import datetime
import time
import ftplib
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from fractions import Fraction
progName = os.path.basename(__file__)  # used in checkImagePath(); not defined in the original post
#Constants
SECONDS2MICRO = 1000000 # Constant for converting Shutter Speed in Seconds to Microseconds
# User Customizable Settings
imageDir = "images"
imagePath = "/home/pi/pimotion/" + imageDir
imageNamePrefix = 'capture-' # Prefix for all image file names. Eg front-
imageWidth = 1980
imageHeight = 1080
imageVFlip = False # Flip image Vertically
imageHFlip = False # Flip image Horizontally
imagePreview = False
numberSequence = False
threshold = 10 # How Much pixel changes
sensitivity = 100 # How many pixels change
nightISO = 800
nightShutSpeed = 6 * SECONDS2MICRO # seconds times conversion to microseconds constant
# Advanced Settings not normally changed
testWidth = 100
testHeight = 75
def checkImagePath(imagedir):
    # Find the path of this python script and set some global variables
    mypath = os.path.abspath(__file__)
    baseDir = mypath[0:mypath.rfind("/")+1]
    baseFileName = mypath[mypath.rfind("/")+1:mypath.rfind(".")]
    # Setup imagePath and create folder if it Does Not Exist.
    imagePath = baseDir + imagedir  # Where to save the images
    # if imagePath does not exist create the folder
    if not os.path.isdir(imagePath):
        if verbose:
            print "%s - Image Storage folder not found." % (progName)
            print "%s - Creating image storage folder %s " % (progName, imagePath)
        os.makedirs(imagePath)
    return imagePath
def takeDayImage(imageWidth, imageHeight, filename):
    if verbose:
        print "takeDayImage - Working ....."
    with picamera.PiCamera() as camera:
        camera.resolution = (imageWidth, imageHeight)
        # camera.rotation = cameraRotate  # Note use imageVFlip and imageHFlip variables
        if imagePreview:
            camera.start_preview()
        camera.vflip = imageVFlip
        camera.hflip = imageHFlip
        # Day Automatic Mode
        camera.exposure_mode = 'auto'
        camera.awb_mode = 'auto'
        camera.capture(filename)
    sftp = ftplib.FTP('ftpdomainname', 'myftpusername', 'myftppassword')  # Connect
    fp = open(filename)  # file to send
    sftp.storbinary('STOR filename', fp)  # Send the file  <-- the bug discussed below: stores it as literal name "filename"
    fp.close()  # Close file and FTP
    sftp.quit()
    if verbose:
        print "takeDayImage - Captured %s" % (filename)
    return filename
def takeNightImage(imageWidth, imageHeight, filename):
    if verbose:
        print "takeNightImage - Working ....."
    with picamera.PiCamera() as camera:
        camera.resolution = (imageWidth, imageHeight)
        if imagePreview:
            camera.start_preview()
        camera.vflip = imageVFlip
        camera.hflip = imageHFlip
        # Night time low light settings have long exposure times
        # Settings for Low Light Conditions
        # Set a frame rate of 1/6 fps, then set shutter
        # speed to 6s and ISO to approx 800 per nightISO variable
        camera.framerate = Fraction(1, 6)
        camera.shutter_speed = nightShutSpeed
        camera.exposure_mode = 'off'
        camera.iso = nightISO
        # Give the camera a good long time to measure AWB
        # (you may wish to use fixed AWB instead)
        time.sleep(10)
        camera.capture(filename)
    if verbose:
        print "checkNightMode - Captured %s" % (filename)
    return filename
def takeMotionImage(width, height, daymode):
    with picamera.PiCamera() as camera:
        time.sleep(1)
        camera.resolution = (width, height)
        with picamera.array.PiRGBArray(camera) as stream:
            if daymode:
                camera.exposure_mode = 'auto'
                camera.awb_mode = 'auto'
            else:
                # Take Low Light image
                # Set a framerate of 1/6 fps, then set shutter
                # speed to 6s and ISO to 800
                camera.framerate = Fraction(1, 6)
                camera.shutter_speed = nightShutSpeed
                camera.exposure_mode = 'off'
                camera.iso = nightISO
                # Give the camera a good long time to measure AWB
                # (you may wish to use fixed AWB instead)
                time.sleep(10)
            camera.capture(stream, format='rgb')
            return stream.array
def scanIfDay(width, height, daymode):
    motionFound = False  # missing in the original post; referenced by the loop below
    data1 = takeMotionImage(width, height, daymode)
    while not motionFound:
        data2 = takeMotionImage(width, height, daymode)
        pCnt = 0L
        diffCount = 0L
        for w in range(0, width):
            for h in range(0, height):
                # get the diff of the pixel. Conversion to int
                # is required to avoid unsigned short overflow.
                diff = abs(int(data1[h][w][1]) - int(data2[h][w][1]))
                if diff > threshold:
                    diffCount += 1
            if diffCount > sensitivity:
                break  # break outer loop.
        if diffCount > sensitivity:
            motionFound = True
        else:
            # print "Sum of all pixels=", pxCnt
            data2 = data1
    return motionFound
def scanMotion(width, height, daymode):
    motionFound = False
    data1 = takeMotionImage(width, height, daymode)
    while not motionFound:
        data2 = takeMotionImage(width, height, daymode)
        diffCount = 0L
        for w in range(0, width):
            for h in range(0, height):
                # get the diff of the pixel. Conversion to int
                # is required to avoid unsigned short overflow.
                diff = abs(int(data1[h][w][1]) - int(data2[h][w][1]))
                if diff > threshold:
                    diffCount += 1
            if diffCount > sensitivity:
                break  # break outer loop.
        if diffCount > sensitivity:
            motionFound = True
        else:
            data2 = data1
    return motionFound
def getFileName(imagePath, imageNamePrefix, currentCount):
    rightNow = datetime.datetime.now()
    if numberSequence:
        filename = imagePath + "/" + imageNamePrefix + str(currentCount) + ".jpg"
    else:
        filename = "%s/%s%04d%02d%02d-%02d%02d%02d.jpg" % (imagePath, imageNamePrefix,
            rightNow.year, rightNow.month, rightNow.day,
            rightNow.hour, rightNow.minute, rightNow.second)
    return filename
def motionDetection():
    print "Scanning for Motion threshold=%i sensitivity=%i ......" % (threshold, sensitivity)
    isDay = True
    currentCount = 1000
    while True:
        if scanMotion(testWidth, testHeight, isDay):
            filename = getFileName(imagePath, imageNamePrefix, currentCount)
            if numberSequence:
                currentCount += 1
            if isDay:
                takeDayImage(imageWidth, imageHeight, filename)
            else:
                takeNightImage(imageWidth, imageHeight, filename)
if __name__ == '__main__':
    try:
        motionDetection()
    finally:
        print ""
        print "+++++++++++++++"
        print "Exiting Program"
        print "+++++++++++++++"
        print ""

The problem is the string literal 'STOR filename': it stores every upload under the literal name filename. Instead of 'STOR filename', build the command from the actual name of the file:
sftp.storbinary('STOR ' + filename, fp)
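As a side note, filename here contains the local path, and storbinary wants a file opened in binary mode; also, ftplib speaks plain FTP despite the sftp variable name. A minimal sketch of the upload section with both points addressed (placeholder credentials kept from the question):

import os

ftp = ftplib.FTP('ftpdomainname', 'myftpusername', 'myftppassword')  # connect and log in
with open(filename, 'rb') as fp:                    # binary mode for storbinary
    ftp.storbinary('STOR ' + os.path.basename(filename), fp)  # strip the local directory
ftp.quit()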

Related

how to insert the data into pixels faster?

I'm currently working on a steganographic application. I'm taking each pixel value and embedding data into it one by one, and this sequential processing is taking a long time. The code:
import config_loader
import numpy as np
from PIL import Image
import encryption
import time
def byte2bin(bytestring):
    # print("\n from byte 2 bin\n")
    # print(bytestring)
    bitstring = bin(int.from_bytes(bytestring, byteorder="big"))
    return bitstring[2:]
def insert_data_in_pixel(raw_data, string, ptr, bits=1):
    # this function takes a pixel's data, converts it to binary,
    # and changes the last bit(s) to the secret
    color = bin(int(raw_data))[2:]
    # old = color  # troubleshooting lines
    color = color[:len(color) - bits]
    color = color + string[ptr: ptr + bits]
    # print("original-> ", old, "| |added bits ", string[ptr: ptr+bits], "| |Modified-> ", color)  # troubleshooting lines
    return np.uint8(int(color, 2))
def insert_length(length, new_img):
    # inserts length of our secret; the length itself is obfuscated
    secret_string_len = '<l>' + str(int(length / 4) + 16) + '<l>'  # Added ambiguity
    secret_string_len = ''.join(format(_, '08b') for _ in bytearray(str(secret_string_len), encoding='utf-8'))
    length = len(secret_string_len)
    str_len_ptr = 0
    for y in range(length):
        x = 0
        if str_len_ptr < length:
            new_img[x][y][0] = insert_data_in_pixel(new_img[x][y][0], secret_string_len, str_len_ptr, bits=3)
            str_len_ptr += 3
            if str_len_ptr == length:
                break
            new_img[x][y][1] = insert_data_in_pixel(new_img[x][y][1], secret_string_len, str_len_ptr, bits=3)
            str_len_ptr += 3
            if str_len_ptr == length:
                break
            new_img[x][y][2] = insert_data_in_pixel(new_img[x][y][2], secret_string_len, str_len_ptr, bits=2)
            str_len_ptr += 2
            if str_len_ptr == length:
                break
def secret_Loader():  # loads secret from a file
    with open('Message.txt', 'r', encoding='utf-8', errors='ignore') as file:
        lines = file.readlines()
        message = ''.join(lines)
    key = config_loader.read('''data['key']''')
    # print(key)
    enc_message = encryption.encrypt(message, key)
    return enc_message
def insert():
    start = time.time()
    image_path = config_loader.read('''data['environment']['cover_image']''')
    photo = Image.open(image_path).convert('RGB')  # just insert the image name here
    data = np.asarray(photo).copy()
    width, height = photo.size
    secret = byte2bin(secret_Loader())
    secret_pointer = 0
    lensecret = len(secret)
    insert_length(lensecret, data)
    insertion = time.time()
    for x in range(1, height):
        for y in range(width):
            if lensecret > secret_pointer:
                # RED
                data[x][y][0] = insert_data_in_pixel(data[x][y][0], secret, secret_pointer, bits=2)
                secret_pointer += 2
                if lensecret == secret_pointer:
                    break
                # Green
                data[x][y][1] = insert_data_in_pixel(data[x][y][1], secret, secret_pointer, bits=2)
                secret_pointer += 2
                if lensecret == secret_pointer:
                    break
                # Blue
                data[x][y][2] = insert_data_in_pixel(data[x][y][2], secret, secret_pointer, bits=1)
                secret_pointer += 1
                if lensecret == secret_pointer:
                    break
    print("data insertion", time.time() - insertion)
    generation = time.time()
    # print(data)
    data = Image.fromarray(data)
    print("image generation in ", time.time() - generation)
    # data.show()
    _ = time.time()
    data.save(r'stg.PNG')
    print("saving time ", time.time() - _)
    print('Executed in->', time.time() - start)

if __name__ == '__main__':
    insert()
The timings:
encryption in 1.0841524600982666
data insertion 9.439783811569214
image generation in 0.039893388748168945
saving time 6.283206939697266
Executed in-> 17.11327576637268
I thought about multithreading, but that is unreliable since every bit in the data is important and its position in the sequence is also important.
P.S. The data insertion time is for 10000 lines of this:
this is a message to test the limit of the program let's check when it breaks and how, also i'm running out of words0
So this isn't bad, but if it can be improved, how can I achieve it?
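One direction, not from the original post: the per-channel Python calls (bin(), string slicing, int()) can be replaced by NumPy bitwise operations applied to the whole array at once. A minimal sketch, assuming a uniform 2 bits per channel rather than the 2/2/1 red/green/blue split above, so the matching extractor would need the same change:

import numpy as np

def embed_bits_vectorized(img, bitstring, bits=2):
    # hypothetical helper: img is the uint8 RGB array, bitstring is '0'/'1' text;
    # assumes the image has at least len(bitstring)/bits channel values
    pad = (-len(bitstring)) % bits
    bitstring += "0" * pad
    chunks = np.array([int(bitstring[i:i + bits], 2)
                       for i in range(0, len(bitstring), bits)], dtype=np.uint8)
    flat = img.reshape(-1).copy()
    n = len(chunks)
    mask = np.uint8((0xFF << bits) & 0xFF)   # e.g. 0b11111100 for bits=2
    # clear the low bits of the first n channel values, then OR in the payload
    flat[:n] = (flat[:n] & mask) | chunks
    return flat.reshape(img.shape)

The clear-and-OR is the same operation insert_data_in_pixel performs per channel; only the chunk-building loop remains in Python, and the per-pixel work collapses into two vectorized array operations.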

How to run a script but print output only after 1 min the first time, then every 3 min for as long as the script runs? (Python)

I am working with YOLOv4 to process video frames for object detection of one class: Human. Every time a human is detected in a frame, it prints a line in the terminal, "Number of humans detected:", with the count for that frame. I want the code to run as is, but instead of printing that output for every frame, it should print the output for the frame being processed at the 1-minute mark of the video, and thereafter at every 3-minute mark until the video is fully processed. So for a 5-minute video the statement would be printed at video timestamps 1:00 and 4:00; for an 8-minute video at 1:00, 4:00, 7:00, and so on. I tried the schedule module, but it seems to just schedule the entire code to run after 1 minute.
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import datetime
import schedule
import time
from time import sleep
file = "test2"
input = "C:/Users/asmita.nandi/Downloads/" + file + ".mp4"
output = "C:/Users/asmita.nandi/Downloads/" + file + ".avi"
net = cv2.dnn.readNet( "C:/Users/asmita.nandi/Downloads/custom-yolov4-tiny_human-608
(1).cfg","C:/Users/asmita.nandi/Downloads/custom-yolov4-tiny-detector_human.weights")
labelsPath = "C:/Users/asmita.nandi/Downloads/human_label.txt"
def event(input, output, net, labelsPath):
    LABELS = open(labelsPath).read().strip().split("\n")
    np.random.seed(1)
    cmap = plt.get_cmap('tab20b')
    colors = [cmap(i)[:3] for i in np.linspace(0, 1, 6)]
    CONF_THRESH, NMS_THRESH = 0.25, 0.25
    vs = cv2.VideoCapture(input)
    fp = vs.get(cv2.CAP_PROP_FPS)
    fps = FPS().start()  # missing in the original post; used below
    writer = None
    W = None
    H = None
    totalFrames = 0
    TotalHuman = 0
    while True:
        frame = vs.read()
        frame = frame[1] if input else frame
        if input is not None and frame is None:
            break
        (H, W) = frame.shape[:2]
        print(H, W)
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        if output is not None and writer is None:
            fourcc = cv2.VideoWriter_fourcc(*"MJPG")
            writer = cv2.VideoWriter(output, fourcc, fp, (W, H), True)
        ln = net.getLayerNames()
        ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (608, 608), swapRB=True, crop=False)
        net.setInput(blob)
        start = time.time()
        layerOutputs = net.forward(ln)
        print(layerOutputs)
        end = time.time()
        boxes = []
        confidences = []
        classIDs = []
        (H, W) = frame.shape[:2]
        # loop over each of the layer outputs
        for output in layerOutputs:
            # loop over each of the detections
            for detection in output:
                scores = detection[5:]
                # print(detection)
                classID = np.argmax(scores)
                confidence = scores[classID]
                if confidence > CONF_THRESH:
                    # print("Box")
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONF_THRESH, NMS_THRESH)
        ObjectCount = {}
        if len(idxs) > 0:
            for i in idxs.flatten():
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                color = colors[classIDs[i]]
                color = [i * 255 for i in color]
                class_name = LABELS[classIDs[i]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                # if class_name == "Human":
                cv2.putText(frame, class_name, (x, y - 10), 0, 0.5, color, 2)
                obj, conf = LABELS[classIDs[i]], confidences[i]
                if obj not in ObjectCount.keys():
                    ObjectCount[obj] = 1
                else:
                    ObjectCount[obj] += 1
                allvalues = []
                allvalues.append(ObjectCount[obj])
            print("Number of Humans detected ", max(allvalues))
        if writer is not None:
            writer.write(frame)
        # show the output frame
        # cv2_imshow(frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        # increment the total number of frames processed thus far and
        # then update the FPS counter
        totalFrames += 1
        fps.update()
    # stop the timer and display FPS information
    # for (objectID, centroid) in objects.items():
    #     print(objectID, centroids)
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print(totalFrames)
    # print(info)
    # check to see if we need to release the video writer pointer
    if writer is not None:
        writer.release()
    # if we are not using a video file, stop the camera video stream
    if not input:
        vs.stop()
    # otherwise, release the video file pointer
    else:
        vs.release()
    # close any open windows
    cv2.destroyAllWindows()
schedule.every(1).minutes.do(event(input, output, net, labelsPath))
while 1:
    schedule.run_pending()
    time.sleep(1)
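Not an attempt from the post, but one way to get the requested behaviour: since the report should follow the video's own timeline, derive the timestamp from the frame counter and the file's FPS instead of wall-clock scheduling. A minimal sketch with the detection elided:

import cv2

cap = cv2.VideoCapture(input)         # `input` as defined in the question
fps = cap.get(cv2.CAP_PROP_FPS)
next_mark = 60.0                      # first report at the 1:00 video mark
frame_idx = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # ... run the YOLO detection on `frame` exactly as in event() above ...
    if frame_idx / fps >= next_mark:  # video time reached the next mark
        print("Number of Humans detected at {:.0f}s: ...".format(next_mark))
        next_mark += 180.0            # then 4:00, 7:00, ...
    frame_idx += 1
cap.release()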

Convert Video to Frames in Python - 1 FPS

I have a video that is 30 fps, and I need to extract frames from it at 1 FPS. How is this possible in Python?
I have the code below, which I got online, but I am not sure whether it extracts frames at 1 FPS.
Please help!
# Importing all necessary libraries
import cv2
import os

# Read the video from specified path
cam = cv2.VideoCapture("C:\\Users\\Admin\\PycharmProjects\\project_1\\openCV.mp4")
try:
    # creating a folder named data
    if not os.path.exists('data'):
        os.makedirs('data')
# if not created then raise error
except OSError:
    print('Error: Creating directory of data')
# frame
currentframe = 0
while True:
    # reading from frame
    ret, frame = cam.read()
    if ret:
        # if video is still left continue creating images
        name = './data/frame' + str(currentframe) + '.jpg'
        print('Creating...' + name)
        # writing the extracted images
        cv2.imwrite(name, frame)
        # increasing counter so that it will
        # show how many frames are created
        currentframe += 1
    else:
        break
# Release all space and windows once done
cam.release()
cv2.destroyAllWindows()
KPS = 1  # Target Keyframes Per Second
VIDEO_PATH = "video1.avi"  # "path/to/video/folder"  # Change this
IMAGE_PATH = "images/"  # "path/to/image/folder"  # ...and this
EXTENSION = ".png"

cap = cv2.VideoCapture(VIDEO_PATH)
fps = round(cap.get(cv2.CAP_PROP_FPS))
print(fps)
# exit()
hop = round(fps / KPS)
curr_frame = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if curr_frame % hop == 0:
        name = IMAGE_PATH + "_" + str(curr_frame) + EXTENSION
        cv2.imwrite(name, frame)
    curr_frame += 1
cap.release()
This is the code I use when I need to extract frames from videos:
# pip install opencv-python
import cv2
import numpy as np
# video.mp4 is a video of 9 seconds
filename = "video.mp4"
cap = cv2.VideoCapture(filename)
cap.set(cv2.CAP_PROP_POS_AVI_RATIO,0)
frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
videoFPS = int(cap.get(cv2.CAP_PROP_FPS))
print (f"frameCount: {frameCount}")
print (f"frameWidth: {frameWidth}")
print (f"frameHeight: {frameHeight}")
print (f"videoFPS: {videoFPS}")
buf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype('uint8'))
fc = 0
ret = True
while fc < frameCount:
    ret, buf[fc] = cap.read()
    fc += 1
cap.release()
videoArray = buf
print (f"DURATION: {frameCount/videoFPS}")
You can see how to extract properties of the video such as frameCount, frameWidth, frameHeight, and videoFPS.
At the end, the duration is the number of frames divided by the videoFPS variable.
All the frames are stored inside buf, so if you want only one frame per second, iterate over buf and take every videoFPS-th frame; for this 9-second video that leaves 9 frames.
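For example, a one-line sketch of that selection, using the names from the answer above:

one_fps = buf[::videoFPS]   # keep every videoFPS-th frame -> one frame per second
print(f"kept {len(one_fps)} of {frameCount} frames")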
Here's the code that I found works best.
import os
import cv2
import moviepy.editor

def getFrames(vid, output, rate=0.5, frameName='frame'):
    vidcap = cv2.VideoCapture(vid)
    clip = moviepy.editor.VideoFileClip(vid)
    seconds = clip.duration
    print('duration: ' + str(seconds))
    count = 0
    frame = 0
    if not os.path.isdir(output):
        os.mkdir(output)
    success = True
    while success:
        vidcap.set(cv2.CAP_PROP_POS_MSEC, frame * 1000)
        success, image = vidcap.read()
        ## Stop when last frame is identified
        print(frame)
        if frame > seconds or not success:
            break
        print('extracting frame ' + frameName + '-%d.png' % count)
        name = output + '/' + frameName + '-%d.png' % count  # save frame as PNG file
        cv2.imwrite(name, image)
        frame += rate
        count += 1
The value for the rate argument is 1/fps, where fps is the target extraction rate: the function advances by rate seconds per iteration, so rate=1 captures one frame per second and the default rate=0.5 captures two.
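So, for the 1 FPS asked about in the question, a call might look like this (paths and folder name hypothetical):

getFrames("video.mp4", "frames_out", rate=1, frameName="frame")  # rate = 1/1 -> 1 FPS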

trouble reading file in same script after print export via sys.stdout to the same file

I am using an audio analysis script that outputs a bunch of print calls. After the prints are all executed and exported to an external text file via sys.stdout, I want to read that text file in the same script. However, the file comes out empty within the script. I have tried f.flush() and f.seek(0), which don't help. When I open the text file after the script has completed, the contents of all the print calls appear as expected. It looks like the script has to complete in order for sys.stdout to write the contents to the external file. Can I ensure that this step happens prior to the f.read() at the end of my script?
import sys
sys.path.append(".")
#print(sys.path)
import time
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
start_time = time.time()
import numba
import numpy as np
import librosa
def print_color(msg, color=32):
    if sys.stdout.isatty():
        print("\033[{color}m {msg} \033[0m".format(msg=msg, color=color))
    else:
        print(" *** {msg} ***".format(msg=msg))
# TODO: configure this via cmdline
SAMPLE_RATE = 44100 # Hz
ELF_THRESHOLD_DB = -22 #dB
#OLDBUCKET N_FFT = 16384
N_FFT = 13000
FIRST_BASS_BUCKET = 0
LAST_BASS_BUCKET = 11
LAST_ANALYSIS_BUCKET = 64
DEBUG_ENABLED = False
if len(sys.argv) < 2:
    print("ElfTag: Extremely Low Frequency Audio Tagger")
    print("Usage: %(cmd)s </path/to/directory>" % {"cmd": sys.argv[0]})
    sys.exit(0)
#filename = sys.argv[1]
directory = sys.argv[1]
print(directory)
#filename = librosa.util.example_audio_file()
#filename = "/Volumes/SDXC128GB/ElfTag/sp/02. Luma.mp3"
def debug(msg):
    if DEBUG_ENABLED:
        print(msg)
files = librosa.util.find_files(directory)
print("Total Tracks: ",len(files))
queue = (len(files))
class Tee:
    def write(self, *args, **kwargs):
        self.out1.write(*args, **kwargs)
        self.out2.write(*args, **kwargs)
    def __init__(self, out1, out2):
        self.out1 = out1
        self.out2 = out2
    def flush(self):
        pass
import sys
logfile = input ("Enter Filename for Log File: ")
sys.stdout = Tee(open(logfile, "w"), sys.stdout)
for filename in files:
    try:
        queue = queue - 1
        print(queue, "Songs Remaining")
        print("Loading %(filename)s" % {"filename": filename})
        y, sr = librosa.load(filename, sr=None)
        duration = librosa.core.get_duration(y=y, sr=sr)
        print("Detected sample rate: %(sr)d Hz, duration: %(duration)f seconds." % {"sr": sr, "duration": duration})
        bin_size_hz = float(sr) / N_FFT
        num_bins = N_FFT / 2 + 1
        print("Using transform length of %(n_fft)d for FFT, which gives us %(num_bins)d bins at %(bin_size_hz)f Hz per bin." % {"n_fft": N_FFT, "num_bins": num_bins, "bin_size_hz": bin_size_hz})
        start_hz = bin_size_hz * FIRST_BASS_BUCKET
        end_hz = bin_size_hz * (LAST_BASS_BUCKET + 1)
        anal_hz = bin_size_hz * (LAST_ANALYSIS_BUCKET + 1)
        print("Detecting deep bass as peaks between %(start)f Hz and %(end)f Hz above %(db)d dB chosen from frequency range below %(anal)f Hz." % {"start": start_hz, "end": end_hz, "db": ELF_THRESHOLD_DB, "anal": anal_hz})
        #y = librosa.core.to_mono(y)
        D = librosa.stft(y, n_fft=N_FFT)
        tempo, beats = librosa.beat.beat_track(y=y, sr=sr, units='frames', hop_length=512)
        numBeats = beats.shape[0]
        print("Estimated tempo: %(tempo)f." % {"tempo": tempo})
        print("Number of beats detected: %(beats)d." % {"beats": numBeats})
        # Split into Harmonic and Percussive layers to aid with beat detection
        #H, P = librosa.decompose.hpss(D)
        P = D
        P = librosa.amplitude_to_db(P, ref=np.max)
        totalFrames = P.shape[1]
        print("Total frames: %(frames)d, about %(secPerFrame)f seconds per frame" % {"frames": totalFrames, "secPerFrame": (duration / totalFrames)})
        # Select significant bass frame rows
        Pbass = P[FIRST_BASS_BUCKET:(LAST_ANALYSIS_BUCKET + 1)]
        firstFrame = np.argmax(Pbass.max(axis=0) > -80)
        debug("firstFrame")
        debug(firstFrame)
        Pbass = Pbass[:, firstFrame:]
        debug("Pbass")
        debug(Pbass)
        localmaxBass = librosa.util.localmax(Pbass)
        debug("localmaxBass")
        debug(localmaxBass)
        maskBass = localmaxBass[FIRST_BASS_BUCKET:(LAST_BASS_BUCKET + 1)]
        debug("maskBass")
        debug(maskBass)
        ourBass = Pbass[FIRST_BASS_BUCKET:(LAST_BASS_BUCKET + 1)]
        debug("ourBass")
        debug(ourBass)
        filteredBass = (ourBass > ELF_THRESHOLD_DB)
        debug("filteredBass")
        debug(filteredBass)
        peakFilteredBass = np.multiply(filteredBass, maskBass)
        debug("peakFilteredBass")
        debug(peakFilteredBass)
        vertBassFrames = np.sum(filteredBass, axis=0)
        debug("vertBassFrames")
        debug(vertBassFrames)
        horizBassFrames = (vertBassFrames > 0)
        debug("horizBassFrames")
        debug(horizBassFrames)
        deepBassFrames = np.nonzero(horizBassFrames)[0]
        debug("deepBassFrames")
        debug(deepBassFrames.shape)
        debug(deepBassFrames)
        # Adjacent Deep Bass detector
        shiftedHorizBassFrames = np.append(horizBassFrames[1:], [False])
        andedShiftedHorizBassFrames = np.logical_and(horizBassFrames, shiftedHorizBassFrames)
        adjacentHorizBassFrames = np.logical_and(andedShiftedHorizBassFrames, np.append(andedShiftedHorizBassFrames[1:], [False]))
        debug("adjacentHorizBassFrames")
        debug(adjacentHorizBassFrames)
        # /End Adjacent Deep Bass detector
        debug("beats")
        debug(beats.shape)
        debug(beats)
        deepBassBeats = np.intersect1d(deepBassFrames, beats, assume_unique=True)
        debug("deepBassBeats")
        debug(deepBassBeats.shape)
        debug(deepBassBeats)
        numDeepBeats = deepBassBeats.shape[0]
        print("Number of deep beats: %(numDeepBeats)d" % {"numDeepBeats": numDeepBeats})
        deepBeatsPercentage = float(numDeepBeats) / numBeats
        print("Percentage of deep beats: %(deepBeatsPercentage)f" % {"deepBeatsPercentage": deepBeatsPercentage})
        numBassFrames = horizBassFrames.sum()
        print("Number of frames with deep bass: %(frames)d." % {"frames": numBassFrames})
        numAdjacentBassFrames = adjacentHorizBassFrames.sum()
        print("Number of adjacent frames with deep bass: %(frames)d." % {"frames": numAdjacentBassFrames})
        bassFramesPerBeat = float(numBassFrames) / numBeats
        print("Number of deep bass frames per beat: %(bassFramesPerBeat)f" % {"bassFramesPerBeat": bassFramesPerBeat})
        bassFramesPercentage = float(numBassFrames) / totalFrames
        print("Percentage of deep bass frames: %(bassFramesPercentage)f" % {"bassFramesPercentage": bassFramesPercentage})
        adjacentBassFramesPercentage = float(numAdjacentBassFrames) / totalFrames
        print("Percentage of adjacent deep bass frames: %(bassFramesPercentage)f" % {"bassFramesPercentage": adjacentBassFramesPercentage})
        #if %(bassFramesPercentage)f" % {"bassFramesPercentage": adjacentBassFramesPercentage} >= "0.30":
        #    print("DEEP BASS TRACK NEEDS TAGGING")
        print(("--- %s seconds ---" % (time.time() - start_time)))
        #sys.exit(0)
    except:
        continue
f = open(logfile, 'r+')
f.flush()
f.seek(0)
fh = f.read()
print(fh.rstrip())
You're not flushing the output file that you're writing to. Implement the flush method in your Tee class instead of leaving it a no-op:
def flush(self):
    self.out1.flush()
    self.out2.flush()
Then call sys.stdout.flush() at the end of your loop, before reading the file back.
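With both flushes in place, the tail of the script can read the log back reliably; a minimal sketch using the question's logfile variable:

sys.stdout.flush()              # pushes buffered output through the Tee to the log file
with open(logfile, 'r') as f:   # the file now contains everything printed so far
    print(f.read().rstrip())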

Stitching large images together - Python.exe has stopped working

I am using a python script to stitch large images (10000 by 10000 pixels) together in a row. I can stitch the first six out of eight images together one at a time absolutely fine. However when I stitch more images beyond this point, I get 'Python.exe has stopped working'.
Code below:
from PIL import Image
import getopt, sys
args = sys.argv
ImageFileList = []
MasterWidth = 0
MasterHeight = 0
filename = ""
row = True
print """
Usage: python imageconnector.py [OPTIONS] [FILENAME] [FILE1] [FILE2] ...[FILE...]...
Combines [FILE1,2,...] into one file called [FILENAME]
OPTIONS:
-o <r/c> Stitch images into a row or a column. Default is row.
-c <colour> Change background fill colour. Default is black.
"""
def main(argv):
    global args, MasterWidth, MasterHeight, ImageFileList, filename, deletename
    try:
        opts, args_files = getopt.getopt(argv, 'o:c:')
    except getopt.GetoptError:
        print "Illegal arguments!"
        sys.exit(-1)
    if '-o' in args:
        index = args.index('-o')
        cr = args[index + 1]
        if cr == 'r':
            row = True
        elif cr == 'c':
            row = False
        else:
            row = True
    if '-c' in args:
        index = args.index('-c')
        colour = args[index + 1]
    else:
        colour = 'black'
    filename = args_files.pop(0)
    print('Combining the following images:')
    if row:
        for x in args_files:
            try:
                im = Image.open(x)
                print(x)
                MasterWidth += im.size[0]
                if im.size[1] > MasterHeight:
                    MasterHeight = im.size[1]
                else:
                    MasterHeight = MasterHeight
                ImageFileList.append(x)
            except:
                raise
        final_image = Image.new("RGB", (MasterWidth, MasterHeight), colour)
        offset = 0
        for x in ImageFileList:
            temp_image = Image.open(x)
            final_image.paste(temp_image, (offset, 0))
            offset += temp_image.size[0]
        final_image.save(filename)
    else:
        for x in args_files:
            try:
                im = Image.open(x)
                print(x)
                MasterHeight += im.size[1]
                if im.size[0] > MasterWidth:
                    MasterWidth = im.size[0]
                else:
                    MasterWidth = MasterWidth
                ImageFileList.append(x)
            except:
                raise
        final_image = Image.new("RGB", (MasterWidth, MasterHeight), colour)
        offset = 0
        for x in ImageFileList:
            temp_image = Image.open(x)
            final_image.paste(temp_image, (0, offset))
            offset += temp_image.size[1]
        final_image.save(filename)

if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except IOError:
        print 'One or more of the input image files is not valid.'
        sys.exit(-1)
    except SystemExit:
        pass
    except ValueError:
        print 'Not a valid colour value.'
The answer was that the Python Imaging Library (PIL) build was 32-bit. I used the updated Pillow library (http://www.lfd.uci.edu/~gohlke/pythonlibs/thb9cnq7/Pillow-2.5.3.win-amd64-py2.7.exe) and it works perfectly. It uses a LOT of memory to stitch 60k-pixel images, though!
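For scale (an estimate, not from the original post): eight 10000 x 10000 RGB tiles in a row form an 80000 x 10000 canvas, which is 80000 * 10000 * 3 bytes = 2.4 GB for the output image alone. That exceeds the roughly 2 GB of address space available to a 32-bit process, which is consistent with the crash appearing only after the first six or so images were stitched.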
