I have written a Python script for Unity applications (on Windows) to parse logs. It just opens .log files, goes through them and creates new ones. I need to launch it after the Unity application is closed. The easiest way is to launch it manually after closing Unity, but is there any way to automate that process and make the script start after the Unity process is terminated? If so - how? It's a simple console application that doesn't require any input; all that is needed is to run it.
# Standard-library imports for log discovery (glob), filesystem access (os),
# timestamps (time), project-name extraction (re) and user lookup (getpass).
import glob
import os
import time
import re
import getpass
# Current Windows user name, used to build the LocalLow and Desktop paths below.
user = getpass.getuser()
def exception_search(full_line):
    """Filter a Unity log and return only the exception blocks.

    full_line -- the complete text of a Player.log file.
    Returns a single string: a "List of exceptions:" header followed by each
    exception block (with up to three lines of surrounding context), with
    consecutive identical blocks collapsed into an "It was repeated N times"
    note.
    """
    lines = full_line.split('\n')
    result = ["List of exceptions:"]
    previous_exc = []  # last reported exception block, for de-duplication
    repeat = 0         # how many times the previous block repeated in a row
    i = 0
    total = len(lines)
    # FIX: the original used "for i in range(...)" and also incremented i in
    # an inner while loop; the for statement reset i on every iteration, so
    # nested "Exception:" lines inside a block were re-processed.  A manual
    # while loop keeps the index where the inner scan left it.
    while i < total:
        if "Exception:" in lines[i]:
            # If the two lines above are not the tail of another exception
            # block, they are ordinary log output: keep three lines of context.
            # FIX: the i >= 3 guard prevents negative indices wrapping around
            # to the end of the list for exceptions near the top of the log.
            if i >= 3 and "(Filename: <" not in lines[i - 2] \
                    and "(Filename: currently not" not in lines[i - 2]:
                result.extend(lines[i - 3:i])
                previous_exc = []  # context printed -> reset de-dup state
            # Collect the whole block up to its "(Filename: ...)" terminator.
            # FIX: bounds check -- the original could index past the end when
            # the last exception had no terminator line.
            current_exc = []
            while i < total and "(Filename: <" not in lines[i] \
                    and "(Filename: currently not" not in lines[i]:
                current_exc.append(lines[i])
                i += 1
            if i < total:
                current_exc.append(lines[i])  # the "(Filename: ...)" line
            current_exc.append("")  # blank separator for readability
            if previous_exc != current_exc:
                result.extend(current_exc)
                if repeat != 0:
                    result.append("It was repeated " + str(repeat) + " times")
                    result.append("")
                    repeat = 0
                previous_exc = current_exc
            else:
                # same block as before: just count the repetition
                repeat += 1
        i += 1
    # FIX: flush a trailing repeat count -- the original silently dropped it
    # when the log ended with repeated exceptions.
    if repeat != 0:
        result.append("It was repeated " + str(repeat) + " times")
        result.append("")
    return '\n'.join(result)
# Root folder where Unity stores per-project player logs on Windows.
logpath = "C:\\Users\\" + user +"\\AppData\\LocalLow" #Path where all logs are contained
# Recursively collect Player.log / Player-prev.log files.
# NOTE(review): the pattern mixes "/" with the backslash path above; glob on
# Windows accepts this, but the regexes below assume backslash separators --
# verify the separators glob actually returns here.
text_files = glob.glob(logpath + "/**/Playe*.log", recursive = True)
for path in text_files:
    # Project name = the folder directly containing the log file.
    projname = re.match(r"(.+\\)(.+)(\\Player\-prev.log)", path)
    if projname is None:
        # NOTE(review): if neither pattern matches, .group(2) is called on
        # None and raises AttributeError -- confirm every globbed path fits.
        projname = re.match(r"(.+\\)(.+)(\\Player.log)", path).group(2)
    else:
        projname = projname.group(2)
    # Last-modified time of the log, with ":" replaced ("12h 30m ...") so the
    # timestamp is a legal Windows filename.
    lastmod = time.asctime(time.localtime(os.path.getmtime(path))).replace(":", "h ", 1).replace(":", "m ")
    filecontent = ""
    with open(path,'r', encoding="utf-8") as file: #copying the contents of the original log file
        filecontent = file.read()
    newpath = 'C:\\Users\\' + user + '\\Desktop\\logs'
    if not os.path.isdir(newpath): #creating a directory for logs if it's not around
        os.mkdir(newpath)
    newpath = newpath + '\\'
    if not os.path.isdir(newpath + projname): #creating a directory for the project if it's not around
        os.mkdir(newpath + projname)
    # Destination of the verbatim copy; skipped when it already exists so the
    # same log is never copied twice.
    filepath = os.path.join(newpath + projname, "full " + projname + " " + lastmod + ".log")
    if not os.path.isfile(filepath):
        with open(filepath,'x', encoding="utf-8") as file:
            temp = file.write(filecontent)
    filepath = filepath.replace("full ", "Exception ")
    filecontent = exception_search(filecontent) #keep only the exception lines
    # len > 19 means longer than the bare "List of exceptions:" header,
    # i.e. at least one exception was actually found.
    if (not os.path.isfile(filepath)) and (len(filecontent) > 19):
        with open(filepath,'x', encoding="utf-8") as file:
            temp = file.write(filecontent)
As usual, Unity, when run via the command line, only returns once it has exited.
So you can simply have a wrapper script like e.g. in batch (or whatever language you use)
C:\Path\To\Your\UnityInstall\Unity.exe -batchmode -username "name#example.edu.uk" -password "XXXXXXXXXXXXX" -serial "E3-XXXX-XXXX-XXXX-XXXX-XXXX" –quit -projectPath "C:\Path\To\Your\UnityProjectFolder" -logFile "C:\Path\ToWrite\The\Log\To"
C:\Path\To\Your\PythonInstall\Python.exe C:\Path\To\Your\Script.py
You could also directly run your stuff from within Unity itself.
But then of course you would need to include that script into every project you want to use this way ;)
But yes, you can use [InitializeOnLoadMethod] and Application.quitting
// Editor-side hook: launches the log-parsing Python script when the Unity
// editor/application quits.
public class test
{
    // Runs automatically when the editor loads scripts; subscribes the quit
    // handler exactly once per domain reload.
    [InitializeOnLoadMethod]
    private static void Init()
    {
        Application.quitting += OnQuit;
    }

    // Starts the Python interpreter with the parsing script as its argument.
    private static void OnQuit()
    {
        var p = new Process
        {
            StartInfo = new ProcessStartInfo
            {
                // FIX: C# verbatim string literals use '@', not '#'
                // ('#"..."' does not compile).
                FileName = @"C:\PATH\TO\YOUR\PYTHONINSTALL\PYTHON.exe",
                WorkingDirectory = @"C:\PATH\TO\YOUR\PYTHONINSTALL",
                // FIX: backslashes must be verbatim (or escaped) here too,
                // otherwise "\P", "\T" etc. are invalid escape sequences.
                Arguments = @"C:\PATH\TO\YOUR\SCRIPT.py",
                UseShellExecute = false,
                RedirectStandardOutput = true,
                CreateNoWindow = false,
                WindowStyle = ProcessWindowStyle.Normal
            }
        };
        p.Start();
    }
}
Related
The goal is to create a naming system for duplicate strings.
If the name is hotdog.jpg and I want to make a duplicate and that the next string is hotdog_1.jpg. And so on. But the problem I'm facing is that if you make a duplicate of hotdog_1.jpg we get hotdog_1_1.jpg. I tried to just check if the string ends with "(underscore) + number". But then the problem is that the string could also have "(underscore) + number + number". like hotdog_13.jpg.
is there any good way to implement this?
# Fragment of a larger class: tries to insert an incremented duplicate name
# after the current image.
for current_image_name in self.images_names:
    extension = os.path.splitext(current_image_name)[1]
    current_image_name = os.path.splitext(current_image_name)[0]
    # BUG(review): only recognises single-digit suffixes ("_9" but not "_13"),
    # and raises IndexError for stems shorter than two characters.
    if current_image_name[-2] == '_' and current_image_name[-1].isdigit():
        self.images_names.insert(self.current_index + 1, current_image_name[:-1] + str(int(self.images_names[self.current_index][-1]) + 1) + extension)
        name_change = True
# NOTE(review): name_change is never initialised before the loop, so this
# check raises NameError when no name matched -- presumably it should be set
# to False above; confirm.
if not name_change:
    self.images_names.insert(self.current_index + 1, self.images_names[self.current_index] + '_1')
You can do this with a simple method that does some string magic.
EDITED after reading the comments on the question. Added a method to handle lists
code
def inc_filename(filename: str) -> str:
    """Return *filename* with its trailing "_<number>" suffix incremented.

    "hotdog" -> "hotdog_1", "hotdog_3.jpg" -> "hotdog_4.jpg".  Only the part
    after the LAST underscore is treated as a counter; the extension (text
    after the last dot, if any) is preserved untouched.
    """
    # Split off the extension, if there is one.
    stem, dot, ext = filename.rpartition(".")
    if dot:
        extension = "." + ext
        filename = stem
    else:
        extension = ""
    # The text after the last underscore is the counter candidate.
    base, _sep, tail = filename.rpartition("_")
    try:
        number = int(tail) + 1
        newfilename = base
    except ValueError:
        # No numeric suffix yet -> start counting at 1.
        number = 1
        newfilename = filename
    return f"{newfilename}_{number}{extension}"
def inc_filelist(filelist: list) -> list:
    """Increment every filename in *filelist*, skipping any name that would
    collide with the input list or with a name generated earlier."""
    result = []
    for original in filelist:
        candidate = inc_filename(original)
        # Keep bumping the counter until the name is free in both lists.
        while candidate in filelist or candidate in result:
            candidate = inc_filename(candidate)
        result.append(candidate)
    return result
# Demo: single-name behaviour.
print(inc_filename("hotdog_1"))
print(inc_filename("hotdog"))
print(inc_filename("hotdog_1.jpg"))
print(inc_filename("hotdog.jpg"))
print(inc_filename("ho_t_dog_15.jpg"))
print(inc_filename("hotdog_1_91.jpg"))
# Demo: list behaviour -- every generated name must avoid the existing ones.
filelist = [
    "hotdog_1.jpg",
    "hotdog_2.jpg",
    "hotdog_3.jpg",
    "hotdog_4.jpg"
]
print(inc_filelist(filelist))
output
hotdog_2
hotdog_1
hotdog_2.jpg
hotdog_1.jpg
ho_t_dog_16.jpg
hotdog_1_92.jpg
['hotdog_5.jpg', 'hotdog_6.jpg', 'hotdog_7.jpg', 'hotdog_8.jpg']
I am trying to make a program that writes a new file each time it runs.
For example:
I run the program once. The folder is empty so it adds a file to the folder named "Test_Number_1.txt"
I run the program for the second time. The folder has one file, so it scans it as a file, scans for another file but there is no file, so it creates a new file named "Test_Number_2.txt"
This is what I had in mind, but the code won't leave the while loop. I am still new to programming so excuse my inefficient coding haha.
# NOTE(review): placeholder assignment -- the snippet is not runnable as-is.
memory = # something that changes each time I run the program
print(memory)
print("<-<<----<<<---------<+>--------->>>---->>->")
found_new = False
which_file = 0
while not found_new:
    try:
        # BUG: open(..., "a") CREATES the file when it is missing, so
        # FileNotFoundError is never raised, found_new never becomes True,
        # and the loop keeps creating files forever.
        file = open("path_to_folder/Test_Number_" + str(which_file) + ".txt", "a")
    except FileNotFoundError:
        which_file += 1
        file_w = open("path_to_folder/Test_Number_" + str(which_file) + ".txt", "w")
        found_new = True
        break
    print("Looked", which_file, "times.")
    which_file += 1
    time.sleep(1)
file = open("path_to_folder/Test_Number_" + str(which_file) + ".txt", "a")
file.write(memory)
file.close()
print("Done.")
I put the time.sleep(1) to delay the process in case of a bug so that my entire computer didn't overload and thank goodness because the program just keeps adding more and more files until I force quit it.
One simple solution
from os.path import isfile
def file_n(n):
    """Build the candidate filename for index *n*, e.g. "Test_number_3.txt"."""
    return f"Test_number_{n!s}.txt"
# Find the first index n whose file does not exist yet, then claim it.
n = 0
while isfile(file_n(n)):
    n += 1
# FIX: use a context manager so the file handle is flushed and closed even if
# the write fails (the original relied on a manual close()).
# NOTE(review): still racy -- two concurrent instances can pick the same n;
# open(..., "x") plus retry on FileExistsError would make the claim atomic.
with open(file_n(n), "w") as f:
    f.write("data...")
The problem is that if many instances of that same program run at the same time, some files may be overwritten.
Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 5 years ago.
Improve this question
AIM: I wanted to record all of the files on a variety of hard disks and collect the file name, folders, and size of each file in megabytes. The code runs and, to my knowledge, doesn't produce any errors, but it doesn't produce the csv file at the end.
What I've tried:
I've tried running the file with Sudo, changing the permissions with chmod +x, checking that python is in the same place for standard user and for sudo user, and lastly removing or commenting out troublesome lines which seem to yield different results or errors depending on OS.
import os
from os import path
import sys
import datetime
from datetime import date, time, timedelta
import time
import csv
#from socket import gethostbyname
#variables
#hostname = str(socket.gethostname())
scandir = "/"
savefiledir = "/Users/joshua/Documents/python/"
textfilename = str(datetime.datetime.now().strftime("%Y-%m-%d")) + "_" "directory_printer.csv"
#change directory to the root directory or the one which you want to scan for files (scandir)
os.getcwd()
os.chdir(scandir)
directory = os.getcwd()
#find all files in a directory and it's sub directory regardless of extension
results = [val for sublist in [[os.path.join(i[0], j) for j in i[2]] for i in os.walk(directory)] for val in sublist]
d = {}
file_count = 0
metadata = []
# Collect metadata for every file found under `directory`.  Each field is
# wrapped individually so a single unreadable file cannot abort the scan.
for file in results:
    #full path
    try:
        fullpath = file
    except Exception:
        fullpath = None
    #directory part of the path (everything between root and the filename)
    try:
        file_directory = "/".join(str(file).split('/')[1:-1])
    except Exception:
        file_directory = None
    #final path component -- the file name itself, despite the variable name
    try:
        file_ext = str(file).split('/')[-1]
    except Exception:
        file_ext = None
    #subfolders, colon separated
    try:
        parts = file_directory.split('/')
        sub_folders = ":".join(parts[1:-1])
    except Exception:
        sub_folders = None
    #num subfolders
    try:
        count_subfolders = len(sub_folders.split(':'))
    except Exception:
        count_subfolders = None
    #filesize in megabytes
    try:
        # FIX: getsize() returns bytes; dividing by 1024 once yields
        # kilobytes, not the megabytes the field name promises.
        filesize_mb = os.path.getsize(file)/(1024*1024)
    except Exception:
        filesize_mb = None
    #age since last modification (a timedelta)
    try:
        date_modified = datetime.datetime.now() - datetime.datetime.fromtimestamp(path.getmtime(file))
    except Exception:
        date_modified = None
    #record all file metadata
    d[file_count] = {'Full_Path': fullpath, 'File_Directory': file_directory,
                     'File_Extension': file_ext, 'List_Sub_Folders' : sub_folders,
                     'Count_Sub_Folders' : count_subfolders, 'Filesize_mb' : filesize_mb,
                     'Date_Modified' : date_modified}
    file_count = file_count + 1
#write the dictionary with the disk's file metadata to a csv file
# FIX: the script chdir'd to scandir earlier, so a bare filename landed in
# "/" instead of savefiledir -- this is why no CSV appeared where expected.
# newline='' is the csv-module requirement that prevents blank rows on Windows.
with open(savefiledir + textfilename, 'w', newline='') as f:
    w = csv.writer(f)
    w.writerows(d.items())
print("Scanning directory: "
      + str(scandir) + " complete!" + "\n"
      + "The results have been saved to: " + "\n"
      + str(savefiledir)+str(textfilename))
As it is, it looks like your code will write the CSV file to scandir (/), not to savefiledir, because at the beginning of the program you call os.chdir(scandir). If you want to get the file at the right place (where the final printed message says it's saved to) you should do:
# ...
#write the dictionary with the disk's file metadata to a csv file
# Prefixing savefiledir makes the destination absolute, so the earlier
# os.chdir(scandir) no longer redirects the CSV into the scanned root.
with open(savefiledir + textfilename,'w') as f:
    w = csv.writer(f)
    w.writerows(d.items())
# ...
i have a python script that when is run from eclipse it does what i want without any errors or anything.
I want now to create a batch file, that will run my script in a loop (infinitely).
The first problem is that when I run the bat file, I get a second cmd window that shows the logging from my python script (which shows me that it is running), but when the main process of the script starts (which can take from 1 minute to some hours) it exits within a few seconds without actually running the whole script. I have used start /wait but it doesn't seem to work. Here is the simple batch file I have created:
@echo off
rem Loop forever: run the Python script, wait for it to exit, then restart it.
:start
rem "start /wait" blocks this batch file until python.exe terminates,
rem even if the script runs for hours.
start /wait C:\Python32\python.exe C:\Users\some_user\workspace\DMS_GUI\CheckCreateAdtf\NewTest.py
goto start
So i want the bat file to run my script, wait for it to finish(even if it takes some hours) and then run it again.
I have also tried creating a bat file that calls with start wait/ the bat file shown above with no success.
Optimally i would like it to keep the window open with all the logging that i have in my script, but that is another issue that can be solved later.
def _open_read_file(self):
logging.debug("Checking txt file with OLD DB-folder sizes")
content = []
with open(self._pathToFileWithDBsize) as f:
content = f.read().splitlines()
for p in content:
name,size = (p.split(","))
self._folder_sizes_dic[name] = size
def _check_DB(self):
    # Measure the current on-disk size (in MB) of every DB folder and store
    # it in self._DB_folder_sizes_dic as folder-name -> size string.
    logging.debug("Checking current DB size")
    # Folders that must never be scanned/compared.
    skippaths = ['OtherData','Aa','Sss','asss','dss','dddd']
    # All immediate subdirectories of the DB parent folder.
    dirlist = [ item for item in os.listdir(self._pathToDBparentFolder) if os.path.isdir(os.path.join(self._pathToDBparentFolder, item)) ]
    for skip in skippaths:
        if skip in dirlist:
            dirlist.remove(skip)
    MB=1024*1024.0
    for dir in dirlist:
        folderPath = self._pathToDBparentFolder +"\\"+str(dir)
        # The Windows COM FileSystemObject returns the recursive folder size
        # in a single call (much faster than an os.walk + getsize loop).
        # NOTE(review): 'com' is presumably win32com.client imported elsewhere
        # in this file -- confirm.
        fso = com.Dispatch("Scripting.FileSystemObject")
        folder = fso.GetFolder(folderPath)
        size = str("%.5f"%(folder.Size/MB))
        self._DB_folder_sizes_dic[dir] = size
def _compare_fsizes(self):
logging.debug("Comparing sizes between DB and txt file")
for (key, value) in self._DB_folder_sizes_dic.items():
if key in self._folder_sizes_dic:
if (float(self._DB_folder_sizes_dic.get(key)) - float(self._folder_sizes_dic.get(key)) < 100.0 and float(self._DB_folder_sizes_dic.get(key)) - float(self._folder_sizes_dic.get(key)) > -30.0):
pass
else:
self._changed_folders.append(key)
else:
self._changed_folders.append(key)
def _update_file_with_new_folder_sizes(self):
logging.debug("Updating txt file with new DB sizes")
file = open(self._pathToFileWithDBsize,'w')
for key,value in self._DB_folder_sizes_dic.items():
file.write(str(key)+","+str(value)+"\n")
def _create_paths_for_changed_folders(self):
    """Collect every leaf directory (one without subdirectories) below the
    changed DB folders into self._full_paths_to_check_for_adtfs."""
    logging.debug("Creating paths to parse for the changed folders")
    changed_roots = [self._pathToDBparentFolder + "\\" + str(name)
                     for name in self._changed_folders]
    for root in changed_roots:
        for current, subdirs, filenames in os.walk(root):
            # Only leaf directories are of interest.
            if not subdirs:
                self._full_paths_to_check_for_adtfs.append(current)
def _find_dat_files_with_no_adtf(self):
    """Scan the collected leaf directories and record every .dat file that
    lacks a matching *_AdtfInfo.txt companion file."""
    logging.debug("Finding files with no adtf txt")
    for leaf in self._full_paths_to_check_for_adtfs:
        for current, _dirs, filenames in os.walk(leaf):
            for name in filenames:
                if name.endswith('_AdtfInfo.txt'):
                    # Remember which .dat this info file belongs to.
                    companion = name.replace('_AdtfInfo.txt', '.dat')
                    self.hasADTFinfos.add(current + "\\" + companion)
                    self.adtf_files += 1
                elif name.endswith('.dat'):
                    self.dat_files += 1
                    self._dat_file_paths.append(current + "\\" + name)
    logging.debug("Checking which files have AdtfInfo.txt, This will take some time depending on the number of .dat files ")
    # Any .dat path never registered as having an info file is missing one.
    self._dat_with_no_adtf.extend(
        dat for dat in self._dat_file_paths if dat not in self.hasADTFinfos)
    self.files_with_no_adtf = len(self._dat_with_no_adtf)
    logging.debug("Files found with no adtf " + str(self.files_with_no_adtf))
def _create_adtf_info(self):
    """Run the converter .exe for every .dat file that is missing its
    *_AdtfInfo.txt, tallying successes and corrupted files."""
    logging.debug("Creating Adtf txt for dat files")
    files_numbering = 0
    for dat_path in self._dat_with_no_adtf:
        adtf_path = dat_path.replace('.dat', '_AdtfInfo.txt')
        exe_path = r"C:\Users\some_user\Desktop\some.exe"
        # FIX: the original launched the converter TWICE per file -- an
        # unused fire-and-forget Popen followed by check_output.  Run it once.
        # FIX: pass an argument list instead of a concatenated string so
        # paths containing spaces are handled correctly.
        process_response = subprocess.check_output([exe_path, dat_path, "-d", adtf_path])
        # "index0" in the converter output means the .dat file is probably
        # corrupted and no adtf info could be produced.
        if "index0" in str(process_response):
            self._corrupted_files_paths.append(dat_path)
            self._files_corrupted = self._files_corrupted + 1
            self._corrupted_file_exist_flag = True
        else:
            self._files_processed_successfully = self._files_processed_successfully + 1
        files_numbering = files_numbering + 1
The functions are called in this order
# One complete run of the checker calls the methods in this order:
self._open_read_file()                      # load previously recorded sizes
self._check_DB()                            # measure current DB folder sizes
self._compare_fsizes()                      # find folders whose size changed
self._create_paths_for_changed_folders()    # leaf dirs under changed folders
self._find_dat_files_with_no_adtf()         # locate .dat files missing info txt
self._create_adtf_info()                    # generate the missing info files
self._check_DB()                            # re-measure after processing
self._update_file_with_new_folder_sizes()   # persist the new sizes
Ok, it seems that the .exe in the script was returning an error and that is why the script was finishing so fast. I thought that the bat file did not wait. I should have placed the .bat file in the .exe folder and now the whole thing runs perfectly.
I'm still working on my mp3 downloader but now I'm having trouble with the files being downloaded. I have two versions of the part that's tripping me up. The first gives me a proper file but causes an error. The second gives me a file that is way too small but no error. I've tried opening the file in binary mode but that didn't help. I'm pretty new to doing any work with HTML so any help would be appreciated.
import urllib
import urllib2
def milk():
    # Scrape earmilk.com's pop category page and download the first song of
    # the embedded playlist.  NOTE(review): this is Python 2 code
    # (urllib.urlopen / urllib2 imports above).
    SongList = []
    SongStrings = []
    SongNames = []
    earmilk = urllib.urlopen("http://www.earmilk.com/category/pop")
    reader = earmilk.read()
    #gets the position of the playlist
    PlaylistPos = reader.find("var newPlaylistTracks = ")
    #finds the number of songs in the playlist
    NumberSongs = reader[reader.find("var newPlaylistIds = " ): PlaylistPos].count(",") + 1
    initPos = PlaylistPos
    #goes through the playlist and records the URL and name of each song
    for song in range(0, NumberSongs):
        songPos = reader[initPos:].find("http:") + initPos
        namePos = reader[songPos:].find("name") + songPos
        namePos += reader[namePos:].find(">")
        nameEndPos = reader[namePos:].find("<") + namePos
        SongStrings.append(reader[songPos: reader[songPos:].find('"') + songPos])
        SongNames.append(reader[namePos + 1: nameEndPos])
        initPos = nameEndPos
    # un-escape the JSON-style "\/" separators in the scraped URLs
    for correction in range(0, NumberSongs):
        SongStrings[correction] = SongStrings[correction].replace('\\/', "/")
    #downloading songs
    # build a filename from the song title (non-alphanumeric chars -> "_")
    fileName = ''.join([a.isalnum() and a or '_' for a in SongNames[0]])
    fileName = fileName.replace("_", " ") + ".mp3"
    # BUG(review): both download variants below are commented out, so songDL
    # is never defined and songDL.close() raises a NameError.
    # Variant 1 fails because urllib.urlretrieve returns a (filename, headers)
    # tuple, not the file's bytes -- urlretrieve already writes the file itself.
    ## songDL = open(fileName, "wb")
    ## songDL.write(urllib.urlretrieve(SongStrings[0], fileName))
    # Variant 2 overwrites the correctly downloaded audio with the tiny text
    # representation of that same tuple, hence the too-small file.
    ## url = urllib.urlretrieve(SongStrings[0], fileName)
    ## url = str(url)
    ## songDL = open(fileName, "wb")
    ## songDL.write(url)
    songDL.close()
    earmilk.close()
Re-read the documentation for urllib.urlretrieve:
Return a tuple (filename, headers) where filename is the local file
name under which the object can be found, and headers is whatever the
info() method of the object returned by urlopen() returned (for a
remote object, possibly cached).
You appear to be expecting it to return the bytes of the file itself. The point of urlretrieve is that it handles writing to a file for you, and returns the filename it was written to (which will generally be the same thing as your second argument to the function if you provided one).