I'm trying to write some Python code that uses libvirt-python to manage external snapshots through the KVM APIs.
I'm not sure what to do in order to delete (i.e. merge) a given snapshot. Say there are two main cases in the following scenario: base <- snap_a <- snap_b <- top (as shown in this webpage).
1. I want to merge snap_a and snap_b (snap_b is the active one). The result should have base as the backing file in the snapshot chain.
2. I want to merge base and snap_a (neither of them is active). The result should keep snap_b as the active snapshot, with base as its backing file.
In the first case I lose my base backing file, while in the second one I get this error: Failed to merge snapshot: invalid argument: active commit requested but '/var/lib/nova/instances/b9c9cd3b-1102-4084-a7a9-6e85c179ac9c/disk.snap_system_1610446663' is not active (snap_system_1610446663 here plays the role of snap_a in my example).
This is my merging function:
def snapshot_merge(instance_name: str, snapshot_name: str):
    # Get the connection to qemu
    conn, dom = open_qemu_connection(instance_name)
    if conn is not None and dom is not None:
        logging.info(
            'Merging domain {} from snapshot {}'
            .format(instance_name, snapshot_name))
        # Merge snapshot (block commit, first phase)
        # Get the vda path from my custom function
        disk_path = get_vda_path(dom)
        top = disk_path + '.' + snapshot_name
        disk = 'vda'
        bandwidth = 0
        # Check whether the snapshot in input is the active/current one,
        # and set the flags accordingly
        snapshot = dom.snapshotCurrent()
        if snapshot.getName() == snapshot_name:
            bc_flags = (libvirt.VIR_DOMAIN_BLOCK_COMMIT_ACTIVE
                        | libvirt.VIR_DOMAIN_BLOCK_COMMIT_SHALLOW)
        else:
            bc_flags = 0
        try:
            dom.blockCommit(disk,
                            None,
                            top,
                            bandwidth,
                            bc_flags)
            logging.info('Snapshot merged')
        except libvirt.libvirtError as e:
            logging.info('Failed to merge snapshot: %s' % e)
            close_qemu_connection(conn)
            raise HTTPException(status_code=500, detail=ERR_MERGE)
        # Merge snapshot (pivoting, second phase)
        piv_flags = libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT
        # Wait for ready state
        time.sleep(5)
        try:
            dom.blockJobAbort(disk, piv_flags)
            logging.info('Snapshot pivoted')
            snapshot_delete(dom, snapshot_name, top)
            return {"snapshot_name": snapshot_name}
        except libvirt.libvirtError as e:
            logging.info('Failed to pivot snapshot: %s' % e)
            # TODO snapshot_delete(dom, snapshot_name, top)
            raise HTTPException(status_code=500, detail=ERR_PIVOT)
        finally:
            close_qemu_connection(conn)
    else:
        logging.info('Process failed')
        raise HTTPException(status_code=500, detail=ERR_CONN)
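For the second case (committing snap_a down into base while snap_b stays active), blockCommit should not get the ACTIVE flag, and no pivot is needed: qemu completes an inactive commit on its own. Here is a minimal sketch of that path, assuming the same disk naming convention as above; commit_inactive_snapshot and the polling loop are illustrative, not part of libvirt:

def commit_inactive_snapshot(dom, disk_path: str, snapshot_name: str):
    # top is the overlay created for the snapshot we want to merge down
    top = disk_path + '.' + snapshot_name
    # base=None commits top into its immediate backing file;
    # pass an explicit path instead to commit across several layers
    dom.blockCommit('vda', None, top, 0, 0)
    # An inactive commit finishes by itself: poll the job instead of pivoting
    while True:
        info = dom.blockJobInfo('vda', 0)
        if not info or info['cur'] == info['end']:
            break
        time.sleep(1)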
Every time I create a new instance in my ontology, something goes wrong if I try to read from the same database again.
PS: these are all parts of different views in Django.
This is how I am adding instances to my ontology:
# OWLREADY2
try:
    myworld = World(filename='backup.db', exclusive=False)
    kiposcrum = myworld.get_ontology(os.path.dirname(__file__) + '/kipo.owl').load()
except:
    print("Error opening ontology")

# Sync
# --------------------------------------------------------------------------
sync_reasoner()

seed = str(time.time())
id_unico = faz_id(seed)
try:
    with kiposcrum:
        # here I am creating my instance; these are all strings I got from the user
        kiposcrum[input_classe](input_nome + id_unico)
        if input_observacao != "":
            kiposcrum[input_nome + id_unico].Observacao.append(input_observacao)
        sync_reasoner()
    status = "OK!"
    myworld.close()
    myworld.save()
except:
    print("Mistakes were made!")
    status = "Error!"
    input_nome = "Mistakes were made!"
    input_classe = "Mistakes were made!"
finally:
    print(input_nome + " " + id_unico)
    print(input_classe)
    print(status)
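For what it's worth, as I understand the owlready2 API, World.save() commits the quadstore to the sqlite file (backup.db here) and World.close() closes the underlying connection, so save is normally called while the world is still open. A minimal sketch of that order, reusing the names from the snippet above:

with kiposcrum:
    kiposcrum[input_classe](input_nome + id_unico)
    sync_reasoner()
myworld.save()   # commit changes to backup.db while the connection is open
myworld.close()  # then close the underlying sqlite connection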
This is how I am reading from it:
# OWLREADY2
try:
    myworld = World(filename='backup.db', exclusive=False)
    kiposcrum = myworld.get_ontology(os.path.dirname(__file__) + '/kipo_fialho.owl').load()
except:
    print("Error")

sync_reasoner()

try:
    with kiposcrum:
        num_inst = 0
        # gets a list of properties of the instance given by the user
        propriedades = kiposcrum[instancia].get_properties()
        num_prop = len(propriedades)
    myworld.close()
except:
    print("Error")
I am 100% able to read from my ontology, but if I try to create an instance and then read the database again, something goes wrong.
I'm trying to delete certain registry keys via a Python script.
I have no problems reading and deleting keys from HKEY_CURRENT_USER, but trying to do the same from HKEY_LOCAL_MACHINE gives me the dreaded WindowsError: [Error 5] Access is denied.
I'm running the script via the IDLE IDE, with admin privileges.
Here's the code:
from _winreg import *
ConnectRegistry(None, HKEY_LOCAL_MACHINE)
OpenKey(HKEY_LOCAL_MACHINE, r'software\wow6432node\App', 0, KEY_ALL_ACCESS)
DeleteKey(OpenKey(HKEY_LOCAL_MACHINE, r'software\wow6432node'), 'App')
You need to remove all subkeys before you can delete the key.
def deleteSubkey(key0, key1, key2=""):
    import _winreg
    if key2 == "":
        currentkey = key1
    else:
        currentkey = key1 + "\\" + key2

    open_key = _winreg.OpenKey(key0, currentkey, 0, _winreg.KEY_ALL_ACCESS)
    infokey = _winreg.QueryInfoKey(open_key)
    for x in range(0, infokey[0]):
        # NOTE: This code deletes the key and all subkeys.
        # If you just want to walk through them, then pass x to EnumKey:
        #   subkey = _winreg.EnumKey(open_key, x)
        # Deleting a subkey changes the subkey count used by EnumKey,
        # so we always pass 0 to EnumKey to get back the new first subkey.
        subkey = _winreg.EnumKey(open_key, 0)
        try:
            _winreg.DeleteKey(open_key, subkey)
            print "Removed %s\\%s" % (currentkey, subkey)
        except:
            deleteSubkey(key0, currentkey, subkey)
            # no extra delete here, since each call to deleteSubkey
            # will try to delete itself once it is empty
    _winreg.DeleteKey(open_key, "")
    open_key.Close()
    print "Removed %s" % (currentkey)
    return
Here is how you run it:
deleteSubkey(_winreg.HKEY_CURRENT_USER, "software\\wow6432node", "App")
deleteSubkey(_winreg.HKEY_CURRENT_USER, "software\\wow6432node\\App")
Just my two cents on the topic: I recurse to the lowest subkey and delete on the way back up:
import winreg

def delete_sub_key(root, sub):
    try:
        open_key = winreg.OpenKey(root, sub, 0, winreg.KEY_ALL_ACCESS)
        num, _, _ = winreg.QueryInfoKey(open_key)
        for i in range(num):
            child = winreg.EnumKey(open_key, 0)
            delete_sub_key(open_key, child)
        try:
            winreg.DeleteKey(open_key, '')
        except Exception:
            pass  # log deletion failure
        finally:
            winreg.CloseKey(open_key)
    except Exception:
        pass  # log opening/closure failure
The difference from the other posts is that I do not try to delete the key while num > 0, because that delete would just fail (as stated in the docs), so no time is wasted attempting it while subkeys remain.
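For reference, this is the behaviour that makes the early delete pointless: winreg.DeleteKey raises OSError for a key that still has subkeys. A tiny illustration, where Software\ExampleParent is a hypothetical key with children:

import winreg

try:
    # refused while ExampleParent still has subkeys
    winreg.DeleteKey(winreg.HKEY_CURRENT_USER, r'Software\ExampleParent')
except OSError as exc:
    print('delete refused:', exc)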
[EDIT]
I have created a pip package that handles registry keys.
Install with: pip install windows_tools.registry
Usage:
from windows_tools.registry import delete_sub_key, KEY_WOW64_32KEY, KEY_WOW64_64KEY
keys = [r'SOFTWARE\MyInstalledApp', r'SOFTWARE\SomeKey\SomeOtherKey']
for key in keys:
    delete_sub_key(key, arch=KEY_WOW64_32KEY | KEY_WOW64_64KEY)
[/EDIT]
Unburying this old question, here's an updated version of ChrisHiebert's recursive function that:
Handles Python 3 (tested with Python 3.7.1)
Handles multiple registry architectures (e.g. WOW64 for 32-bit Python on 64-bit Windows)
Is PEP 8 compliant
The following example shows how to use the function to delete two keys in all registry architectures (standard and the redirected WOW6432Node) by using architecture key masks.
Hopefully this will help someone:
import winreg


def delete_sub_key(key0, current_key, arch_key=0):
    open_key = winreg.OpenKey(key0, current_key, 0, winreg.KEY_ALL_ACCESS | arch_key)
    info_key = winreg.QueryInfoKey(open_key)
    for x in range(0, info_key[0]):
        # NOTE: This code deletes the key and all sub_keys.
        # If you just want to walk through them, then pass x to EnumKey:
        #   sub_key = winreg.EnumKey(open_key, x)
        # Deleting a sub_key changes the sub_key count used by EnumKey,
        # so we always pass 0 to EnumKey to get back the new first sub_key.
        sub_key = winreg.EnumKey(open_key, 0)
        try:
            winreg.DeleteKey(open_key, sub_key)
            print("Removed %s\\%s" % (current_key, sub_key))
        except OSError:
            delete_sub_key(key0, "\\".join([current_key, sub_key]), arch_key)
            # No extra delete here, since each call to delete_sub_key
            # will try to delete itself once it is empty.
    winreg.DeleteKey(open_key, "")
    open_key.Close()
    print("Removed %s" % current_key)
    return


# Specify whether to operate on the redirected 32-bit view, the 64-bit view,
# or both; use [0] to disable architecture masking.
arch_keys = [winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]

# Base key
root = winreg.HKEY_LOCAL_MACHINE

# List of keys to delete
keys = [r'SOFTWARE\MyInstalledApp', r'SOFTWARE\SomeKey\SomeOtherKey']

for key in keys:
    for arch_key in arch_keys:
        try:
            delete_sub_key(root, key, arch_key)
        except OSError as e:
            print(e)
Figured it out!
Turns out the registry key wasn't empty and contained multiple subkeys.
I had to enumerate and delete the subkeys first, and only then was I able to delete the main key from HKLM.
(I also added try...except so it wouldn't break the whole code in case there were problems.)
This is my solution. I like to use with statements so I don't have to close the key manually. First I check for subkeys and delete them before deleting the key itself. EnumKey raises an OSError if no subkey exists, and I use this to break out of the loop.
from typing import Union

from winreg import *


def delete_key(key: Union[HKEYType, int], sub_key_name: str):
    with OpenKey(key, sub_key_name) as sub_key:
        while True:
            try:
                sub_sub_key_name = EnumKey(sub_key, 0)
                delete_key(sub_key, sub_sub_key_name)
            except OSError:
                break
    DeleteKey(key, sub_key_name)
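A usage example, where Software\MyTestKey is a hypothetical throwaway key:

from winreg import HKEY_CURRENT_USER

delete_key(HKEY_CURRENT_USER, r'Software\MyTestKey')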
I am a newbie to concurrent.futures. I am trying to refresh 3 workbooks in Tableau:
workbook_dict = {"workbook1":"workbook_id1","workbook2":"workbook_id2","workbook3":"workbook_id3"}
# @retry(tries=3, delay=5, backoff=0.2)
def refresh_workbooks(self, workbook_dict):
    for workbook_name, workbook_id in workbook_dict.items():
        workbook = self.server.workbooks.get_by_id(workbook_id)
        # refresh will fire up the refresh and return a job object
        job_id = self.server.workbooks.refresh(workbook)
        # wait_for_job will check the status and raise an exception on failure or timeout
        # https://tableau.github.io/server-client-python/docs/api-ref#jobswait_for_job
        self.server.jobs.wait_for_job(job_id, timeout=1000)
This base code works: each workbook takes about 15 minutes, so the whole run takes 45 minutes to complete. And if it fails on the second workbook, it starts over from scratch.
I want to use concurrent.futures to speed this up and to check wait_for_job concurrently. If any refresh fails, I want to retry only that workbook, a few times, before raising an error.
First question: the code below is my attempt at using concurrent.futures, but the printed data object is None.
I think the code failed to execute wait_for_job.
Why is that?
import concurrent.futures
import urllib.request
import tableauserverclient as TSC


def refresh_workbook(self, workbook_id):
    workbook = self.server.workbooks.get_by_id(workbook_id)
    job_id = self.server.workbooks.refresh(workbook)
    return job_id


def refresh_workbooks(self, workbook_dict):
    job_dict = {}
    try:
        for workbook_name, workbook_id in workbook_dict.items():
            workbook = self.server.workbooks.get_by_id(workbook_id)
            job_id = self.server.workbooks.refresh(workbook)
            job_dict[workbook_name] = job_id.id
        print(job_dict)
    except:
        raise
    return job_dict


def wait_workbook(self, job_id, timeout=None):
    self.server.jobs.wait_for_job(job_id, timeout=timeout)


test = TableauServerConnection()
workbook_dict = {"workbook1": "workbook_id1", "workbook2": "workbook_id2", "workbook3": "workbook_id3"}
jobs = test.refresh_workbooks(workbook_dict)

with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    future_to_job = {executor.submit(test.wait_workbook, job_id, 1800): job_id
                     for workbook_name, job_id in jobs.items()}
    for future in concurrent.futures.as_completed(future_to_job):
        job = future_to_job[future]
        try:
            data = future.result()
            # Why did I get None in data?
            print('data', data)
        except Exception as exc:
            # Can I spin up a new future here, and what is the correct syntax?
            print('%s generated an exception: %s' % (job, exc))
        else:
            print('job', job)
Secondly, if I retry inside the exception handler, can I submit a new future object there?
You might want to try something like this.
I'm using a while loop and collecting the latest results with concurrent.futures.wait. Then, for each finished task, I can add a new task; in your case, only when a task fails.
This also allows you to not submit all the tasks at the beginning. Depending on how many tasks you want to run, it might be better to add X tasks at the beginning (where X > max_workers) and then add one more each time a task finishes.
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    future_to_job = {executor.submit(test.wait_workbook, job_id, 1800): job_id
                     for workbook_name, job_id in jobs.items()}
    while future_to_job:
        done, _ = concurrent.futures.wait(
            future_to_job, timeout=0.25,
            return_when=concurrent.futures.FIRST_COMPLETED)
        for future in done:
            # get the job id and clean up the finished task
            job_id = future_to_job[future]
            del future_to_job[future]
            try:
                data = future.result()
                print('data', data)
            except Exception as exc:
                print('%s generated an exception: %s' % (job_id, exc))
                # resubmit only the failed wait as a new future
                future_to_job[executor.submit(test.wait_workbook, job_id, 1800)] = job_id
            else:
                print('job', job_id)
For the None part, your wait_workbook function should return the result:

def wait_workbook(self, job_id, timeout=None):
    return self.server.jobs.wait_for_job(job_id, timeout=timeout)
hope it helps :)
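To bound the retries the question asks about, one option is to keep an attempt counter per job and resubmit only while it is under a limit. A minimal sketch of that bookkeeping, where MAX_RETRIES and attempts are illustrative names, not part of tableauserverclient:

import concurrent.futures

MAX_RETRIES = 3
attempts = {job_id: 0 for job_id in jobs.values()}

with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    future_to_job = {executor.submit(test.wait_workbook, job_id, 1800): job_id
                     for job_id in jobs.values()}
    while future_to_job:
        done, _ = concurrent.futures.wait(
            future_to_job, timeout=0.25,
            return_when=concurrent.futures.FIRST_COMPLETED)
        for future in done:
            job_id = future_to_job.pop(future)
            try:
                future.result()
            except Exception as exc:
                attempts[job_id] += 1
                if attempts[job_id] < MAX_RETRIES:
                    # retry just this job, leaving the others untouched
                    future_to_job[executor.submit(test.wait_workbook, job_id, 1800)] = job_id
                else:
                    print('%s failed after %d attempts: %s' % (job_id, MAX_RETRIES, exc))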
I have a class for multiprocessing in Python that creates 3 different processes. The first process checks whether there is any signal from my hardware and pushes it onto a Queue, the second gets the data out of the Queue and pushes it into a database, and the third gets the data out of the database and pushes it to a server.
obj = QE()
stdFunct = standardFunctions()
watchDogProcess = multiprocessing.Process(target=obj.watchDog)
watchDogProcess.start()
pushToDBSProcess = multiprocessing.Process(target=obj.pushToDBS)
pushToDBSProcess.start()
pushToCloud = multiprocessing.Process(target=stdFunct.uploadCycleTime)
pushToCloud.start()
watchDogProcess.join()
pushToDBSProcess.join()
pushToCloud.join()
My first two processes run perfectly as desired; however, I am struggling with the third. This is the code of my third process:
def uploadCycleTime(self):
    while True:
        uploadCycles = []
        lastUpPointer = "SELECT id FROM lastUploaded"
        lastUpPointer = self.dbFetchone(lastUpPointer)
        lastUpPointer = lastUpPointer[0]
        # print("lastUploaded :" + str(lastUpPointer))
        cyclesToUploadSQL = "SELECT id,machineId,startDateTime,endDateTime,type FROM cycletimes WHERE id > " + str(lastUpPointer)
        cyclesToUpload = self.dbfetchMany(cyclesToUploadSQL, 15)
        cyclesUploadLength = len(cyclesToUpload)
        if cyclesUploadLength > 0:
            for cycles in cyclesToUpload:
                uploadCycles.append({"dataId": cycles[0],
                                     "machineId": cycles[1],
                                     "startDateTime": cycles[2].strftime('%Y-%m-%d %H:%M:%S.%f'),
                                     "endDateTime": cycles[3].strftime('%Y-%m-%d %H:%M:%S.%f'),
                                     "type": cycles[4]})
            # print("length : " + str(cyclesUploadLength))
            lastUpPointer = uploadCycles[cyclesUploadLength - 1]["dataId"]
            uploadCycles = json.dumps(uploadCycles)
            api = self.dalUrl + "/cycle-times"
            uploadResponse = self.callPostAPI(api, str(uploadCycles))
            print(lastUpPointer)
            changePointerSQL = "UPDATE lastUploaded SET id=" + str(lastUpPointer)
            try:
                changePointerSQL = self.dbAbstraction(changePointerSQL)
            except Exception as errorPointer:
                print("Pointer change Error : " + str(errorPointer))
        time.sleep(2)
I am saving a pointer to remember the last id uploaded, and from there on I keep uploading packets of 15. When data already exists in the DB the code works well; however, if there is none when the process starts and data is sent afterwards, it fails to fetch the new rows from the DB.
I tried printing the length in real time: it keeps giving me 0, in spite of data being continuously pushed into the DB.
In my upload process, I had missed a commit():
def dbFetchAll(self, dataString):
    # dataToPush = self.cycletimeQueue.get()
    # print(dataToPush)
    dbTry = 1
    try:
        while dbTry == 1:  # this while is to ensure the data has been fetched
            sql = dataString
            self.conn.execute(sql)
            response = self.conn.fetchall()
            dbTry = 0
            return response
            # print(self.conn.rowcount, "record inserted.")
    except Exception as error:
        print("Error : " + str(error))
        return dbTry
    finally:
        self.mydb.commit()  # <-- this commit was the missing piece
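The reason the commit matters for a SELECT: with MySQL/InnoDB's default REPEATABLE READ isolation, a connection keeps reading the same snapshot until its transaction ends, so a long-lived polling loop never sees rows committed by another process. A minimal sketch of the pattern, with mydb and cursor standing in for the question's connection objects:

import time

def poll_new_rows(mydb, cursor):
    while True:
        # End the current transaction so the next SELECT sees rows
        # committed by other connections since the last poll.
        mydb.commit()
        cursor.execute("SELECT COUNT(*) FROM cycletimes")
        print(cursor.fetchone()[0])
        time.sleep(2)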
I am getting an error with this code:
try:
    newthing1 = Thing1()
    newthing1.thing1 = data['thing1']
    newthing1.save()
except (SystemExit, KeyboardInterrupt):
    raise
except Exception, e:
    clientnamedb_logger.error('Exception:', exc_info=True)
clientnamedb_logger.debug('create account - thing1 record created, thing1 id:%s' % newthing1.id)
#
# create an instance of Thing2 and save the thing2 record
#
try:
    newthing2 = Thing2()
    newthing2.target_id = target_id
    newthing2.thing2_id = user_id
    #newthing2.thing1 = data['datasharing_thing1']
    newthing2.thing1 = [newthing1.id]
    newthing2.save()
except (SystemExit, KeyboardInterrupt):
    raise
except Exception, e:
    clientnamedb_logger.error('Exception:', exc_info=True)
Now, I know that this is because newthing2 doesn't exist yet when I try to save the many-to-many relationship. I've tried executing a save right before assigning the list of newthing1.id, but that gives a null-field constraint error (I could turn that constraint off, but I think it would give me two rows in my database, which I do not want).
How do I save a row with the data for newthing1.id in the newthing2.thing1 field, which is ManyToMany? Is there a way to do this without saving twice? Will that create a duplicate row?
Try to save without committing; that way you get the ID before you commit, like so:
newthing1.save(commit=False)
After that you can use the id, but you will have to save again after thing2 is saved:
newthing2.thing1 = [newthing1.id]
newthing2.save()
newthing1.save()
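For reference (note that commit=False is an argument of ModelForm.save(), not of a plain Model.save()), the usual Django pattern is to save both rows first and then populate the many-to-many field; a second save() on an instance with a primary key issues an UPDATE, not a duplicate INSERT. A minimal sketch with the question's models:

newthing1 = Thing1()
newthing1.thing1 = data['thing1']
newthing1.save()                 # INSERT; newthing1.id is set from here on

newthing2 = Thing2()
newthing2.target_id = target_id
newthing2.thing2_id = user_id
newthing2.save()                 # the row must exist before M2M entries can reference it

newthing2.thing1.add(newthing1)  # writes the join-table row; no second save needed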