Monitor CPU temperature in Python on Windows 11 - python

I have written code to monitor RAM usage, CPU usage, and CPU temperature, using both psutil and WMI, and I have run into a problem: the code ran perfectly on Windows 10, but after updating to Windows 11 it no longer works. I have checked the Python interpreter; it is 3.10.
This is my output/error message:
C:\Users\jeries\PycharmProjects\PP1\venv\Scripts\python.exe C:/Users/jeries/PycharmProjects/PP1/study.py
The CPU usage is: 47.1
RAM memory % used: 54.0
Traceback (most recent call last):
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\wmi.py", line 880, in query
return self._namespace.query(wql, self, fields)
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\wmi.py", line 1072, in query
return [ _wmi_object(obj, instance_of, fields) for obj in self._raw_query(wql) ]
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\wmi.py", line 1072, in <listcomp>
return [ _wmi_object(obj, instance_of, fields) for obj in self._raw_query(wql) ]
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\win32com\client\dynamic.py", line 324, in __getitem__
return self._get_good_object_(self._enum_.__getitem__(index))
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\win32com\client\util.py", line 41, in __getitem__
return self.__GetIndex(index)
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\win32com\client\util.py", line 62, in __GetIndex
result = self._oleobj_.Next(1)
pywintypes.com_error: (-2147217372, 'OLE error 0x80041024', None, None)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\jeries\PycharmProjects\PP1\study.py", line 30, in <module>
temperature_infos = w.Sensor()
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\wmi.py", line 882, in query
handle_com_error()
File "C:\Users\jeries\PycharmProjects\PP1\venv\lib\site-packages\wmi.py", line 258, in handle_com_error
raise klass(com_error=err)
wmi.x_wmi: <x_wmi: Unexpected COM Error (-2147217372, 'OLE error 0x80041024', None, None)>
Process finished with exit code 1
I have tried this:
w = wmi.WMI(namespace="root\openHardwareMonitor")
temperature_infos = w.Sensor()
for sensor in temperature_infos:
    if sensor.SensorType == u'Temperature':
        print(sensor.Name)
        print(sensor.Value)
It is not working; it says that for w.Sensor() there is "no documentation found".
This is my current code:
import os
import psutil
import wmi

def avg(value_list):
    num = 0
    length = len(value_list)
    for val in value_list:
        num += val
    return num / length

# Calling psutil.cpu_percent() after 2 seconds
print('The CPU usage is: ', psutil.cpu_percent(2))
print('RAM memory % used:', psutil.virtual_memory()[2])

# Open Hardware Monitor has to be running
w = wmi.WMI(namespace="root\\OpenHardwareMonitor")
sensors = w.Sensor()

cpu_temps = []
gpu_temp = 0
for sensor in sensors:
    if sensor.SensorType == u'Temperature' and not 'GPU' in sensor.Name:
        cpu_temps += [float(sensor.Value)]
    elif sensor.SensorType == u'Temperature' and 'GPU' in sensor.Name:
        gpu_temp = sensor.Value

print("Avg CPU: {}".format(avg(cpu_temps)))
print("GPU: {}".format(gpu_temp))

OpenHardwareMonitor can generate logs (Options / Log Sensors).
The log is called OpenHardwareMonitorLog-YYYY-MM-DD.csv.
The idea is therefore to launch OpenHardwareMonitor beforehand (for example via a scheduled task plus subprocess, or automatically at startup), and then read the correct column from the last line of the file:
# Code
from datetime import date

while True:
    # Build the log file name for today
    now = date.today()
    infile = r"C:\OpenHardwareMonitor\OpenHardwareMonitorLog-" + now.strftime("%Y-%m-%d") + ".csv"
    # Open read-only
    with open(infile, "r") as f:
        f = f.readlines()[-1]  # Read the last line
    output = f.split(',')  # Split on ","
    print(output[10])  # 10 = CPU Core #1 temperature column
Edit:
You will have to find your column number by looking at the log; it's 10 for me, but it may change depending on your config.
I'm just starting out; the script could be improved by scanning the first two lines and determining the correct column from its name, as sketched below ;-)
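As a rough illustration of that improvement, here is a minimal sketch (untested; it assumes the log's second line holds the human-readable sensor names such as "CPU Core #1", which is how my OpenHardwareMonitor logs look) that finds the column by name instead of hard-coding 10:

from datetime import date

def find_column(header_line, sensor_name="CPU Core #1"):
    # Return the index of the column whose header matches the sensor name.
    headers = [h.strip().strip('"') for h in header_line.split(',')]
    return headers.index(sensor_name)

now = date.today()
infile = r"C:\OpenHardwareMonitor\OpenHardwareMonitorLog-" + now.strftime("%Y-%m-%d") + ".csv"

with open(infile, "r") as f:
    lines = f.readlines()

col = find_column(lines[1])          # assumption: line 2 of the log lists the sensor names
print(lines[-1].split(',')[col])     # temperature value from the last logged row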

Related

Cycle an iterator using multiprocessing in Python

I have an iterator that retrieves a varying number of lines from a very large (>20 GB) file depending on some features. The iterator works fine, but I can only use one thread to process the result. I would like to feed the value from each iteration to multiple threads/processes.
I'm using a text file with 9 lines to mimic my data; here is my code. I've been struggling with how to create the feedback loop so that when one process finishes, it goes and retrieves the next iteration:
from multiprocessing import Process, Manager
import time

# Iterator
class read_file(object):
    def __init__(self, filePath):
        self.file = open(filePath, 'r')

    def __iter__(self):
        return self

    def __next__(self):
        line = self.file.readline()
        if line:
            return line
        else:
            raise StopIteration

# worker for one process
def print_worker(a, n, stat):
    print(a)
    stat[n] = True  # Set the finished status to True
    return None

# main
def main():
    file_path = 'tst_mp.txt'  # the txt file with 9 lines
    n_worker = 2
    file_handle = read_file(file_path)
    workers = []
    # Create a shared list to store the dereplicated dict and progress counter
    manager = Manager()
    status = manager.list([False] * 2)  # one status entry per worker
    # Initiate the workers
    for i in range(n_worker):
        workers.append(Process(target=print_worker, args=(file_handle.__next__(), i, status,)))
    for worker in workers:
        worker.start()

    block = file_handle.__next__()  # The next block (line)
    while block:  # continue while there are still blocks left
        print(status)
        time.sleep(1)  # poll every second
        for i in range(2):
            if status[i]:  # Worker i finished
                workers[i].join()
                # workers[i].close()
                workers[i] = Process(target=print_worker, args=(block, i, status,))
                status[i] = False  # Set worker i as busy (False)
                workers[i].start()  # Start worker i
                try:  # try to get the next item from the iterator
                    block = file_handle.__next__()
                except StopIteration:
                    block = False

if __name__ == '__main__':
    main()
The code is clumsy, but it did print out the sequence; it also produced some errors when I ran the code twice:
1
2
3
4
5
6
7
8
9
Process Process-10:
Traceback (most recent call last):
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/managers.py", line 802, in _callmethod
conn = self._tls.connection
AttributeError: 'ForkAwareLocal' object has no attribute 'connection'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/zewei/share/paf_depth/test_multiprocess.py", line 31, in print_worker
stat[n] = True # Set the finished status as True
File "<string>", line 2, in __setitem__
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/managers.py", line 806, in _callmethod
self._connect()
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/managers.py", line 794, in _connect
dispatch(conn, None, 'accept_connection', (name,))
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/managers.py", line 90, in dispatch
kind, result = c.recv()
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/connection.py", line 255, in recv
buf = self._recv_bytes()
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/connection.py", line 419, in _recv_bytes
buf = self._recv(4)
File "/home/zewei/mambaforge/lib/python3.9/multiprocessing/connection.py", line 384, in _recv
chunk = read(handle, remaining)
ConnectionResetError: [Errno 104] Connection reset by peer
Here is where I'm stuck. I was wondering if there is any fix or a more elegant way to do this?
Thanks!
Here's a better way to do what you are doing, using pool:
from multiprocessing import Pool
import time
.
.
.
.

# worker for one process
def print_worker(a):
    print(a)
    return None

def main():
    file_path = r''  # the txt file with 9 lines
    n_worker = 2
    file_handle = read_file(file_path)
    results = []
    with Pool(n_worker) as pool:
        for result in pool.imap(print_worker, file_handle):
            results.append(result)
    print(results)

if __name__ == '__main__':
    main()
Here, the imap function lazily iterates over the iterator, so that the whole file won't be read into memory. Pool handles spreading the tasks across the number of processes you started (using n_worker) automatically so that you don't have to manage it yourself.
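If the order of the results does not matter, a small variant (just a sketch, reusing the read_file class and print_worker from above) is imap_unordered with a chunksize, which hands several lines to a worker per dispatch and cuts down on inter-process overhead:

from multiprocessing import Pool

def main():
    file_handle = read_file('tst_mp.txt')  # the same iterator class as above
    with Pool(2) as pool:
        # chunksize=2 batches lines per task; results can arrive out of order,
        # which is fine when each line is processed independently.
        for result in pool.imap_unordered(print_worker, file_handle, chunksize=2):
            pass

if __name__ == '__main__':
    main()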

Exception: Failed to process waveform

Error:
Traceback (most recent call last):
File "c:\Programming\New_assistant\speech_to_text.py", line 18, in <module>
if rec.AcceptWaveform(data):
File "C:\Users\david\AppData\Local\Programs\Python\Python310\lib\site-packages\vosk\__init__.py", line 84, in AcceptWaveform
raise Exception("Failed to process waveform")
Exception: Failed to process waveform
PS C:\Programming\New_assistant>
I get this error when I try to use AcceptWaveform, regardless of the wav file or the rest of the code, but only when using vosk-model-small-ru-0.22; with vosk-model-ru-0.22 there are no errors, but the processing time is too long.
Code:
from vosk import Model, KaldiRecognizer
import json
import wave

model = Model(r"File\vosk-model-small-ru-0.22")
wf = wave.open(r"File\record1.wav", "rb")
rec = KaldiRecognizer(model, 8000)

result = ''
last_n = False
while True:
    data = wf.readframes(8000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        res = json.loads(rec.Result())
        if res['text'] != '':
            result += f" {res['text']}"
            last_n = False
        elif not last_n:
            result += '\n'
            last_n = True

res = json.loads(rec.FinalResult())
result += f" {res['text']}"
print(result)
By trial and error I found a solution and formed an assumption about why the error occurs, so if I'm wrong or you have a more complete solution, add it and I'll mark it.
If you copied the sample code that uses vosk, it was most likely written for vosk-model-ru-0.22, which works with a sampling rate of 8000, but vosk-model-small-ru-0.22 works with 44100, so just change 8000 to 44100 (depending on the recording).
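A slightly more robust variant (a sketch, assuming the input really is a plain wav file) is to take the sample rate from the recording itself instead of hard-coding 8000 or 44100, since KaldiRecognizer takes the rate as its second argument:

from vosk import Model, KaldiRecognizer
import wave

wf = wave.open(r"File\record1.wav", "rb")
model = Model(r"File\vosk-model-small-ru-0.22")

# Feed the recognizer the recording's own sample rate.
rec = KaldiRecognizer(model, wf.getframerate())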

Multiprocessing using partial() throws ForkingPickler error

I am trying to crawl abstracts from PubMed and filter them using a regex in Python. To speed things up, I wanted to use Python's multiprocessing Pool.
My code looks like the following:
import multiprocessing as mp
from functools import partial
from typing import List, Tuple

def beautify_abstract(abstract: str, regex: str):
    import re
    result: str = ""
    last_start = 0
    matches = re.finditer(regex, abstract, re.MULTILINE)
    for matchNum, match in enumerate(matches, start=1):
        result += abstract[last_start:match.start()]
        result += "<b>"
        result += abstract[match.start():match.end()]
        result += "</b>"
        last_start = match.end()
    result += abstract[last_start:]
    return result

def get_doi(pim: str, regex: str):
    from Bio import Entrez
    from Bio.Entrez import efetch
    import re
    from metapub.convert import pmid2doi
    Entrez.email = "Your.Name.Here#example.org"
    print(f"Processing {pim}")
    abstract_handle = efetch(db="pubmed", id=pim, retmode='text', rettype='all')
    abstract = abstract_handle.read()
    abstract_handle.close()
    if re.search(regex, abstract, re.MULTILINE) is not None:
        docsum_handle = efetch(db="pubmed", id=pim, retmode='text', rettype='docsum').read()
        docsum = docsum_handle.read()
        try:
            doi = pmid2doi(pim)
        except:
            doi = "UNKNOWN"
        return f"{doi}"
    return ""

def get_pim_with_regex_list(keywords: List[str]) -> List[str]:
    from Bio import Entrez
    Entrez.email = "Your.Name.Here#example.org"
    searchterm = " ".join(keywords)
    pims = []
    handle = Entrez.esearch(db="pubmed", retstart=0, retmax=0, term=searchterm, idtype="acc")
    record = Entrez.read(handle)
    handle.close()
    count = int(record['Count'])
    if count > 100000:
        retmax = 100000
    else:
        retmax = count
    retstart = 0
    while retstart < count:
        handle = Entrez.esearch(db="pubmed", retstart=retstart, retmax=retmax, term=searchterm, idtype="acc")
        record = Entrez.read(handle)
        handle.close()
        for pim in record['IdList']:
            pims.append(pim)
        retstart += retmax
    return pims

if __name__ == '__main__':
    keywords = ["keyword1", "keyword2"]
    pim_list = get_pim_with_regex_list(keywords)
    regex = "keyword1 keyword2"
    worker_fn = partial(get_doi, regex=regex)
    pool = mp.Pool(mp.cpu_count())
    entries = pool.map(worker_fn, pim_list)
    pool.close()
    pool.join()
When I run the given code, I get the following error:
Traceback (most recent call last):
File "/usr/lib/python3.9/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.9/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.9/multiprocessing/pool.py", line 114, in worker
task = get()
File "/usr/lib/python3.9/multiprocessing/queues.py", line 368, in get
return _ForkingPickler.loads(res)
TypeError: __new__() missing 2 required positional arguments: 'tag' and 'attributes'
Process ForkPoolWorker-4:
Traceback (most recent call last):
File "/usr/lib/python3.9/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.9/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.9/multiprocessing/pool.py", line 114, in worker
task = get()
File "/usr/lib/python3.9/multiprocessing/queues.py", line 368, in get
return _ForkingPickler.loads(res)
TypeError: __new__() missing 2 required positional arguments: 'tag' and 'attributes'
I did some digging into multiprocessing in Python and found out that only Python native types are supported as parameters (enforced by the ForkingPickler).
Assuming that str is a native type, the code should work... Currently, I am completely lost and have no idea what the problem may be.
As suggested, I uploaded a minimal (sequential) working example here.
Is there any way to fix this problem, or at least to diagnose the real issue here?
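One thing worth checking (an assumption on my part, not a confirmed diagnosis): Entrez.read() tends to return string-like element objects rather than plain str, and those could be what the ForkingPickler fails to rebuild in the workers. A quick sketch to confirm or rule that out before calling pool.map:

# Hypothetical check: what types are actually being sent to the workers?
print({type(pim) for pim in pim_list})

# If they are not plain str, cast them before mapping.
plain_pim_list = [str(pim) for pim in pim_list]
entries = pool.map(worker_fn, plain_pim_list)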

HBase-HappyBase: Socket Timeout Error For Larger Files - Works Fine With Smaller Ones

I use the following piece of Python code, with the happybase module, to update HBase. It works perfectly for files with fewer than 30k records, but throws a timeout error when they exceed 30k-35k. I tried the options suggested in other Stack questions, such as editing hbase-site.xml, and a few other things, but no help. Did anyone come across the same issue?
import socket
import happybase as hb

def loadIdPHSegmentPyBase():
    s = socket.socket()
    s.settimeout(300)
    connection = hb.Connection('XXXXX', 9090, timeout=None, compat='0.92', transport='buffered')
    table = connection.table('HBASE_D_L')
    ReqFileToLoad = ("%segment.txt" % (dirName))
    b = table.batch()
    with open('%s' % (ReqFileToLoad)) as ffile1:
        for line in ffile1:
            line = line.strip()
            line = line.split('|')
            # print line[7]
            if line[7] == 'PH':
                b.put(line[0], {'ADDR_IDPH:PHMIDDLE_NAME': line[1], 'ADDR_IDPH:PHSUR_NAME': line[2], 'ADDR_IDPH:PHFIRST_NAME': line[3], 'ADDR_IDPH:PHFILLER1': line[4], 'ADDR_IDPH:PHFILLER2': line[5], 'ADDR_IDPH:PHFILLER3': line[6], 'ADDR_IDPH:TELEPHONE_SUBSEGMENT_ID': line[7], 'ADDR_IDPH:TELEPHONE_TYPE_CODE': line[8], 'ADDR_IDPH:PUBLISHED_INDICATOR': line[9], 'ADDR_IDPH:TELEPHONE_NUMBER': line[10]})
            else:
                b.put(line[0], {'ADDR_IDPH:IDMIDDLE_NAME': line[1], 'ADDR_IDPH:IDSUR_NAME': line[2], 'ADDR_IDPH:IDFIRST_NAME': line[3], 'ADDR_IDPH:IDFILLER1': line[4], 'ADDR_IDPH:IDFILLER2': line[5], 'ADDR_IDPH:IDFILLER3': line[6], 'ADDR_IDPH:IDSUBSEGMENT_IDENTIFIER': line[7], 'ADDR_IDPH:ID_TYPE': line[8], 'ADDR_IDPH:ID_VALIDITY_INDICATOR': line[9], 'ADDR_IDPH:ID_VALUE': line[11]})
        b.send()
    s.close()
My error with larger files:
File "thriftpy/protocol/cybin/cybin.pyx", line 429, in cybin.TCyBinaryProtocol.read_message_begin (thriftpy/protocol/cybin/cybin.c:6325)
File "thriftpy/protocol/cybin/cybin.pyx", line 60, in cybin.read_i32 (thriftpy/protocol/cybin/cybin.c:1546)
File "thriftpy/transport/buffered/cybuffered.pyx", line 65, in thriftpy.transport.buffered.cybuffered.TCyBufferedTransport.c_read (thriftpy/transport/buffered/cybuffered.c:1881)
File "thriftpy/transport/buffered/cybuffered.pyx", line 69, in thriftpy.transport.buffered.cybuffered.TCyBufferedTransport.read_trans (thriftpy/transport/buffered/cybuffered.c:1948)
File "thriftpy/transport/cybase.pyx", line 61, in thriftpy.transport.cybase.TCyBuffer.read_trans (thriftpy/transport/cybase.c:1472)
File "/usr/local/python27/lib/python2.7/site-packages/thriftpy/transport/socket.py", line 108, in read
buff = self.sock.recv(sz)
socket.timeout: timed out
This was how it got resolved:
with open('%s' % (ReqFileToLoad)) as ffile1:
    for line in ffile1:
        line = line.strip()
        line = line.split('|')
        # print line[7]
        if line[7] == 'PH':
            b = table.batch()
            b.put(line[0], {'ADDR_IDPH:PHMIDDLE_NAME': line[1], 'ADDR_IDPH:PHSUR_NAME': line[2], 'ADDR_IDPH:PHFIRST_NAME': line[3], 'ADDR_IDPH:PHFILLER1': line[4], 'ADDR_IDPH:PHFILLER2': line[5], 'ADDR_IDPH:PHFILLER3': line[6], 'ADDR_IDPH:TELEPHONE_SUBSEGMENT_ID': line[7], 'ADDR_IDPH:TELEPHONE_TYPE_CODE': line[8], 'ADDR_IDPH:PUBLISHED_INDICATOR': line[9], 'ADDR_IDPH:TELEPHONE_NUMBER': line[10]})
        else:
            b = table.batch()
            b.put(line[0], {'ADDR_IDPH:IDMIDDLE_NAME': line[1], 'ADDR_IDPH:IDSUR_NAME': line[2], 'ADDR_IDPH:IDFIRST_NAME': line[3], 'ADDR_IDPH:IDFILLER1': line[4], 'ADDR_IDPH:IDFILLER2': line[5], 'ADDR_IDPH:IDFILLER3': line[6], 'ADDR_IDPH:IDSUBSEGMENT_IDENTIFIER': line[7], 'ADDR_IDPH:ID_TYPE': line[8], 'ADDR_IDPH:ID_VALIDITY_INDICATOR': line[9], 'ADDR_IDPH:ID_VALUE': line[11]})
        b.send()
I suggest that you use smaller batch sizes, or that you do not use a batch at all. A batch is a client-side buffer without any limits, so it can cause huge Thrift requests when it is sent. happybase also provides a helper for this: you can specify batch_size and the batch will be flushed periodically.
https://happybase.readthedocs.io/en/latest/api.html#happybase.Table.batch
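For example, a minimal sketch of the batch_size route (reusing the table and file from above, with the column dict trimmed for brevity); the batch flushes itself every 1000 rows and sends any remainder when the with-block exits:

with table.batch(batch_size=1000) as b:
    with open(ReqFileToLoad) as ffile1:
        for line in ffile1:
            fields = line.strip().split('|')
            # put() only buffers; happybase sends a Thrift request per 1000 rows.
            b.put(fields[0], {'ADDR_IDPH:TELEPHONE_NUMBER': fields[10]})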

Python: Can't start new thread. Can I stagger or delay some threads?

Not really sure how to ask this question since I am just beginning to learn Python, but here it goes:
I have a web scraper that uses threading to grab info. I am looking for pricing and stock for about 900 products. When I test the script with about half of that, there is no problem. When I try to scrape all 900 products I get a "can't start new thread" error.
I imagine this is due to some memory constraint, or because I am sending the server too many requests.
I would like to know if there is a way to slow down the threads or to stagger the requests.
Error Code:
Traceback (most recent call last):
File "C:\Python27\tests\dxpriceupdates.py", line 78, in <module>
t.start()
error: can't start new thread
>>>
Traceback (most recent call last):Exception in thread Thread-554:
Traceback (most recent call last):
File "C:\Python27\lib\urllib.py", line 346, in open_http
errcode, errmsg, headers = h.getreply()
File "C:\Python27\lib\httplib.py", line 1117, in getreply
response = self._conn.getresponse()
File "C:\Python27\lib\httplib.py", line 1045, in getresponse
response.begin()
File "C:\Python27\lib\httplib.py", line 441, in begin
self.msg = HTTPMessage(self.fp, 0)
File "C:\Python27\lib\mimetools.py", line 25, in __init__
rfc822.Message.__init__(self, fp, seekable)
File "C:\Python27\lib\rfc822.py", line 108, in __init__
self.readheaders()
File "C:\Python27\lib\httplib.py", line 308, in readheaders
self.addheader(headerseen, line[len(headerseen)+1:].strip())
MemoryError
<bound method Thread.__bootstrap of <Thread(Thread-221, stopped 9512)>>Traceback (most recent call last):
Traceback (most recent call last):
Traceback (most recent call last):
Traceback (most recent call last):
Unhandled exception in thread started by Unhandled exception in thread started by ...
Here is the Python (skulist.txt is just a text file like 12345, 23445, 5551, ...):
from threading import Thread
import urllib
import re
import json
import math

def th(ur):
    site = "http://dx.com/p/GetProductInfoRealTime?skus=" + ur
    htmltext = urllib.urlopen(site)
    data = json.load(htmltext)
    htmlrates = urllib.urlopen("http://rate-exchange.appspot.com/currency?from=USD&to=AUD")
    datarates = json.load(htmlrates)
    if data['success'] == True:
        if data['data'][0]['discount'] is 0:
            price = float(data['data'][0]['price'])
            rate = float(datarates['rate']) + 0.12
            cost = price * rate
            if cost <= 5:
                saleprice = math.ceil(cost * 1.7) - .05
            elif (cost > 5) and (cost <= 10):
                saleprice = math.ceil(cost * 1.6) - .05
            elif (cost > 10) and (cost <= 15):
                saleprice = math.ceil(cost * 1.55) - .05
            else:
                saleprice = math.ceil(cost * 1.5) - .05
            if data['data'][0]['issoldout']:
                soldout = "Out Of Stock"
                enabled = "Disable"
                qty = "0"
            else:
                soldout = "In Stock"
                enabled = "Enabled"
                qty = "9999"
            #print model, saleprice, soldout, qty, enabled
            myfile.write(str(ur)+","+str(saleprice)+","+str(soldout)+","+str(qty)+","+str(enabled)+"\n")
        else:
            price = float(data['data'][0]['listprice'])
            rate = float(datarates['rate']) + 0.12
            cost = price * rate
            if cost <= 5:
                saleprice = math.ceil(cost * 1.7) - .05
            elif (cost > 5) and (cost <= 10):
                saleprice = math.ceil(cost * 1.6) - .05
            elif (cost > 10) and (cost <= 15):
                saleprice = math.ceil(cost * 1.55) - .05
            else:
                saleprice = math.ceil(cost * 1.5) - .05
            if data['data'][0]['issoldout']:
                soldout = "Out Of Stock"
                enabled = "Disable"
                qty = "0"
            else:
                soldout = "In Stock"
                enabled = "Enabled"
                qty = "9999"
            #print model, saleprice, soldout, qty, enabled
            myfile.write(str(ur)+","+str(saleprice)+","+str(soldout)+","+str(qty)+","+str(enabled)+"\n")
    else:
        qty = "0"
        print ur, "error \n"
        myfile.write(str(ur)+","+"0.00"+","+"Out Of Stock"+","+str(qty)+","+"Disable\n")

skulist = open("skulist.txt").read()
skulist = skulist.replace(" ", "").split(",")

myfile = open("prices/price_update.txt", "w+")
myfile.close()
myfile = open("prices/price_update.txt", "a")

threadlist = []
for u in skulist:
    t = Thread(target=th, args=(u,))
    t.start()
    threadlist.append(t)

for b in threadlist:
    b.join()

myfile.close()
Don't fire 900 threads at once; your PC could literally choke! Instead, use a pool and distribute the work across a fixed number of workers. Use multiprocessing like this:
from multiprocessing import Pool

WORKERS = 10

p = Pool(WORKERS)
p.map(th, skulist)
Find the right value for WORKERS by experimenting a bit.
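Since the work is I/O-bound and the original code writes to a shared global file handle, a closely related option (a sketch, keeping the th function and skulist from above) is the thread-based pool in multiprocessing.dummy, which exposes the same Pool API but caps the number of live threads instead of spawning 900 at once:

from multiprocessing.dummy import Pool as ThreadPool  # threads, not processes

WORKERS = 10  # tune by experimenting, as noted above

pool = ThreadPool(WORKERS)
pool.map(th, skulist)  # only WORKERS threads are in flight at any time
pool.close()
pool.join()
myfile.close()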
