Call a Python script from another Python script

I have two scripts.

script_1.py:
import sys
import math
from datetime import datetime, timedelta
from calendar import isleap
count = sys.argv[1]
state = sys.argv[2]
f = open("myfile_c_"+count+".xml", 'a')
f.write("<state>"+state+"</state>"+"\n")
f.close()
It creates files (copies of a file) according to the count argument that is passed in.

script_2.py:
import random
import subprocess
import decimal
import string
import sys
import math
from datetime import datetime, timedelta
from calendar import isleap
copy = int(sys.argv[1])
count = 0
state = random.choices( ["FeeSimple","Leasehold","Other"], weights=(80, 15, 5), k=copy)
while (count < copy):
    exec(open("script_1.py count state[int(count]").read())  # should call the first script and enter the arguments
Any idea how to call the first script from the second script and pass in the arguments inside the while loop?

At the top of script_2.py, put the line below. Also note that script_2.py already has its own variable called count, so rename one of them to avoid a bug:
from script_1 import count
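As an alternative, here is a minimal sketch (not from the answer above): since script_1.py already reads its inputs from sys.argv, you can launch it from the while loop with subprocess, which script_2.py already imports. The script name script_1.py comes from the question; everything else is illustrative.

import random
import subprocess
import sys

copy = int(sys.argv[1])
state = random.choices(["FeeSimple", "Leasehold", "Other"], weights=(80, 15, 5), k=copy)

count = 0
while count < copy:
    # run script_1.py as a child process; count and the chosen state arrive
    # there as sys.argv[1] and sys.argv[2], which is what script_1.py already expects
    subprocess.run([sys.executable, "script_1.py", str(count), state[count]], check=True)
    count += 1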


Losing some samples while writing to file in Python

I'm continuously getting readings from an ADC in Python, but while writing them to a file I lose some samples because of a small delay. Is there a way I could avoid losing these samples? (I'm sampling at 100 Hz.)
I'm using multithreading, but in the process of writing the data to a file and clearing the list used for it, I always lose some samples. The code is copied here as I have written it, and all advice is welcome.
Thanks in advance.
import threading
import time
from random import randint
import os
from datetime import datetime
import ADS1256
import RPi.GPIO as GPIO
import sys
import csv

ADC = ADS1256.ADS1256()
ADC.ADS1256_init()

value_list = []

# adc_reading() reads ADC values and appends them to a list continuously.
def adc_reading():
    global value_list
    value_list = []
    while True:
        adc_value = ADC.ADS1256_GetAll()
        timestamp = time.time()
        x = adc_value[1]
        y = adc_value[2]
        z = adc_value[3]
        value_list.append([timestamp, x, y, z])

# cronometro() creates a new file every 60 seconds with the values gathered in adc_reading().
def cronometro():
    global value_list
    while True:
        contador = 60
        inicio = time.time()
        diferencia = 0
        while diferencia <= contador:
            diferencia = time.time() - inicio
        write_to_file(value_list)

# write_to_file() writes the values gathered in adc_reading() to a file every 60 seconds.
def write_to_file(lista):
    nombre_archivo = str(int(time.time())) + ".finish"
    with open(nombre_archivo, 'w') as f:
        # using csv.writer from the csv module
        write = csv.writer(f)
        write.writerows(lista)
    value_list = []

escritor = threading.Thread(target=adc_reading)
temporizador = threading.Thread(target=cronometro)
escritor.start()
temporizador.start()
At 100 Hz, I have to wonder if the write operation really takes longer than 10 ms. You could probably do both operations in the same loop: just collect the data in a buffer and write it (about 6000 values) once every 60 seconds without incurring more than a few milliseconds of delay:
import time
import ADS1256
import csv

ADC = ADS1256.ADS1256()
ADC.ADS1256_init()

def adc_reading():
    buffer = []
    contador = 60
    while True:
        check = inicio = time.time()
        while check - inicio <= contador:
            adc_value = ADC.ADS1256_GetAll()
            buffer.append([(check := time.time()), *adc_value[1:4]])
        nombre_archivo = str(int(check)) + ".finish"
        with open(nombre_archivo, 'w') as f:
            write = csv.writer(f)
            write.writerows(buffer)
        buffer = []

if __name__ == '__main__':
    adc_reading()
If you do need them to run in parallel (a slow computer, other circumstances), you shouldn't use threads but processes from multiprocessing.
The two threads won't run in parallel; they will alternate. You could run the data collection in a separate process and collect the data from it in the main process.
Here's an example of doing this with some toy code; I think it's easy to see how to adjust it for your case:
from multiprocessing import SimpleQueue, Process
from random import randint
from time import sleep, time

def generate_signals(q: SimpleQueue):
    c = 0
    while True:
        sleep(0.01)  # about 100 Hz
        q.put((c, randint(1, 42)))
        c += 1

def write_signals(q: SimpleQueue):
    delay = 3  # 3 seconds for the demo, 60 works as well
    while True:
        start = time()
        while (check := time()) - start < delay:
            sleep(.1)
        values = []
        while not q.empty():
            values.append(str(q.get()))
        with open(f'{int(check)}.finish', 'w') as f:
            f.write('\n'.join(values))

if __name__ == "__main__":
    q = SimpleQueue()
    generator = Process(target=generate_signals, args=(q,))
    generator.start()
    writer = Process(target=write_signals, args=(q,))
    writer.start()
    writer.join(timeout=10)  # run for no more than 10 seconds, enough for the demo
    writer.kill()
    generator.join(timeout=0)
    generator.kill()
Edit: added a counter to show that no values are missed.

How do I run a function from my .py file from the command line?

I have a .py file with a function that calculates the gradient of a function at a point and returns the value of that gradient at that point. The function takes an np.array of shape (2,) as input and outputs another np.array of shape (2,). I am confused as to how I can call the function from the command line and run it with a specified input.
Here is a code snippet:
import numpy as np

def grad(x):
    x_1 = x[0]
    x_2 = x[1]
    df_dx_1 = 6*x
    df_dx_2 = 8*x_2
    df_dx = np.array([df_dx_1, df_dx_2])
    return np.transpose(df_dx)
I would really appreciate your help!
EDIT: This question differs from the popular command-line thread because I have a specific issue: the numpy input is not being recognised.
First change the script as below (it uses if __name__ == '__main__' to check whether it is being run as a script, then imports sys and passes the first command-line argument, sys.argv[1], to the function):
import numpy as np

def grad(x):
    x_1 = x[0]
    x_2 = x[1]
    df_dx_1 = 6*x
    df_dx_2 = 8*x_2
    df_dx = np.array([df_dx_1, df_dx_2])
    return np.transpose(df_dx)

if __name__ == '__main__':
    import sys
    grad(sys.argv[1])
And call it like:
python "YOURSCRIPTPATH.py" argument_1
You can have more than one command-line argument:

import sys
import numpy as np

def grad(x):
    # your grad function here
    ...

arr = np.array([int(sys.argv[1]), int(sys.argv[2])])
print(grad(arr))
Usage:
python gradient.py 10 5
You could also just do something like this on the command line:
$ python -c 'from YOURFILE import grad; print(grad(your_argument))'
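For a slightly more structured interface, here is a minimal sketch using argparse; the wrapper name run_grad.py is hypothetical and YOURFILE is a placeholder for your module, neither comes from the answers above. It parses two numbers and hands them to grad as a NumPy array:

# run_grad.py (hypothetical wrapper script)
import argparse
import numpy as np
from YOURFILE import grad   # YOURFILE is a placeholder for your module name

parser = argparse.ArgumentParser(description="Evaluate grad at a 2-D point")
parser.add_argument("x1", type=float)
parser.add_argument("x2", type=float)
args = parser.parse_args()

print(grad(np.array([args.x1, args.x2])))

Usage: python run_grad.py 10 5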

How to mock datetime in just one file?

I have two files in which I use datetime. Is there a way to mock it in just one file and not the other? The following is an example where I'm seeing some weird behavior.
File: test_file_one.py
import datetime as dt

def print_datetime():
    print 'First: {}'.format(dt.datetime.utcnow())
File: test_file_two.py
import datetime as dt

def print_datetime():
    print 'Second: {}'.format(dt.datetime.utcnow())
File: main.py
import test_file_one as first
import test_file_two as second
import mock
from datetime import datetime, timedelta

@mock.patch('test_file_one.dt.datetime')
def main(mock_datetime):
    mock_datetime.utcnow.return_value = datetime.utcnow() + timedelta(days=1)
    first.print_datetime()
    second.print_datetime()

if __name__ == '__main__':
    main()
Output
First: 2018-06-12 08:12:43.838243
Second: 2018-06-12 08:12:43.838243
As you see, both return the same datetime which was mocked.
Why are both mocked?
How to limit the mock to just one file?
You can just add as many results as there will be calls to side_effect:
mock_datetime.utcnow.side_effect = [datetime.utcnow() + timedelta(days=1), datetime.utcnow() + timedelta(days=2)]
I'd suggest you reduce the scope of your mocking.
Right now you are applying the mock to the whole method by using the decorator @mock.patch('test_file_one.dt.datetime').
Instead you could try something like:
def main():
    with mock.patch('test_file_one.dt.datetime') as mock_datetime:
        mock_datetime.utcnow.return_value = datetime.utcnow() + timedelta(days=1)
        first.print_datetime()
        second.print_datetime()
As an alternative, you could use the fake_time method from the libfaketime-tz-wrapper library.
Then your approach would be something like:

from libfaketime_tz_wrapper import fake_time

def main():
    with fake_time(datetime.utcnow() + timedelta(days=1)):
        first.print_datetime()
        second.print_datetime()

I didn't test whether my suggestions work, but I've been using fake_time a lot over the last 1.5 years and it seems to be very handy for issues like this.
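One extra note, not taken from the answers above: because both files do import datetime as dt, they share the same datetime module object, so patching test_file_one.dt.datetime replaces datetime.datetime for every module that looks it up at call time. A sketch of one way to keep the patch local is to import the class directly in the module you want to affect and patch that name where it is looked up:

# test_file_one.py (changed to import the class directly)
from datetime import datetime

def print_datetime():
    print('First: {}'.format(datetime.utcnow()))

# main.py
import mock
from datetime import datetime, timedelta
import test_file_one as first
import test_file_two as second

@mock.patch('test_file_one.datetime')  # patches only the name bound inside test_file_one
def main(mock_datetime):
    mock_datetime.utcnow.return_value = datetime.utcnow() + timedelta(days=1)
    first.print_datetime()   # uses the mock
    second.print_datetime()  # still uses the real datetime

if __name__ == '__main__':
    main()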

Calling one script from another and importing its values

I have two scripts, main.py and get_number.py. The script get_number.py returns a random number whenever it's called. I want to call this script from main.py and print all the returned values. In other words, get_number.py is the following:
def get_random():
    return np.random.uniform(0,1)
Now I have the following code in main.py
import get_number

n_call = 4
values = np.zeros(n_call)
for i in range(n_call):
    values[i] = get_number.get_random()
print(values)
However, I am receiving the error "No module named get_number". How would I go about accomplishing this task?
I believe you can import it just as you would import another library:

from file1 import *

See the related question "Importing variables from another file?"; I found some similar problems on here.
You are confusing get_number with get_random.
main.py:
import numpy as np
from get_number import get_random

n_call = 4
values = np.zeros(n_call)
for i in range(n_call):
    values[i] = get_random()
print(values)

Out: [ 0.63433276  0.36541908  0.83485925  0.59532567]
get_number.py:

import numpy as np

def get_random():
    return np.random.uniform(0,1)
You have to import it this way in main.py:

import numpy as np
from get_number import get_random

n_call = 4
values = np.zeros(n_call)
for i in range(n_call):
    values[i] = get_random()
print(values)

python: which file is newer & by how much time

I am trying to create a filedate comparison routine. I suspect that the following is a rather clunky approach.
I had some difficulty finding info about timedelta's attributes or methods, or whatever they are called; hence, I measured the datetime difference below only in terms of days, hours, minutes and seconds, and there is no list item representing years.
Any suggestions for an alternative, would be much appreciated.
import os
import datetime
from datetime import datetime
import sys

def datetime_filedif(filepath1e, filepath2e):
    filepath1 = str(filepath1e)
    filepath2 = str(filepath2e)
    filepath1_lmdate = datetime.fromtimestamp(os.path.getmtime(filepath1))
    filepath2_lmdate = datetime.fromtimestamp(os.path.getmtime(filepath2))
    td_files = filepath2_lmdate - filepath1_lmdate  # time delta of the 2 file dates
    td_list = [('td_days', td_files.days),
               ('td_hrs', int(str(td_files.seconds)) / 3600),
               ('td_minutes', (int(str(td_files.seconds)) % 3600) / 60),
               ('td_seconds', (int(str(td_files.seconds)) % 3600) % 60)]
    print "Line 25: ", str(td_list)
    return td_list
There is a solution for that already:
import os
modified_time = os.stat(path).st_mtime # time of most recent content modification
diff_time = os.stat(path_1).st_mtime - os.stat(path_2).st_mtime
Now you have the time in seconds since the Epoch. Why create a new representation? You can build a timedelta (or whatever you need) from this; there is no need to invent a new format.
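If a timedelta is still wanted on top of that, here is a minimal sketch (file_time_difference is a hypothetical helper name, not from the question); its days, seconds and microseconds attributes replace the manual arithmetic:

import os
from datetime import timedelta

def file_time_difference(path_1, path_2):
    # positive result: path_2 was modified more recently than path_1,
    # matching the filepath2_lmdate - filepath1_lmdate order used in the question
    seconds = os.stat(path_2).st_mtime - os.stat(path_1).st_mtime
    return timedelta(seconds=seconds)

# example: td = file_time_difference("old.txt", "new.txt")
# td.days, td.seconds and td.microseconds give the breakdown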
