I have two Python files (I'm using PyCharm). In Python file #2, I want to call a function from Python file #1.
from main import load_data_from_file
delay, wavelength, measured_trace = load_data_from_file("Sweep_0.txt")
print(delay.shape)
where main is the name of Python file #1. However, when I run Python file #2 (the code posted at the top), I can see that the whole of Python file #1 runs as well.
Any suggestions on how I can just run print(delay.shape) without running the entire Python file #1?
Here is my code:
import logging
import numpy as np

class import_trace:
    def __init__(self, delays, wavelengths, spectra):
        self.delays = delays
        self.wavelengths = wavelengths
        self.spectra = spectra

    def load_data_from_file(self, filename):
        # logging.info("Entered load_data_from_file")
        with open(filename, 'r') as data_file:
            wavelengths = []
            spectra = []
            delays = []
            for num, line in enumerate(data_file):
                if num == 0:
                    # Get the 1st line, drop the 1st element, and convert it
                    # to a float array.
                    delays = np.array([float(stri) for stri in line.split()[1:]])
                else:
                    data = [float(stri) for stri in line.split()]
                    # The first column contains wavelengths.
                    wavelengths.append(data[0])
                    # All other columns contain intensity at that wavelength
                    # vs time.
                    spectra.append(np.array(data[1:]))
        logging.info("Data loaded from file has sizes (%dx%d)" %
                     (delays.size, len(wavelengths)))
        return delays, np.array(wavelengths), np.vstack(spectra)
Below, I use this to get the values, but it does not work:
frog = import_trace(delays,wavelengths,spectra)
delay, wavelength, measured_trace = frog.load_data_from_file("Sweep_0.txt")
I got this error:
frog = import_trace(delays,wavelengths,spectra)
NameError: name 'delays' is not defined
You can wrap up the functions of file 1 in a class, and in file 2 you can create an object and call the specific function. However, if you can share file 1's code it would be clearer. For your reference, see below...
File-1:

class A:
    def func1():
        ...

File-2:

import file1 as f

# function call
f.A.func1()
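Applied to the code above, a minimal sketch of what file 2 could look like, assuming file 1 is main.py and already contains the import_trace class shown earlier (the empty lists passed to the constructor are just placeholders):

# file2.py - sketch only; assumes main.py defines import_trace
from main import import_trace

frog = import_trace([], [], [])  # placeholder arguments; the loader fills in the real values
delay, wavelength, measured_trace = frog.load_data_from_file("Sweep_0.txt")
print(delay.shape)

Note that any code sitting at the top level of main.py still executes on import; wrapping it in an if __name__ == "__main__": block keeps it from running when the module is imported from file 2.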
I'm new to Python and am having trouble passing an object into a function.
Basically, I'm trying to read a large file with over 1.4B lines.
I am passing in an object that contains information about the file. One of its fields is a very large array containing the location of the start of each line in the file.
Because this array is so large, by passing just the object reference I hope to have a single instance of the array that is shared by multiple processes, although I don't know whether this is actually happening.
When the array is passed into the process_line function it arrives empty, which leads to errors. This is the problem.
Here is where the function is called (see the p.starmap call):
with open(file_name, 'r') as f:
    line_start = file_inf.start_offset
    # Iterate over all lines and construct arguments for `process_line`
    while line_start < file_inf.file_size:
        line_end = min(file_inf.file_size, line_start + line_size)  # end = minimum of the file size and line_start + line_size
        # Save `process_line` arguments
        args = [file_name, line_start, line_end, file_inf.line_offset]  # arguments for process_line
        line_args.append(args)
        # Move to the next line
        line_start = line_end
    print(line_args[1])

with multiprocessing.Pool(cpu_count) as p:  # run the process_line function on each line
    # Run lines in parallel
    # starmap() is like map() except that we have multiple arguments in a list, so we use starmap
    line_result = p.starmap(process_line, line_args)  # maps the process_line function to each set of arguments
This is the function:
def process_line(file_name, line_start, line_end, file_obj):
    line_results = register()
    c2 = register()
    c1 = register()
    with open(file_name, 'r') as f:
        # Move the stream position to `line_start`
        f.seek(file_obj[line_start])
        i = 0
        if line_start == 63400:
            print("hello")
        # Read and process lines until `line_end`
        for line in f:
            line_start += 1
            if line_start > line_end:
                line_results.__append__(c2)
                c2.clear()
                break
            c1 = func(line)
            c2.__add__(c1)
            i = i + 1
    return line_results.countf
where file_obj contains line_offset, which is the array in question.
Now, if I remove the multiprocessing and just use:
line_result = starmap(process_line, line_args)
the array is passed in just fine, although of course without multiprocessing.
Also, if I pass in just the array instead of the entire object, it works too, but then for some reason only 2 processes do any work (on Linux; on Windows the task manager shows only 1 process working while the rest just use memory but no CPU), instead of the expected 20, which is critical for this task.
Is there any solution to this? Please help.
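For illustration, here is a minimal, self-contained sketch (with hypothetical names and placeholder data, not the original code) of one common pattern for this situation: handing the large array to each worker once through a Pool initializer, so it does not have to be pickled into every task's arguments:

import multiprocessing

_shared_offsets = None  # hypothetical module-level slot, filled once per worker

def _init_worker(offsets):
    # Runs once in each worker process; keeps the big array out of every task's argument list
    global _shared_offsets
    _shared_offsets = offsets

def process_line(file_name, line_start, line_end):
    # Illustrative body only: look up the offset in the shared array and read from there
    with open(file_name, 'r') as f:
        f.seek(_shared_offsets[line_start])
        return len(f.readline())

if __name__ == '__main__':
    line_offsets = [0, 10, 25]                    # placeholder offsets
    tasks = [(__file__, 0, 1), (__file__, 1, 2)]  # placeholder tasks that read this script itself
    with multiprocessing.Pool(2, initializer=_init_worker, initargs=(line_offsets,)) as p:
        print(p.starmap(process_line, tasks))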
My program searches a .txt file for the upper and lower values closest to a given input value.
def find_closer():
    file = 'C:/.../CariCBABaru.txt'
    data = np.loadtxt(file)
    x, y = data[:,0], data[:,1]
    print(y)

    for k in range(len(spasi_baru)):
        a = y  # [0, 20.28000631, 49.43579604, 78.59158576, 107.7473755, 136.9031652, 166.0589549, 176.5645474, 195.2147447]
        b = spasi_baru[k]
        # diff_list = []
        diff_dict = OrderedDict()
        if b in a:
            b = input("Number already exists, please enter another number ")
        else:
            for x in a:
                diff = x - b
                if diff < 0:
                    # diff_list.append(diff*(-1))
                    diff_dict[x] = diff*(-1)
                else:
                    # diff_list.append(diff)
                    diff_dict[x] = diff
        # print("diff_dict", diff_dict)
        # print(diff_dict[9])
        sort_dict_keys = sorted(diff_dict.keys())
        # print(sort_dict_keys)
        closer_less = 0
        closer_more = 0
        # cl = []
        # cm = []
        for closer in sort_dict_keys:
            if closer < b:
                closer_less = closer
            else:
                closer_more = closer
                break
        # cl.append(closer_less == len(spasi_baru) - 1)
        # cm.append(closer_more == len(spasi_baru) - 1)
        print(spasi_baru[k], ": lower value=", closer_less, "and upper value =", closer_more)

        data = open('C:/.../Batas.txt','w')
        text = "Spasi baru:{spasi_baru}, File: {closer_less}, line:{closer_more}".format(spasi_baru=spasi_baru[k], closer_less=closer_less, closer_more=closer_more)
        data.write(text)
        data.close()
        print(spasi_baru[k], ": lower value=", closer_less, "and upper value =", closer_more)

find_closer()
The results printed to the terminal are shown in the linked image.
I then want to write these results to a file (txt or csv, either is fine) as a sequence of rows and columns. The problem I have is that the file ends up containing just one row, namely the last value printed to the terminal, like below:
Spasi baru:400, File: 399.3052727, line: 415.037138
Any suggestions to help fix my problem, please? I've been stuck for several hours trying different approaches. I'm using Python 3.7.
The simplest fix is to use 'a' or 'a+' mode when you're trying to keep appending to the same text file; 'w' (and 'w+') truncate the file every time it is opened.
Instead of doing this:
data = open('C:/.../Batas.txt','w')
Do this:
data = open('C:/.../Batas.txt','a')
or
data = open('C:/.../Batas.txt','a+')
The reason is that you are reopening and overwriting the same file on every pass through the loop, so only the last iteration is kept. Look for ways to save files without overwriting them.
'r' – Read mode, used when the file is only being read.
'w' – Write mode, used to edit and write new information to the file (any existing file with the same name is erased when this mode is used).
'a' – Append mode, used to add new data to the end of the file; that is, new information is automatically appended to the end.
'r+' – Special read-and-write mode, used to handle both actions when working with a file.
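For illustration, a minimal sketch (with placeholder values, not the original data) of the other common fix: open the file once, outside the loop, and write one line per result:

# Sketch only: placeholder (spasi_baru, lower, upper) triples stand in for the real results
results = [(400, 399.3052727, 415.037138), (500, 490.1, 512.7)]

with open('Batas.txt', 'w') as data:   # opened once, so nothing inside the loop overwrites it
    for spasi, lower, upper in results:
        data.write("Spasi baru:{}, lower: {}, upper: {}\n".format(spasi, lower, upper))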
I am trying to create a pipeline that takes in 3 files, takes n rows from each file (represented by obs_num), compares each of the values in the files to a random float between 0 and 1, and returns either the obs_num if it is greater than the random number or False if not. I then append these values to a list (list 1).
I then look at the second file and check the position of the obs_num: if the number in the same position returned False in the previous file, I return False; otherwise I check again whether the number is greater than the random float. I then do the same for the third file. I also append these values to lists (lists 2 and 3).
I then convert these 3 lists to a DataFrame, with each list representing a column.
The problem, however, is that when I run my pipeline the output is a file with one blank column, as opposed to a csv with rows equal to obs_num.
Here is the code I am using for my wrapper:
import pandas as pd
import luigi
import state_to_state_machine as ssm

class wrapper(luigi.WrapperTask):
    def requires(self):
        file_tag = ['Sessiontolead', 'leadtoopportunity', 'opportunitytocomplete']
        size = 10
        for j in range(1, int(size)):
            return [ssm.state_machine(file_tag=i, size=size, obs_nums=j) for i in file_tag]

    def run(self):
        print('The wrapper is complete')
        pd.DataFrame().to_csv('/Users/emm/Documents/AttributionData/Data/datawranglerwrapper3.csv')  # never returns anything

    def output(self):
        return luigi.LocalTarget('/Users/emm/Documents/AttributionData/Data/datawranglerwrapper3.csv')

if __name__ == '__main__':
    luigi.build([wrapper()], workers=8, local_scheduler=True)
state machine:
import pandas as pd
import get_samples as gs
import luigi
import random

class state_machine(luigi.Task):
    file_tag = luigi.Parameter()
    obs_nums = luigi.Parameter()  # directly get element - don't write to file
    size = luigi.Parameter()

    def run(self):
        path = '/Users/emm/Documents/AttributionData/Data/Probabilities/'
        file = path + self.file_tag + 'sampleprobs.csv'

        def generic_state_machine(tag, file=file, obs_nums=self.obs_nums):
            if file.split('/')[7][:4] == tag:
                state_machine = pd.read_csv(file)
                return state_machine.ix[:,1][obs_nums] if s.ix[:,1][obs_nums] > random.uniform(0,1) else False

        session_to_leads = []
        lead_to_opps = []
        opps_to_comp = []
        session_to_leads.append(generic_state_machine(tag='Sessiontoload+sampleprobabs', file=file, obs_nums=self.obs_nums))
        lead_to_opps.append(generic_state_machine(tag='leadtoopportunity+sampleprobabs', file=file, obs_nums=self.obs_nums)) if session_to_leads[self.obs_nums-1] != False else lead_to_opps.append(False)
        opps_to_comp.append(generic_state_machine(tag='opportunitytocomplete+sampleprobabs', file=file, obs_nums=self.obs_nums)) if lead_to_opps[self.obs_nums-1] != False else opps_to_comps.append(False)
        df = pd.DataFrame(zip(session_to_leads, lead_to_opps, opps_to_comp), columns=['session_to_leads','lead_to_opps','oops_to_comp'])

    with self.output().open('w') as out_csv:
        out_csv.write(df.to_csv('/Users/emmanuels/Documents/AttributionData/Data/Probabilities/'+str(self.file_tag)+str(self.size)+'statemachine.csv'))

def output(self):
    return luigi.LocalTarget('/Users/emmanuels/Documents/AttributionData/Data/Probabilities/'+str(self.file_tag)+str(self.size)+'statemachine.csv')
I have asked similar versions of this question before, but it has changed each time, and I have managed to resolve most of the initial issues, so this is not a repetition of previous questions.
So, from my understanding, this should produce 3 state machine files, each with 10 rows, one row for each observation and the comparison made.
The three files are literally files with 2 columns, the first being the index and the second being probabilities between 0 and 1.
I'm not sure whether this is a problem with the logic of my code or with how I am using Luigi.
Check your formatting. In your state machine file, your with statement is at the class level for some reason and the output method is at the namespace level.
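For illustration, a minimal sketch (not the original pipeline) of how a luigi.Task is usually laid out, with the with block inside run and output defined as a method of the class:

import luigi
import pandas as pd

class example_task(luigi.Task):
    # Hypothetical parameter; the real task takes file_tag, obs_nums and size
    name = luigi.Parameter(default='example')

    def run(self):
        df = pd.DataFrame({'value': [1, 2, 3]})  # placeholder data
        # The with block lives inside run, indented under the method
        with self.output().open('w') as out_csv:
            out_csv.write(df.to_csv(index=False))

    def output(self):
        # output is a method at the same indentation level as run
        return luigi.LocalTarget(self.name + '.csv')

if __name__ == '__main__':
    luigi.build([example_task()], local_scheduler=True)

Writing through self.output().open('w') also means the task's output path comes from output() alone, rather than being passed to to_csv a second time.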
I have extremely large files, each almost 2GB, so I would like to process multiple files in parallel. All of the files have a similar format, so the file reading can be done in parallel. I know I should use the multiprocessing library, but I am really confused about how to use it with my code.
My code for file reading is:
def file_reading(file, num_of_sample, segsites, positions, snp_matrix):
    with open(file, buffering=2000009999) as f:
        ### I read the file here. I am not putting that code here.
        try:
            assert len(snp_matrix) == len(positions)
            return positions, snp_matrix  ## return statement
        except:
            print('length of snp matrix and length of position vector not the same.')
            sys.exit(1)
My main function is:
if __name__ == "__main__":
    segsites = []
    positions = []
    snp_matrix = []
    path_to_directory = '/dataset/example/'
    extension = '*.msOut'
    num_of_samples = 162
    filename = glob.glob(path_to_directory + extension)
    ### How can I use multiprocessing with the function file_reading?
    number_of_workers = 10
    x, y, z = [], [], []
    array_of_number_tuple = [(filename[file], segsites, positions, snp_matrix) for file in range(len(filename))]
    with multiprocessing.Pool(number_of_workers) as p:
        pos, snp = p.map(file_reading, array_of_number_tuple)
        x.extend(pos)
        y.extend(snp)
So my input to the function is as follows:
file - list containing filenames
num_of_samples - int value
segsites - initially an empty list to which I want to append as I am reading the file.
positions - initially an empty list to which I want to append as I am reading the file.
snp_matrix - initially an empty list to which I want to append as I am reading the file.
The function returns the positions list and the snp_matrix list at the end. How can I use multiprocessing for this when my arguments are lists and an integer? The way I've used multiprocessing gives me the following error:
TypeError: file_reading() missing 3 required positional arguments: 'segsites', 'positions', and 'snp_matrix'
The elements of the list that is being passed to Pool.map are not automatically unpacked, and you can generally only have one argument in your file_reading function.
Of course, this argument can be a tuple, so it is no problem to unpack it yourself:
def file_reading(args):
    file, num_of_sample, segsites, positions, snp_matrix = args
    with open(file, buffering=2000009999) as f:
        ### I read the file here. I am not putting that code here.
        try:
            assert len(snp_matrix) == len(positions)
            return positions, snp_matrix  ## return statement
        except:
            print('length of snp matrix and length of position vector not the same.')
            sys.exit(1)
if __name__ == "__main__":
    segsites = []
    positions = []
    snp_matrix = []
    path_to_directory = '/dataset/example/'
    extension = '*.msOut'
    num_of_samples = 162
    filename = glob.glob(path_to_directory + extension)
    number_of_workers = 10
    x, y, z = [], [], []
    array_of_number_tuple = [(filename[file], num_of_samples, segsites, positions, snp_matrix) for file in range(len(filename))]
    with multiprocessing.Pool(number_of_workers) as p:
        pos, snp = p.map(file_reading, array_of_number_tuple)
        x.extend(pos)
        y.extend(snp)
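One extra detail worth noting (an addition, not part of the original answer): Pool.map returns a list with one (positions, snp_matrix) tuple per input file, so the results are usually collected by looping over that list. A small self-contained sketch with a placeholder reader:

import multiprocessing

def file_reading(args):
    # Placeholder stand-in for the real reader: returns (positions, snp_matrix)
    file, num_of_sample, segsites, positions, snp_matrix = args
    return [1, 2], [[0, 1], [1, 0]]

if __name__ == "__main__":
    tasks = [("a.msOut", 162, [], [], []), ("b.msOut", 162, [], [], [])]
    x, y = [], []
    with multiprocessing.Pool(2) as p:
        # One (positions, snp_matrix) tuple comes back per input file
        for positions_i, snp_i in p.map(file_reading, tasks):
            x.extend(positions_i)
            y.extend(snp_i)
    print(len(x), len(y))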
I am still new to Python, but I am using it for my linguistics research.
I am doing some research into toponyms, and I got a list of input data from a topographic institution, which looks like the following (tab-separated):
Official_Name, Dialect_Name, Administrative_district, Topographic_district, Y_coordinates, X_coordinates, Longitude, Latitude.
So, I defined a class:
class MacroTop:
    def __init__(self, Official_Name, Dialect_Name, Adm_District, Topo_District, Y, X, Long, Lat):
        self.Official_Name = Official_Name
        self.Dialect_Name = Dialect_Name
        self.Adm_District = Adm_District
        self.Topo_District = Topo_District
        self.Y = Y
        self.X = X
        self.Long = Long
        self.Lat = Lat
Using open(), I wanted to load my .txt file and read the data into the class with a loop, but it did not work.
The result I want is to be able to access a feature of the class, say Dialect_Name, and look through all the entries for that feature. I can do that inside the loop, but I wanted to define a class so that I could do more manipulation afterwards.
My loop:
with open("locLuxAll.txt", "r") as topo_list:
lines = topo_list.readlines()
for line in lines:
line = line.split('\t')
print(line)
print(line[0]) # This would access all the data that is characterized as Official_Name
I tried to make another loop:
for i in range(0-len(lines)):
    lines[i] = MacroTop(str(line[0]), str(line[1]), str(line[2]), str(line[3]),
                        str(line[4]), str(line[5]), str(line[6]), str(line[7]))
But that did not seem to work.
This line fails:
for i in range(0-len(lines)):
You're passing a negative number (0 - len(lines)) to range, I guess, so the output will be an empty list:
In [11]: [i for i in range(-200)]
Out[11]: []
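Presumably range(0, len(lines)), with a comma, was intended; that version does iterate over every index. A tiny sketch with placeholder data:

lines = ['a\tb', 'c\td']          # placeholder data
for i in range(0, len(lines)):    # note the comma: start at 0, stop at len(lines)
    print(i, lines[i])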
EDIT:
Your code seems hard to follow: you have for i in range(len(lines)), but inside that loop you're using the line variable, so where does it come from? First of all, I would not write back into the lines list, since it comes straight from readlines; create a new list for that. You also don't need the i variable, as the lines are kept in order anyway.
class_lines = []
for line in lines:
    fields = line.rstrip('\n').split('\t')  # split the tab-separated columns first
    class_lines.append(MacroTop(str(fields[0]), str(fields[1]), str(fields[2]), str(fields[3]),
                                str(fields[4]), str(fields[5]), str(fields[6]), str(fields[7])))
Or even with a list comprehension (splitting each line first):

class_lines = [MacroTop(*line.rstrip('\n').split('\t')[:8]) for line in lines]
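As a small usage illustration (an addition, assuming class_lines was built from locLuxAll.txt as above), all entries of a single feature can then be collected in one pass:

# Every Dialect_Name from the file, in the original order
dialect_names = [entry.Dialect_Name for entry in class_lines]
print(dialect_names)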