Let's say I have a main.py and a subfile.py.
main.py
import subfile  # Written by myself. I want to share argparse with it.
import argparse
parser = argparse.ArgumentParser(description='test',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--a', type=int, default=0, help='test')
args = parser.parse_args()
args.a=0
subfile.fun()
args.a=1
subfile.fun()
args.a=0
subfile.fun()
subfile.py
# ... some operations here that use args from main.py ...
# With TensorFlow's wrapped argparse (FLAGS), I just need to define the same
# ArgumentParser to use the args defined in main.py.
# But that doesn't work with plain argparse, which would cause conflicts.
def fun():
    if args.a == 0:
        print('yes')
    else:
        print('no')
In main.py I use argparse to pass arguments, and main.py uses subfile.py.
Now I want to use the args defined in main.py inside subfile.py. How can I do that?
(The code is a simplified example describing the problem; it can't be solved by reorganizing the code and has to be solved by using args in subfile.py.)
Thanks for your help! I have been tortured by these bugs for the whole day and your answer would help me out. Appreciated!
edit:
Actually I know how to pass a parameter to a function. But as I said above, I wrote the example code just to illustrate the key problem: how to use the argparse library across Python files, which is supported by the similar FLAGS mechanism in TensorFlow.
You can add another file, let's call it arguments.py for example.
Then include at the top of both main.py and subfile.py the following line:
from arguments import args
You then move all the code relating to parsing the command line arguments to arguments.py.
This way both modules, main.py and subfile.py, have access to the same args object.
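A minimal sketch of what arguments.py could contain, reusing the parser from the question (nothing here is new apart from the file split):
# arguments.py -- parses the command line once, at first import
import argparse

parser = argparse.ArgumentParser(description='test',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--a', type=int, default=0, help='test')
args = parser.parse_args()
After that, subfile.py can simply do from arguments import args and read args.a inside fun().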
def
When you define a function, if you need parameters, you can add them between the parentheses.
This is the model:
def fun(par: type) -> ReturnedType:
    pass
For example:
def fun(a: int) -> None:
    if a == 0:
        print("Yes")
    else:
        print("No")
Then, when calling subfile.fun() in main.py, pass the argument like this:
subfile.fun(a=args.a)
or like this:
subfile.fun(args.a)
return
Remember your function doesn't return anything.
If you want to assign "yes" or "no" to a variable, you can do it like this:
def fun(a: int) -> str:
    if a == 0:
        return "Yes"
    else:
        return "No"
If the result can only ever be "yes" or "no", you might prefer to return a boolean instead:
def fun(a: int) -> bool:
    if a == 0:
        return True
    else:
        return False
You can also write the if statement in one line:
print('Yes') if (a == 0) else print('No')
import
To import subfile.py, the file should be in the same directory.
Otherwise you have to do something like this:
from sys import path
# Imagine you need C:\Python\MyFolder\subfile.py
path.append(r"C:\Python\MyFolder")
import subfile
Related
I have a use case where I have a main Python script with many command line arguments, and I need to break its functionality into multiple smaller scripts. A few command line arguments will be common to more than one of the smaller scripts, and I want to reduce code duplication. I tried to use decorators to register each argument to one or more scripts, but am not able to get around an error. Another caveat is that I want to set default values for shared arguments according to which script is being run. This is what I have currently:
argument_parser.py
import argparse
import functools
import itertools
from scripts import Scripts
from collections import defaultdict

_args_register = defaultdict(list)

def argument(scope):
    """
    Decorator to add an argument to the argument registry
    :param scope: The module name to register the current argument function to; can also be a list of modules
    :return: The decorated function, after adding it to the registry
    """
    def register(func):
        if isinstance(scope, Scripts):
            _args_register[scope].append(func)
        elif isinstance(scope, list) and Scripts.ALL in scope:
            _args_register[Scripts.ALL].append(func)
        else:
            for module in scope:
                _args_register[module].append(func)
        return func
    return register

class ArgumentHandler:
    def __init__(self, script, parser=None):
        self._parser = parser or argparse.ArgumentParser(description=__doc__)
        assert script in Scripts
        self._script = script

    @argument(scope=Scripts.ALL)
    def common_arg(self):
        self._parser.add_argument("--common-arg",
                                  default=self._script,
                                  help="An arg common to all scripts")

    @argument(scope=[Scripts.TRAIN, Scripts.TEST])
    def train_test_arg(self):
        self._parser.add_argument("--train-test-arg",
                                  default=self._script,
                                  help="An arg common to train-test scripts added in argument handler")

    def parse_args(self):
        for argument in itertools.chain(_args_register[Scripts.ALL],
                                        _args_register[self._script]):
            argument()
        _args = self._parser.parse_args()
        return _args
One of the smaller scripts, train.py:
"""
A Train script to abstract away training tasks
"""
import argparse
from argument_parser import ArgumentHandler
from scripts import Scripts
current = Scripts.TRAIN
parser = argparse.ArgumentParser(description=__doc__)
def get_args() -> argparse.Namespace:
parser.add_argument('--train-arg',
default='blah',
help='a train argumrnt set in the train script')
args_handler = ArgumentHandler(parser=parser, script=current)
return args_handler.parse_args()
if __name__ == '__main__':
print(get_args())
When I run train.py, I get the following error:
File "../argument_parser.py", line 68, in parse_args
argument()
TypeError: common_arg() missing 1 required positional argument: 'self'
Process finished with exit code 1
I think this is because decorators are run at import time, but I am not sure. Is there any way to work around this, or any other better way to reduce code duplication? Any help will be highly appreciated. Thanks!
In my unit test, I have 2 prompts in the test. I am trying to use 2 @patch("builtins.input") decorators, but it seems to take only one of the return values.
#patch("builtins.input")
#patch("builtins.input")
def test_setProfileName_modify_init_prompt_empty(self, paramName1, paramName2):
paramName1.return_value = self.profileName_prod
paramName2.return_value = self.profileName_dev
a = c.ALMConfig(self.configType)
self.assertTrue(a.setProfileName())
self.assertEqual(a.getProfileName(), self.profileName_dev)
self.assertEqual(a.profileName, self.profileName_dev)
self.assertTrue(a.setProfileName())
self.assertEqual(a.getProfileName(), self.profileName_prod)
self.assertEqual(a.profileName, self.profileName_prod)
The call a.setProfileName() prompts for one input using an input() call in my function. This test calls a.setProfileName() twice.
First time I call a.setProfileName(), I would enter the value of self.profileName_prod.
The second time I call it, I would enter the value of self.profileName_dev.
But the test fails after the second a.setProfileName() call, at the second-to-last assertEqual:
self.assertEqual(a.getProfileName(), self.profileName_prod)
The reason for the failure is that a.getProfileName() returns the value of self.profileName_dev instead of self.profileName_prod.
I have tested my code in the Python CLI to make sure the behavior is correct.
Any feedback is appreciated.
Thanks guys!
Patching the same function twice does not make it return different values on different calls. Instead, you can use the side_effect attribute of the Mock object by setting it to a list of the values you want the function to return on successive calls:
from unittest.mock import patch
@patch('builtins.input', side_effect=['dev', 'prod'])
def test_input(mock_input):
    assert input() == 'dev'
    assert input() == 'prod'

test_input()  # this will not raise an exception since all assertions are True
I revisited blhsing's solution, and it is much more elegant. Here is my working test code now:
@patch('builtins.input', side_effect=['dev', 'production'])
def test_setProfileName_modify_init_prompt_update_new(self, paramName):
    a = c.ALMConfig(self.configType)
    self.assertTrue(a.setProfileName())
    self.assertEqual(a.getProfileName(), self.profileName_dev)
    self.assertEqual(a.profileName, self.profileName_dev)
    self.assertEqual(a.getProfileName(), self.profileName_dev)
    self.assertTrue(a.setProfileName())
    self.assertEqual(a.getProfileName(), self.profileName_prod)
    self.assertEqual(a.profileName, self.profileName_prod)
Thanks everyone for your comments! :)
To provide a simpler and more to-the-point answer for anyone visiting this in 2020 or later, you can just do
with patch("builtins.input", return_value="Whatever you want returned"):
    self.assertEqual("<Object>", "Whatever you want returned")
in Python 3.8 and later.
To see a full, easy-to-follow example, keep reading; otherwise stop here.
Full Example:
The code below shows a full example of this, with a class named "AnsweredQuestion" and a unit test.
class AnsweredQuestion:
    def __init__(self):
        print("I hope you find this example helpful")

    def get_input(self):
        print("Enter your input")
        entered_data = input()
        print("You entered '" + entered_data + "'")
        return entered_data
Unit test for the AnsweredQuestion class above:
import builtins
import unittest
import sys
sys.path.append(".")

# Assuming a directory named "answers" in your setup
import answers
from answers import AnsweredQuestion
from unittest.mock import Mock, patch

class TestAnsweredQuestion(unittest.TestCase):
    def test_get_input(self):
        with patch("builtins.input", return_value="Thanks. This is correct"):
            self.assertEqual(AnsweredQuestion().get_input(), "Thanks. This is correct")

if __name__ == '__main__':
    unittest.main()
Background
I am trying to write a python script that contains multiple functions like this:
import sys

def util1(x, y):
    assert(x is not None)
    assert(y is not None)
    # does something

def util2(x, y):
    assert(x is not None)
    assert(y is not None)
    # does something

def util3(x, y):
    assert(x is not None)
    assert(y is not None)
    # does something
I need to be able to call any of these functions from the command line:
python3 myscript.py util1 arg1 arg2
or
python3 myscript.py util3 arg1 arg2
Problem
I don't know the proper way to grab the command line args and pass them to the functions. I found a way to grab the first arg... but I would like a way to say "pass all args to function x" if this is possible.
What I've tried So far
So far, at the bottom of my script, I have added the following logic:
if __name__ == '__main__':
    globals()[sys.argv[1]]()
and so now, when I try to run my script, I get the following response:
lab-1:/var/www/localhost/htdocs/widgets# python3 myscript.py utils1 1 99999
Traceback (most recent call last):
File "myscript.py", line 62, in <module>
globals()[sys.argv[1]]()
TypeError: util1() missing 2 required positional arguments: 'x' and 'y'
I've also tried the following:
globals()[*sys.argv[1:]]()
globals()[*sys.argv[1]:[2]]()
But that doesn't work. I'm getting errors like "TypeError: unhashable type: 'list'".
If you can point me in the right direction, I'd appreciate it.
Thanks.
EDIT 1
Based on the recommendation here to review a similar post, I changed my logic to include the argparse library. So now I have the following:
parser = argparse.ArgumentParser(description='This is the description of my program')
parser.add_argument('-lc','--lower_create', type=int, help='lower range value for util1')
parser.add_argument('-uc','--upper_create', type=int, help='upper range value for util1')
parser.add_argument('-lr','--lower_reserve', type=int, help='lower range value for util3')
parser.add_argument('-ur','--upper_reserve', type=int, help='upper range value for util3')
args = parser.parse_args()
#if __name__ == '__main__':
# globals()[sys.argv[1]](sys.argv[2], sys.argv[3])
What's not clear is how I "link" these arguments with a specific function.
So let's say I need -lc and -uc for util1. How can I make that association?
and then for example associate -lr and -ur with util3?
Thank you
You need to pass the arguments to the function when you call it. The naive way to do this would be like this: globals()[sys.argv[1]](sys.argv[2], sys.argv[3]) although you'll probably want to do some extra checking to make sure the arguments exist, as well as the function being called.
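A sketch of what that extra checking could look like (the usage and error messages here are only illustrative):
import sys

if __name__ == '__main__':
    if len(sys.argv) < 2:
        sys.exit("usage: myscript.py <function> [args...]")
    func = globals().get(sys.argv[1])
    if not callable(func):
        sys.exit("unknown function: %s" % sys.argv[1])
    # Forward every remaining command line argument to the chosen function.
    func(*sys.argv[2:])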
That is a nice question.
Try it like this:
import sys

def util1(x, y):
    print('This is "util1" with the following arguments: "' + x + '" and "' + y + '"')
    # does something

def util2(x, y):
    print('This is "util2" with the following arguments: "' + x + '" and "' + y + '"')
    # does something

def util3(x, y):
    print('This is "util3" with the following arguments: "' + x + '" and "' + y + '"')
    # does something

locals()[sys.argv[1]](sys.argv[2], sys.argv[3])
Then calling it like this works great for me. I just tried it on my test machine:
python file.py util1 arg1 arg2
You could do this quite neatly with click, e.g.
@click.command()
@click.argument('x')
@click.argument('y')
def util1(x, y):
    pass  # does something
You can also use varargs, so you don't have to specify every argument:
@click.command()
@click.argument('args', nargs=-1)
def util2(args):
    pass  # does something; args holds all the remaining arguments
Click also supports different argument types, validation, etc.
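To get the python myscript.py util1 arg1 arg2 style of dispatch from the question, the commands are usually attached to a group. A rough sketch, assuming click is installed (the group name cli is made up):
import click

@click.group()
def cli():
    pass

@cli.command()
@click.argument('x')
@click.argument('y')
def util1(x, y):
    click.echo('util1 called with %s and %s' % (x, y))

@cli.command()
@click.argument('args', nargs=-1)
def util2(args):
    click.echo('util2 called with %d arguments' % len(args))

if __name__ == '__main__':
    cli()  # e.g. python myscript.py util1 arg1 arg2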
In C++, I can print debug output like this:
printf(
"FILE: %s, FUNC: %s, LINE: %d, LOG: %s\n",
__FILE__,
__FUNCTION__,
__LINE__,
logmessage
);
How can I do something similar in Python?
There is a module named inspect which provides this information.
Example usage:
import inspect

def PrintFrame():
    callerframerecord = inspect.stack()[1]  # 0 represents this line
                                            # 1 represents line at caller
    frame = callerframerecord[0]
    info = inspect.getframeinfo(frame)
    print(info.filename)  # __FILE__     -> Test.py
    print(info.function)  # __FUNCTION__ -> Main
    print(info.lineno)    # __LINE__     -> 13

def Main():
    PrintFrame()  # for this line

Main()
However, please remember that there is an easier way to obtain the name of the currently executing file:
print(__file__)
For example
import inspect
frame = inspect.currentframe()
# __FILE__
fileName = frame.f_code.co_filename
# __LINE__
fileNo = frame.f_lineno
There's more here: http://docs.python.org/library/inspect.html
Building on geowar's answer:
import sys

class __LINE__(object):
    def __repr__(self):
        try:
            raise Exception
        except:
            return str(sys.exc_info()[2].tb_frame.f_back.f_lineno)

__LINE__ = __LINE__()
If you normally want to use __LINE__ in e.g. print (or any other time an implicit str() or repr() is taken), the above will allow you to omit the ()s.
(Obvious extension to add a __call__ left as an exercise to the reader.)
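For completeness, that exercise might look roughly like this (a sketch; it repeats the class above and adds __call__ so __LINE__() with parentheses also works):
import sys

class __LINE__(object):
    def __repr__(self):
        try:
            raise Exception
        except Exception:
            return str(sys.exc_info()[2].tb_frame.f_back.f_lineno)

    def __call__(self):
        # Same trick as __repr__, but returns the caller's line number as an int.
        try:
            raise Exception
        except Exception:
            return sys.exc_info()[2].tb_frame.f_back.f_lineno

__LINE__ = __LINE__()

print(__LINE__)    # implicit repr()
print(__LINE__())  # explicit call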
You can refer to my answer:
https://stackoverflow.com/a/45973480/1591700
import sys
print(sys._getframe().f_lineno)
You can also make it a lambda function:
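The lambda variant could be a sketch like this (frame depth 1 is the caller of the lambda):
import sys

__LINE__ = lambda: sys._getframe(1).f_lineno
print(__LINE__())  # prints the line number of this call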
I was also interested in a __LINE__ command in Python.
My starting point was https://stackoverflow.com/a/6811020 and I extended it with a metaclass object. With this modification it has the same behavior as in C++.
import inspect

class Meta(type):
    def __repr__(self):
        # Inspiration: https://stackoverflow.com/a/6811020
        callerframerecord = inspect.stack()[1]  # 0 represents this line
                                                # 1 represents line at caller
        frame = callerframerecord[0]
        info = inspect.getframeinfo(frame)
        # print(info.filename)  # __FILE__     -> Test.py
        # print(info.function)  # __FUNCTION__ -> Main
        # print(info.lineno)    # __LINE__     -> 13
        return str(info.lineno)

class __LINE__(metaclass=Meta):
    pass

print(__LINE__)  # prints, for example, 18
wow, 7 year old question :)
Anyway, taking Tugrul's answer, and writing it as a debug type method, it can look something like:
def debug(message):
    import sys
    import inspect
    callerframerecord = inspect.stack()[1]
    frame = callerframerecord[0]
    info = inspect.getframeinfo(frame)
    print(info.filename, 'func=%s' % info.function, 'line=%s:' % info.lineno, message)

def somefunc():
    debug('inside some func')

debug('this')
debug('is a')
debug('test message')

somefunc()
Output:
/tmp/test2.py func=<module> line=12: this
/tmp/test2.py func=<module> line=13: is a
/tmp/test2.py func=<module> line=14: test message
/tmp/test2.py func=somefunc line=10: inside some func
import sys
import inspect
.
.
.
def __LINE__():
    try:
        raise Exception
    except:
        return sys.exc_info()[2].tb_frame.f_back.f_lineno

def __FILE__():
    return inspect.currentframe().f_code.co_filename
.
.
.
print("file: '%s', line: %d" % (__FILE__(), __LINE__()))
Here is a tool to answer this old yet new question!
I recommend using icecream!
Do you ever use print() or log() to debug your code? Of course, you
do. IceCream, or ic for short, makes print debugging a little sweeter.
ic() is like print(), but better:
It prints both expressions/variable names and their values.
It's 40% faster to type.
Data structures are pretty printed.
Output is syntax highlighted.
It optionally includes program context: filename, line number, and parent function.
For example, I created a module icecream_test.py, and put the following code inside it.
from icecream import ic
ic.configureOutput(includeContext=True)
def foo(i):
return i + 333
ic(foo(123))
Prints
ic| icecream_test.py:6 in <module>- foo(123): 456
To get the line number in Python without importing the whole sys module...
First import the _getframe function:
from sys import _getframe
Then call the _getframe function and use its f_lineno property whenever you want to know the line number:
print(_getframe().f_lineno) # prints the line number
From the interpreter:
>>> from sys import _getframe
... _getframe().f_lineno # 2
Word of caution from the official Python Docs:
CPython implementation detail: This function should be used for internal and specialized purposes only. It is not guaranteed to exist in all implementations of Python.
In other words: Only use this code for personal testing / debugging reasons.
See the official Python documentation on sys._getframe for more information on the sys module and the _getframe() function.
Based on Mohammad Shahid's answer (above).
This is an ugly, high-maintenance factory. I really just need a way to use the string to instantiate an object with a name that matches the string. I think a metaclass is the answer, but I can't figure out how to apply it:
from commands.shVersionCmd import shVersionCmd
from commands.shVRFCmd import shVRFCmd
def CommandFactory(commandnode):
    if commandnode.attrib['name'] == 'shVersionCmd': return shVersionCmd(commandnode)
    if commandnode.attrib['name'] == 'shVRFCmd': return shVRFCmd(commandnode)
You can look up global names with the globals() function, which returns a dict:
from commands.shVersionCmd import shVersionCmd
from commands.shVRFCmd import shVRFCmd
# An explicit list of allowed commands to prevent malicious activity.
commands = ['shVersionCmd', 'shVRFCmd']
def CommandFactory(commandnode):
    cmd = commandnode.attrib['name']
    if cmd in commands:
        fn = globals()[cmd]
        return fn(commandnode)
This answer, "How to make an anonymous function in Python without Christening it?", discusses how to cleanly call blocks of code based on a key.
eval is your friend:
from commands import *
def CommandFactory(commandnode):
    name = commandnode.attrib['name']
    assert name in ("shVersionCmd", "shVRFCmd"), "illegal command"
    return eval(name + "." + name)(commandnode)
Note that if you are sure that name will never contain any illegal commands, you could remove the assert and turn the function into a no-maintenance-delight. In case of doubt, leave it in and maintain the list in a single place.
My personal preference would be to turn the dependencies between the factory and the command implementations around, so that each command registers itself with the factory.
Example implementation:
File commands/__init__.py:
import pkgutil
import commands

_commands = {}

def command(commandCls):
    _commands[commandCls.__name__] = commandCls
    return commandCls

def CommandFactory(commandnode):
    name = commandnode.attrib['name']
    if name in _commands.keys():
        return _commands[name](commandnode)

# Load all commands
for loader, module_name, is_pkg in pkgutil.walk_packages(commands.__path__):
    if module_name != __name__:
        module = loader.find_module(module_name).load_module(module_name)
File commands/mycommand.py:
from commands import command

@command
class MyCommand(object):
    def __init__(self, commandnode):
        pass
Small test:
from commands import CommandFactory

# Stub node implementation
class Node(object):
    def __init__(self, name):
        self.attrib = {"name": name}

if __name__ == '__main__':
    cmd = CommandFactory(Node("MyCommand"))
    assert cmd.__class__.__name__ == "MyCommand", "New command is instance of MyCommand"
    cmd = CommandFactory(Node("UnknownCommand"))
    assert cmd is None, "Returns None for unknown command type"