Fuse API and CMake Build - python

I've recently re-opened a project I worked on a couple of years ago. I wrote a small python script to build the project. I would like to port that to CMake instead.
The problem I'm having is that the script uses pkg-config on linux to find the fuse headers and libraries. I'm having trouble porting this to CMake.
Here's the current Python script:
import subprocess, sys, os, shutil

def call( command ):
    c = subprocess.Popen( command.split(), stdout=subprocess.PIPE )
    c.wait()
    return c.stdout.read()

class GCC:
    args = None
    def __init__( self, initial_args ):
        self.args = initial_args
    def addPKG( self, package ):
        self.args.extend( package )
    def addFile( self, name ):
        self.args.append( name )
    def compile( self, out_name ):
        self.args.extend(["-o", out_name])
        print " ".join( self.args )
        gcc = subprocess.Popen( self.args )
        return gcc.wait() == 0

if __name__ == '__main__':
    cflags = call("pkg-config fuse --libs --cflags").split()
    print cflags
    gcc = GCC(["gcc","-g","-Wall","-pg"])
    gcc.addFile("argsparse.c")
    gcc.addFile("hidden.c")
    #gcc.addFile("fs.c")
    gcc.addFile("initialization.c")
    gcc.addFile("resolve.c")
    gcc.addFile("utilities.c")
    gcc.addFile("winhomefs0.4.c")
    gcc.addPKG(cflags)
    gcc.addFile("-lulockmgr")
    if gcc.compile("winhomefs") and 'install' in sys.argv:
        if os.getuid() == 0:
            shutil.copy("winhomefs", "/usr/local/bin/winhomefs")
Here's my current CMakeLists.txt file.
cmake_minimum_required (VERSION 2.8.11)
project (HomeFS)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
"${CMAKE_SOURCE_DIR}/CMakeModules/")
find_package(FUSE REQUIRED)
add_library(argsparse argsparse.c)
add_library(hidden hidden.c)
add_library(initialization initialization.c)
add_library(resolve resolve.c)
add_library(utilities utilities.c)
add_executable(homefs winhomefs0.4.c)
The issue I'm having is with the "find FUSE" part. I've tried several different permutations of it, including the following FindFUSE.cmake modules:
https://github.com/tarruda/encfs/blob/master/CMakeModules/FindFUSE.cmake
https://github.com/Pronghorn/pronghorn/blob/master/FindFUSE.cmake
Neither seems to work; I get:
...argsparse.c:21:22: fatal error: fuse_opt.h: No such file or directory
#include <fuse_opt.h>
^
compilation terminated.
The Python script works, however, which suggests there's something wrong with how CMake is configured.
For reference, the pkg-config line above outputs the following on my system:
-D_FILE_OFFSET_BITS=64 -I/usr/include/fuse -lfuse -pthread
Thanks for any help!
Per Fraser's feedback I've updated two things.
My CMakeLists.txt now looks like:
cmake_minimum_required (VERSION 2.8.11)
project (HomeFS)
set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
"${CMAKE_SOURCE_DIR}/CMakeModules/")
find_package(FUSE REQUIRED)
add_executable(homefs
    argsparse.c
    hidden.c
    initialization.c
    resolve.c
    utilities.c
    winhomefs0.4.c)
set(CMAKE_C_FLAGS "-D_FILE_OFFSET_BITS=64 -lulockmgr")
target_include_directories(homefs PRIVATE ${FUSE_INCLUDE_DIR})
target_link_libraries(homefs ${FUSE_LIBRARIES})
And all references to <fuse.h> and <fuse_opt.h> have been updated to <fuse/fuse.h> and so forth. I also had to add the flag -D_FILE_OFFSET_BITS=64 and it now compiles cleanly.
However I'm still getting a linker error.
winhomefs0.4.c:(.text+0x10b2): undefined reference to `ulockmgr_op'
collect2: error: ld returned 1 exit status
I tried adding the lib -lulockmgr to the C flags, but that isn't working.
Google hasn't been my friend on this; there are very few references to ulockmgr. Do I need to implement a FindULOCKMGR CMake module, or do I need to add the flag elsewhere?
Ok, after some trial and error plus logical thought I solved the issue: I needed to move the -lulockmgr string from CMAKE_C_FLAGS to the target_link_libraries line.

You're probably just missing a couple of calls in your CMakeLists.txt.
The line find_package(FUSE REQUIRED) will try to find the path to the FUSE headers and to the FUSE lib(s). The comment blocks at the top of the two FindFUSE.cmake files give details of the variables each one sets. Take the encfs one for example. It will set FUSE_FOUND to true or false, allowing you to exit your script with a helpful error message if FUSE isn't found.
The variable FUSE_INCLUDE_DIR will be set to the absolute path of the folder containing the FUSE header. FUSE_LIBRARIES will be set to a list of absolute paths to the FUSE libs.
What's currently missing from your CMakeLists.txt is to use these variables.
You would use them in calls to target_include_directories and target_link_libraries - e.g.
target_include_directories(homefs PRIVATE ${FUSE_INCLUDE_DIR})
target_link_libraries(homefs ${FUSE_LIBRARIES})
Another issue is that you're creating five separate libraries with your five add_library calls, but then not using them. At the very least I'd have expected to see these also being linked to the exe via a target_link_libraries call.
I don't know Python well enough to know exactly what the original script is doing, but I think the more likely solution is that these sources should all just be part of the exe - i.e. drop the five add_library calls

add_library(argsparse argsparse.c)
add_library(hidden hidden.c)
add_library(initialization initialization.c)
add_library(resolve resolve.c)
add_library(utilities utilities.c)

and list every source file in a single add_executable:

add_executable(homefs
    argsparse.c
    hidden.c
    initialization.c
    resolve.c
    utilities.c
    winhomefs0.4.c)

Related

What does it mean to "initialize the Julia runtime" when exporting compiled .dll or .so files for use in other languages?

I'm trying to compile a usable .dll file from Julia to be used in Python as I've already written a large GUI in Python and need some fast optimization work done. Normally I would just call PyJulia or some "live" call, however this program needs to be compiled to distribute within my research team, so whatever solution I end up with needs to be able to run on its own (without Julia or Python actually installed).
Right now I'm able to create .dll files via PackageCompiler.jl, something I learned from previous posts on StackOverflow, however when trying to run these files in Python via the following code
Julia mock package
module JuliaFunctions
# Pkg.add("BlackBoxOptim")

Base.@ccallable function my_main_function(x::Cfloat, y::Cfloat)::Cfloat
    z = 0
    for i in 1:x
        z += i ^ y
    end
    return z
end

# function julia_main()
#     print("Hello from a compiled executable!")
# end

export my_main_function

end # module
Julia script to use PackageCompiler
# using PackageCompiler
using Pkg
# Pkg.develop(path="JuliaFunctions") # This is how you add a local package
# include("JuliaFunctions/src/JuliaFunctions.jl") # this is how you add a local module
using PackageCompiler
# Pkg.add(path="JuliaFunctions")
@time create_sysimage(:JuliaFunctions, sysimage_path="JuliaFunctions.dll")
Trying to use the resulting .dll in CTypes in Python
import os
import ctypes
from ctypes.util import find_library
from ctypes import *

path = os.path.dirname(os.path.realpath(__file__)) + '\\JuliaFunctions.dll'

# _lib = cdll.LoadLibrary(ctypes.util.find_library(path)) # same error
# hllDll = ctypes.WinDLL(path, winmode=0) # same error
with os.add_dll_directory(os.path.dirname(os.path.realpath(__file__))):
    _lib = ctypes.CDLL(path, winmode=0)
I get
OSError: [WinError 127] The specified procedure could not be found
With my current understanding, this means that CTypes found the dll and imported it, but didn't find.. something? I've yet to fully grasp how this behaves.
I've verified the function my_main_function is exported in the .dll file via Nirsoft's DLL Export Viewer. Users from previous similar issues have noted that this sysimage is already callable and should work, but they always add at the end something along the lines of "Note that you will also in general need to initialize the Julia runtime."
What does this mean? Is this even something that can be done independently from the Julia installation? The dev docs in PackageCompiler mention this, however they just mention that julia_main is automatically included in the .dll file and gets called as a sort of launch point. This function is also being exported correctly into the .dll file the above code creates. Below is an image of the Nirsoft export viewer output for reference.
Edit 1
Inexplicably, I've rebuilt this .dll on another machine and made progress. Now, the dll is imported correctly. I'm not sure yet why this worked on a fresh Julia install + Python venv, but I'm going to reinstall them on the other one and update this if anything changes. For anyone encountering this, also note you need to specify the expected output, whatever it may be. In my case this is done by adding (after the import):
_lib.testmethod1.restype = c_double # switched from Cfloat earlier, a lot has changed.
_lib.testmethod1.argtypes = [c_double, c_double] # (defined by ctypes)
The current error is now OSError: exception: access violation writing 0x0000000000000024 when trying to actually use the function, which is specific to Python. Any help on this would also be appreciated.
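For context, "initialize the Julia runtime" usually means loading libjulia and calling its init function against your sysimage before any @ccallable code is touched. A rough, hypothetical sketch of what that might look like from the Python side - the paths are placeholders, and the exact exported symbol can differ by Julia version (e.g. jl_init_with_image__threading on older releases):

import ctypes, os

julia_bindir = r"C:\Julia-1.8.5\bin"              # placeholder: wherever libjulia.dll lives
sysimage = os.path.abspath("JuliaFunctions.dll")  # the sysimage built above

os.add_dll_directory(julia_bindir)                # Python 3.8+, so dependent DLLs resolve
libjulia = ctypes.CDLL(os.path.join(julia_bindir, "libjulia.dll"))

# Initialize the runtime against the custom sysimage before calling @ccallable code
libjulia.jl_init_with_image(julia_bindir.encode(), sysimage.encode())

lib = ctypes.CDLL(sysimage)
lib.my_main_function.restype = ctypes.c_float
lib.my_main_function.argtypes = [ctypes.c_float, ctypes.c_float]
print(lib.my_main_function(10.0, 2.0))

libjulia.jl_atexit_hook(0)                        # shut the runtime down cleanly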

Inkscape extension: python doesn't invoke .exe

I'm developing a plugin for Inkscape. Some versions:
Inkscape v0.92.3
Windows 10, version 1803 (build 17134.165)
Python 3.7 explicitly installed
MonoDevelop Version 7.7 Preview (7.7) Extra versions below
Installation Locations:
Inkscape: C:\Program Files\Inkscape
Extension: C:\Program Files\Inkscape\share\extensions
Contains: myplugin.inx, myplugin.py, MyPlugin.exe
I've made a plugin which, for development reasons, works as currently intended.
Most important of all, it runs when I run it either from MonoDevelop, or the built exe itself (both with the generated .dll's etc in the same location, or with only the exe copied to a different location).
I use (a slightly edited version of) SugarPillStudio's python script to run the .exe file. However, when I run that python script by invoking the extension, the .exe is not launched. Inkscape blinks a message that says 'MyPlugin is launching...' and closes that as fast as it opens.
I know that the python script works, because I have it print debugging lines to a .log file on my desktop. I know that the .exe doesn't launch because I have it also writing lines to the same .log file, first thing when the main() is invoked. When I (successfully) run the .exe it does print to the file, when I run the extension it doesn't.
This leads me to believe there's a problem with the python script in invoking the .exe. Any help?
Python Script:
#!/usr/bin/env python
'''
sugarpillstudios.com/wp/?p=142
'''
import os, sys, subprocess, datetime

f = open("C:\Users\Diamundo\Documents\plugin.log", "a+")
f.write("[PYT] %s Python script called at: %s.\n" % (datetime.datetime.now().isoformat(), os.getcwd() ) )

argv = []
for arg in sys.argv[:]:
    if arg.startswith("--executable="):
        executable = arg.split("=")[1]
    else:
        argv.append(arg)
argv[0] = executable

f.write("[PYT] %s %s\n" % ( datetime.datetime.now().isoformat(), executable ) )

process = subprocess.Popen(argv, shell=False, stdout=subprocess.PIPE)
print process.communicate()[0]
Plugin.inx:
<inkscape-extension>
  <name>MyPlugin</name>
  <id>name.space.plugin.main</id>
  <param name="executable" type="string" gui-hidden="true">MyPlugin.exe</param>
  <effect>
    <object-type>all</object-type>
    <effects-menu>
      <submenu _name="MyPlugin"/>
    </effects-menu>
  </effect>
  <script>
    <command reldir="extensions" interpreter="python">myplugin.py</command>
  </script>
</inkscape-extension>
Extra Monodevelop versions:
Runtime:
Microsoft .NET 4.0.30319.42000
GTK+ 2.24.26 (Light theme)
GTK# 2.12.45
NuGet
Version: 4.3.1.4445
.NET Core
Runtime: C:\Program Files\dotnet\dotnet.exe
Runtime Versions:
2.0.9
2.0.5
SDK: C:\Program Files\dotnet\sdk\2.1.202\Sdks
SDK Versions:
2.1.202
2.1.4
MSBuild SDKs: Not installed
Inkscape uses Python 2.7, which it ships with, unless you set a different interpreter in the settings file (edited manually).
If you want to write an Inkscape extension, you can learn how to do this by:
reading https://inkscape.org/develop/extensions/
following the examples in other extensions that work (e.g. for running additional Inkscape instances, you could follow this one: https://gitlab.com/su-v/inx-pathops/blob/master/src/pathops.py)
Loosely based on the pathops.py file, linked by Moini in her answer, I've come up with the following file.
About
It uses the inkex.py (source on GitLab) library to declare an Inkscape Effect. The Effect class uses the OptionParser library to parse the default parameters it is given (e.g. --id=$$ for selected nodes, where $$ is the value of the XML node's id attribute). By adding the custom executable option, we can parse that as well.
Parsing arguments
After the OptionParser is done parsing, the values will be visible in self.options, i.e. our executable now lives in self.options.executable (because of the action="store" and dest="executable" parameters).
Furthermore, the temporary SVG-file as created by Inkscape, can be found in self.svg_file.
Saving edits
As previously said, Inkscape makes a temporary file with the contents of the SVG in its then current state. Any edits you(r plugin) make(s) should not be saved back to this file, but returned to Inkscape itself - this is the premise of the Effect class: it edits an SVG and returns the edit to Inkscape. Further reading here.
Instead, in your plugin you should open the file read-only, read its contents, and then edit them. When you're done editing, write the entire SVG to standard output.
Then, the line out, err = process.communicate(None) will grab your plugin's output and error-output. These are used to return information to Inkscape.
Notes
The structure of the cmd array is of no importance, except that the executable must come as the very first element. All other array elements can be anything in any order; I just added '--id=$$' for every ID because that's what Inkscape does, and this way it looks the same as if no Python middleware were present. The same goes for self.svg_file, which I placed last because Inkscape does the same in its arguments - you could also turn it into '--file=' + self.svg_file for clarity.
Source
#!/usr/bin/env python
import os
from subprocess import Popen, PIPE
import time

try:
    import inkex_local as inkex
except ImportError:
    import inkex

#import simplestyle

class MyPlugin(inkex.Effect):
    def __init__(self):
        inkex.Effect.__init__(self)
        self.OptionParser.add_option("--executable", action="store", type="string", dest="executable", default="MyPlugin.exe")

    def effect(self):
        out = err = None
        cmd = []
        cmd.append(self.options.executable)
        for id in self.options.ids:
            cmd.append("--id=" + id)
        cmd.append(self.svg_file)
        #inkex.debug(cmd);
        process = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = process.communicate(None)
        if process.returncode == 0:
            print out
        elif err is not None:
            inkex.errormsg(err)

if __name__ == '__main__':
    myplugin = MyPlugin()
    myplugin.affect()

Procedure entry point __gxx_personality_v0 could not be located libstdc++-6.dll

I appreciate that this question has been asked and answered for several cases, but I'm still having issues. My directory structure is laid out as such:
models/
    bs/
        __init__.py
        values.py
    optlib/
        Makefile
        bin/
            optlib.so        # generated after compilation/linking
            libstdc++-6.dll
        inc/
            optlib.h
            stats.h
        obj/
            optlib.o         # generated after compilation/linking
            stats.o          # generated after compilation/linking
        src/
            optlib.cc
            stats.cc
In short, my source files are in src, header files in inc, object files output to obj and the shared library file output to bin.
I've compiled and linked C/C++ libraries using the following commands (extracted from Makefile):
g++ -c -Iinc -o obj/optlib.o src/optlib.cc
g++ -c -Iinc -o obj/stats.o src/stats.cc
g++ -shared -Wl,-soname,bin/optlib.so -o bin/optlib.so obj/optlib.o obj/stats.o
I'm using MinGW on a Windows 7 machine and attempting to access the shared library from Python using ctypes. I've placed the libstdc++-6.dll file from C:\MinGW\bin into the bin directory which contains the shared library, optlib.so as many of the posts suggest.
My Python code is as follows (note the __init__.py imports values.py):
import os
from ctypes import WinDLL, CDLL, c_void_p, c_double, byref

# neither the os.path.abspath method of building the path works...
fn = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'optlib', 'bin', 'optlib.so'))
print type(fn) # returns <type 'unicode'>
# ...nor explicitly typing it out
#fn = u'C:\\Users\\striji\\Desktop\\models\\optlib\\bin\\optlib.so'

LOAD_WITH_ALTERED_SEARCH_PATH = 0x00000008
kernel32 = WinDLL('kernel32')
kernel32.LoadLibraryExW.restype = c_void_p
hMod = kernel32.LoadLibraryExW(fn, None, LOAD_WITH_ALTERED_SEARCH_PATH)
lib = CDLL(fn, handle=hMod)

def fcn(x):
    lib.c_func.argtypes = [c_double]
    lib.c_func.restype = c_double
    x = byref(c_double(x))
    return lib.c_func(x)
When I attempt to import the module using the following:
>>> from models import bs
I get the error from the question title: the procedure entry point __gxx_personality_v0 could not be located in libstdc++-6.dll.
C:\MinGW\bin is on my PATH variable. Source code is actually C++ wrapped in extern "C" so that ctypes can see it. These are very simple C++ functions. If you need to see the source, I can post it.
Question then is twofold. First, how do I fix the immediate problem. Second, is it possible to simplify the compilation and linking process so it is system agnostic (i.e. compiles and links on Mac OSX and Linux)?

Python call to system program not working

I wrote a little python script, intending to automate non-default options for gcc (on Kubuntu 14.04); the python runs without error now, and inserting a debug print statement (or changing the system command to 'echo') verifies the correct information is being passed, but I get an error from gcc saying
$ python gccm prog16
gcc: fatal error: no input files
compilation terminated.
Here's the script I wrote:
#!/usr/bin/python
from sys import argv   # get incoming argument
import subprocess      # function to call an OS program

script, target = argv

# massage received argument into form needed for math.h linkage
target = "-o " + target + " " + target + ".c -lm"

subprocess.call(['gcc', target], shell=False)
There are other additions I'd make to the gcc call (compile version options, stricter code checking, etc.), if I can get this to work correctly. Based on the error message, it appears to be invoking gcc correctly, but the target source file isn't being found; could this not be running in the directory from which I invoke it? If so, how can I get it to run from the correct directory (where I'm keeping my C source code files); if not, what else could cause this?
If you're using shell=False, your arguments to the sub-processes shouldn't be concatenated together. Instead, they should each be their own element in the args list:
subprocess.call(['gcc', '-o', target, target+'.c', '-lm'], shell=False)
On a related note, any reason why you're writing something like this yourself? If you're looking to use a Python-based build system, have a look at SCons.
If you have shell=False, then you must pass each argument separately into the subprocess.call.
Try this instead:
subprocess.call (['gcc', '-o', target, target + '.c', '-lm'], shell=False)

distutils: How to pass a user defined parameter to setup.py?

How can I pass a user-defined parameter both from the command line and setup.cfg configuration file to distutils' setup.py script?
I want to write a setup.py script, which accepts my package specific parameters. For example:
python setup.py install -foo myfoo
As Setuptools/Distutils are horribly documented, I had problems finding the answer to this myself. Eventually I stumbled across this example. Also, this similar question was helpful. Basically, a custom command with an option would look like:
from distutils.core import setup, Command

class InstallCommand(Command):
    description = "Installs the foo."
    user_options = [
        ('foo=', None, 'Specify the foo to bar.'),
    ]
    def initialize_options(self):
        self.foo = None
    def finalize_options(self):
        assert self.foo in (None, 'myFoo', 'myFoo2'), 'Invalid foo!'
    def run(self):
        install_all_the_things()

setup(
    ...,
    cmdclass={
        'install': InstallCommand,
    }
)
Here is a very simple solution: all you have to do is filter sys.argv and handle it yourself before you call distutils' setup(..).
Something like this:
if "--foo" in sys.argv:
do_foo_stuff()
sys.argv.remove("--foo")
...
setup(..)
The documentation on how to do this with distutils is terrible; eventually I came across this one: The Hitchhiker's Guide to Packaging, which uses sdist and its user_options.
I find the extending distutils reference not particularly helpful.
Although this looks like the "proper" way of doing it with distutils (at least the only one I could find that is vaguely documented), I could not find anything on the --with and --without switches mentioned in the other answer.
The problem with this distutils solution is that it is just way too involved for what I am looking for (which may also be the case for you).
Adding dozens of lines and subclassing sdist is just wrong for me.
Yes, it's 2015 and the documentation for adding commands and options in both setuptools and distutils is still largely missing.
After a few frustrating hours I figured out the following code for adding a custom option to the install command of setup.py:
from setuptools.command.install import install

class InstallCommand(install):
    user_options = install.user_options + [
        ('custom_option=', None, 'Path to something')
    ]

    def initialize_options(self):
        install.initialize_options(self)
        self.custom_option = None

    def finalize_options(self):
        #print('The custom option for install is ', self.custom_option)
        install.finalize_options(self)

    def run(self):
        global my_custom_option
        my_custom_option = self.custom_option
        install.run(self)  # OR: install.do_egg_install(self)
It's worth mentioning that install.run() checks whether it's called "natively" or has been patched:
if not self._called_from_setup(inspect.currentframe()):
    orig.install.run(self)
else:
    self.do_egg_install()
At this point you register your command with setup:
setup(
    cmdclass={
        'install': InstallCommand,
    },
    :
You can't really pass custom parameters to the script. However the following things are possible and could solve your problem:
optional features can be enabled using --with-featurename, standard features can be disabled using --without-featurename. [AFAIR this requires setuptools]
you can use environment variables; these, however, have to be set separately on Windows, whereas prefixing the command works on Linux/OS X (FOO=bar python setup.py) - see the sketch after this list.
you can extend distutils with your own cmd_classes which can implement new features. They are also chainable, so you can use that to change variables in your script. (python setup.py foo install) will execute the foo command before it executes install.
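For the environment-variable route, a minimal sketch (the variable and package names are illustrative):

import os
from distutils.core import setup

# e.g. run as:  MYPKG_WITH_FEATURENAME=1 python setup.py install
with_feature = os.environ.get('MYPKG_WITH_FEATURENAME', '0') == '1'

setup(
    name='mypackage',          # illustrative
    version='1.0.0',
    # toggle ext_modules, data_files, etc. based on with_feature here
)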
Hope that helps somehow. Generally speaking, I would suggest providing a bit more information about what exactly your extra parameter should do; maybe there is a better solution available.
I successfully used a workaround similar to totaam's suggestion. I ended up popping my extra arguments from the sys.argv list:
import sys
from distutils.core import setup

foo = 0
if '--foo' in sys.argv:
    index = sys.argv.index('--foo')
    sys.argv.pop(index)        # Removes the '--foo'
    foo = sys.argv.pop(index)  # Returns the element after the '--foo'

# The foo is now ready to use for the setup
setup(...)
Some extra validation could be added to ensure the inputs are good (see the sketch below), but this is how I did it.
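For example, that extra validation might look roughly like this (the error messages and package metadata are illustrative):

import sys
from distutils.core import setup

foo = None
if '--foo' in sys.argv:
    index = sys.argv.index('--foo')
    # require a value after the flag, and make sure it isn't another option
    if index + 1 >= len(sys.argv) or sys.argv[index + 1].startswith('-'):
        sys.exit("error: --foo requires a value, e.g. --foo myfoo")
    sys.argv.pop(index)        # removes '--foo'
    foo = sys.argv.pop(index)  # the value that followed it

setup(name='mypackage', version='1.0.0')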
A quick and easy way similar to that given by totaam would be to use argparse to grab the -foo argument and leave the remaining arguments for the call to distutils.setup(). Using argparse for this would be better than iterating through sys.argv manually imho. For instance, add this at the beginning of your setup.py:
import argparse
import sys

argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--foo', help='required foo argument', required=True)
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
The add_help=False argument means that you can still get the regular setup.py help using -h (provided --foo is given).
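If --foo should be optional rather than required (so that a plain python setup.py install still works), a default can be supplied instead; a small hedged variation, with illustrative names:

import argparse
import sys
from distutils.core import setup

argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument('--foo', default='myfoo', help='optional foo argument')
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown           # hand everything else back to distutils

setup(name='mypackage', version='1.0.0')     # use args.foo however your build needs
# invoked as e.g.:  python setup.py --foo myfoo install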
Perhaps you are an unseasoned programmer like me that still struggled after reading all the answers above. Thus, you might find another example potentially helpful (and to address the comments in previous answers about entering the command line arguments):
import subprocess
import sys
from distutils.core import setup, Command

class RunClientCommand(Command):
    """
    A command class to run the client GUI.
    """
    description = "runs client gui"

    # The format is (long option, short option, description).
    user_options = [
        ('socket=', None, "The socket of the server to connect (e.g. '127.0.0.1:8000')"),
    ]

    def initialize_options(self):
        """
        Sets the default value for the server socket.
        The method is responsible for setting default values for
        all the options that the command supports.
        Option dependencies should not be set here.
        """
        self.socket = '127.0.0.1:8000'

    def finalize_options(self):
        """
        Overriding a required abstract method.
        The method is responsible for setting and checking the
        final values and option dependencies for all the options
        just before the method run is executed.
        In practice, this is where the values are assigned and verified.
        """
        pass

    def run(self):
        """
        Semantically, runs 'python src/client/view.py SERVER_SOCKET' on the
        command line.
        """
        print(self.socket)
        errno = subprocess.call([sys.executable, 'src/client/view.py', self.socket])
        if errno != 0:
            raise SystemExit("Unable to run client GUI!")

setup(
    # Some other omitted details
    cmdclass={
        'runClient': RunClientCommand,
    },
The above is tested and from some code I wrote. I have also included slightly more detailed docstrings to make things easier to understand.
As for the command line: python setup.py runClient --socket=127.0.0.1:7777. A quick double check using print statements shows that indeed the correct argument is picked up by the run method.
Other resources I found useful (more and more examples):
Custom distutils commands
https://seasonofcode.com/posts/how-to-add-custom-build-steps-and-commands-to-setuppy.html
To be fully compatible with both python setup.py install and pip install . you need to use environment variables, because the pip option --install-option= is bugged:
pip --install-option leaks across lines
Determine what should be done about --(install|global)-option with Wheels
pip not naming abi3 wheels correctly
This is a full example not using the --install-option:
import os
import sys
from distutils.core import setup

environment_variable_name = 'MY_ENVIRONMENT_VARIABLE'
environment_variable_value = os.environ.get( environment_variable_name, None )

if environment_variable_value is not None:
    sys.stderr.write( "Using '%s=%s' environment variable!\n" % (
            environment_variable_name, environment_variable_value ) )

setup(
    name = 'packagename',
    version = '1.0.0',
    ...
)
Then, you can run it like this on Linux:
MY_ENVIRONMENT_VARIABLE=1 pip install .
MY_ENVIRONMENT_VARIABLE=1 pip install -e .
MY_ENVIRONMENT_VARIABLE=1 python setup.py install
MY_ENVIRONMENT_VARIABLE=1 python setup.py develop
But, if you are on Windows, run it like this:
set "MY_ENVIRONMENT_VARIABLE=1" && pip install .
set "MY_ENVIRONMENT_VARIABLE=1" && pip install -e .
set "MY_ENVIRONMENT_VARIABLE=1" && python setup.py install
set "MY_ENVIRONMENT_VARIABLE=1" && python setup.py develop
References:
How to obtain arguments passed to setup.py from pip with '--install-option'?
Passing command line arguments to pip install
Passing the library path as a command line argument to setup.py
