I am creating a plugin for ST3 and need the list of all defined scopes. I know that hitting ctrl+alt+shift+p shows the current scope in the status bar but I can't do it for every file extension.
Edit:
In addition to simple .tmLanguage files I am extracting the .sublime-package files and reading .tmLanguage files from inside. This added some entries like source.php to the list. But source.python is still missing !
Actually, the python code is: ( this is for Python 3.3 )
import sublime, sublime_plugin, os, subprocess, glob, tempfile, plistlib
from zipfile import ZipFile

def scopes_inside(d):
    """Recursively collect all 'scopeName' values from a plist dictionary.

    A scopeName entry may hold several comma-separated scopes, so each
    value is split and stripped before being returned.
    """
    result = []
    for k in d.keys():
        if k == 'scopeName':
            result = result + [ s.strip() for s in d[k].split(',') ]
        elif isinstance(d[k], dict):
            result = result + scopes_inside(d[k])
    return result

# Use a set so each scope name is recorded only once.
scopes = set()

# Plain .tmLanguage files under the data directory (parent of Packages).
for x in os.walk(sublime.packages_path() + '/..'):
    for f in glob.glob(os.path.join(x[0], '*.tmLanguage')):
        for s in scopes_inside(plistlib.readPlist(f)):
            scopes.add(s.strip())

# .tmLanguage files packed inside .sublime-package (zip) archives.
for x in os.walk(sublime.packages_path() + '/..'):
    for f in glob.glob(os.path.join(x[0], '*.sublime-package')):
        input_zip = ZipFile(f)
        for name in input_zip.namelist():
            if name.endswith('.tmLanguage'):
                # BUG FIX: the original called self.get_scopes_from(...), which is
                # undefined at module level; scopes_inside() is the intended helper.
                for s in scopes_inside(plistlib.readPlistFromBytes(input_zip.read(name))):
                    scopes.add(s.strip())

scopes = list(scopes)
And it gives this list now:
"font",
"license",
"source.c++",
"source.cmake",
"source.coffee",
"source.css",
"source.d",
"source.disasm",
"source.dockerfile",
"source.gdb.session",
"source.gdbregs",
"source.git",
"source.gradle",
"source.groovy",
"source.gruntfile.coffee",
"source.gruntfile.js",
"source.gulpfile.coffee",
"source.gulpfile.js",
"source.ini",
"source.ini.editorconfig",
"source.jade",
"source.jl",
"source.js",
"source.json",
"source.json.bower",
"source.json.npm",
"source.jsx",
"source.less",
"source.php",
"source.procfile",
"source.puppet",
"source.pyjade",
"source.qml",
"source.rust",
"source.sass",
"source.scss",
"source.shell",
"source.stylus",
"source.swift",
"source.yaml",
"source.zen.5a454e6772616d6d6172",
"text.html.basic",
"text.html.mustache",
"text.html.ruby",
"text.html.twig",
"text.slim",
"text.todo"
But I can't find some languages like Python in this list. I guess the others are stored within some binary files somewhere within the installation folder. If that's true, how can I parse those files?
I just found the remaining packages, which are stored within the installation directory. So the final code which gives all scope names is:
import sublime, sublime_plugin, os, subprocess, glob, tempfile, plistlib
from zipfile import ZipFile

# This function gives an array of scope names from the plist dictionary passed as argument
def scopes_inside(d):
    """Recursively collect all 'scopeName' values (comma-split, stripped)."""
    result = []
    for k in d.keys():
        if k == 'scopeName':
            result = result + [ s.strip() for s in d[k].split(',') ]
        elif isinstance(d[k], dict):
            result = result + scopes_inside(d[k])
    return result

# Using set to have unique values
scopes = set()

# Parsing all .tmLanguage files from the Packages directory
for x in os.walk(sublime.packages_path()):
    for f in glob.glob(os.path.join(x[0], '*.tmLanguage')):
        for s in scopes_inside(plistlib.readPlist(f)):
            scopes.add(s.strip())

# Parsing all .tmLanguage files inside .sublime-package files from the Installed Packages directory
for x in os.walk(sublime.installed_packages_path()):
    for f in glob.glob(os.path.join(x[0], '*.sublime-package')):
        input_zip = ZipFile(f)
        for name in input_zip.namelist():
            if name.endswith('.tmLanguage'):
                # BUG FIX: was self.get_scopes_from(...), undefined at module level.
                for s in scopes_inside(plistlib.readPlistFromBytes(input_zip.read(name))):
                    scopes.add(s.strip())

# Parsing all .tmLanguage files inside .sublime-package files from the Installation directory
for x in os.walk(os.path.dirname(sublime.executable_path())):
    for f in glob.glob(os.path.join(x[0], '*.sublime-package')):
        input_zip = ZipFile(f)
        for name in input_zip.namelist():
            if name.endswith('.tmLanguage'):
                # BUG FIX: same correction as above.
                for s in scopes_inside(plistlib.readPlistFromBytes(input_zip.read(name))):
                    scopes.add(s.strip())

scopes = list(scopes)
This code may give different results depending on Packages installed (some packages add new syntax/scope names). In my case, the result was :
font
license
source.actionscript.2
source.applescript
source.asp
source.c
source.c++
source.camlp4.ocaml
source.clojure
source.cmake
source.coffee
source.cs
source.css
source.d
source.diff
source.disasm
source.dockerfile
source.dosbatch
source.dot
source.erlang
source.gdb.session
source.gdbregs
source.git
source.go
source.gradle
source.groovy
source.gruntfile.coffee
source.gruntfile.js
source.gulpfile.coffee
source.gulpfile.js
source.haskell
source.ini
source.ini.editorconfig
source.jade
source.java
source.java-props
source.jl
source.js
source.js.rails
source.json
source.json.bower
source.json.npm
source.jsx
source.less
source.lisp
source.lua
source.makefile
source.matlab
source.nant-build
source.objc
source.objc++
source.ocaml
source.ocamllex
source.ocamlyacc
source.pascal
source.perl
source.php
source.procfile
source.puppet
source.pyjade
source.python
source.qml
source.r
source.r-console
source.regexp
source.regexp.python
source.ruby
source.ruby.rails
source.rust
source.sass
source.scala
source.scss
source.shell
source.sql
source.sql.ruby
source.stylus
source.swift
source.tcl
source.yaml
source.zen.5a454e6772616d6d6172
text.bibtex
text.haml
text.html.asp
text.html.basic
text.html.erlang.yaws
text.html.javadoc
text.html.jsp
text.html.markdown
text.html.markdown.multimarkdown
text.html.mustache
text.html.ruby
text.html.tcl
text.html.textile
text.html.twig
text.log.latex
text.plain
text.restructuredtext
text.slim
text.tex
text.tex.latex
text.tex.latex.beamer
text.tex.latex.haskell
text.tex.latex.memoir
text.tex.latex.rd
text.tex.math
text.todo
text.xml
text.xml.xsl
Related
I am working on a load/save module (a GUI written in Python) that will be used with and imported to future programs. My operating system is Windows 10. The problem I've run into is that my get_folders() method is grabbing ALL folder names, including ones that I would rather ignore, such as system folders and hidden folders (best seen on the c-drive).
I have a work around using a hard-coded exclusion list. But this only works for folders already on the list, not for hidden folders that my wizard may come across in the future. I would like to exclude ALL folders that have their 'hidden' attribute set. I would like to avoid methods that require installing new libraries that would have to be re-installed whenever I wipe my system. Also, if the solution is non-Windows specific, yet will work with Windows 10, all the better.
I have searched SO and the web for an answer but have come up empty. The closest I have found is contained in Answer #4 of this thread: Check for a folder, then create a hidden folder in Python, which shows how to set the hidden attribute when creating a new directory, but not how to read the hidden attribute of an existing directory.
Here is my Question: Does anyone know of a way to check if a folder's 'hidden' attribute is set, using either native python, pygame, or os. commands? Or, lacking a native answer, I would accept an imported method from a library that achieves my goal.
The following program demonstrates my get_folders() method, and shows the issue at hand:
# Written for Windows 10
import os
import win32gui

CLS = lambda :os.system('cls||echo -e \\\\033c') # Clear-Command-Console function

def get_folders(path = -1, exclude_list = -1):
    """Return the names of sub-folders of *path*.

    -1 is used as a sentinel for "use the default" (current directory /
    built-in exclusion list); it avoids a mutable default argument.
    Folders whose name contains '$' or appears in exclude_list are skipped,
    unless exclude_list is explicitly passed as [].
    """
    if path == -1: path = os.getcwd()
    if exclude_list == -1: exclude_list = ["Config.Msi","Documents and Settings","System Volume Information","Recovery","ProgramData"]
    dir_list = [entry.name for entry in os.scandir(path) if entry.is_dir()] if exclude_list == [] else\
    [entry.name for entry in os.scandir(path) if entry.is_dir() and '$' not in entry.name and entry.name not in exclude_list]
    return dir_list

def main():
    """Demonstrate get_folders() with and without the exclusion work-around."""
    HWND = win32gui.GetForegroundWindow() # Get Command Console Handle.
    win32gui.MoveWindow(HWND,100,50,650,750,False) # Size and Position Command Console.
    CLS() # Clear Console Screen.
    print(''.join(['\n','Folder Names'.center(50),'\n ',('-'*50).center(50)]))
    # Example 1: Current Working Directory
    dirs = get_folders() # Get folder names in current directory (uses exclude list.)
    for elm in dirs:print(' ',elm) # Show the folder names.
    print('','-'*50)
    # Example 2: C Drive, All Folders Included
    dirs = get_folders('c:\\', exclude_list = []) # Get a list of folders in the root c: drive, nothing excluded.
    for elm in dirs: print(' ',elm) # Show the folder names.
    print('','-'*50)
    # Example 3: C Drive, Excluded Folder List Work-Around
    dirs = get_folders('c:\\') # Get a list of folders in the root c: drive, excluding system dirs named in the exclude_list.
    for elm in dirs:print(' ',elm) # Show the folder names.
    print("\n Question: Is there a way to identify folders that have the 'hidden' attribute\n\t set to True, rather than using a hard-coded exclusion list?",end='\n\n' )

# ==========================
if __name__ == "__main__":
    main()
    input(' Press [Enter] to Quit: ')
    CLS()
Here is my revised get_folders() method, with thanks to Alexander.
# Written for Windows 10
# key portions of this code borrowed from:
# https://stackoverflow.com/questions/40367961/how-to-read-or-write-the-a-s-h-r-i-file-attributes-on-windows-using-python-and-c/40372658#40372658
# with thanks to Alexander Goryushkin.
import os
from os import scandir, stat
from stat import (
FILE_ATTRIBUTE_ARCHIVE as A,
FILE_ATTRIBUTE_SYSTEM as S,
FILE_ATTRIBUTE_HIDDEN as H,
FILE_ATTRIBUTE_READONLY as R,
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED as I
)
from ctypes import WinDLL, WinError, get_last_error
import win32gui
CLS = lambda :os.system('cls||echo -e \\\\033c') # Clear-Command-Console function
def read_or_write_attribs(kernel32, entry, a=None, s=None, h=None, r=None, i=None, update=False):
    """Read (and optionally write) Windows file attributes of a scandir entry.

    Parameters:
        kernel32 -- ctypes WinDLL handle used only when attributes change.
        entry    -- an os.DirEntry-like object (needs .stat() and .path).
        a/s/h/r/i -- tri-state flags: True sets, False clears, None leaves
                     Archive / System / Hidden / Read-only / Indexed as-is.
        update   -- when True, re-stat the path instead of using the cached
                    DirEntry stats (slower; may raise WinError Access denied,
                    e.g. on C:\\System Volume Information).

    Returns a tuple of bools: (archive, system, hidden, readonly, indexed).
    Raises WinError if SetFileAttributesW fails.
    """
    # Get the file attributes as an integer.
    if not update:
        # Fast because we access the stats cached on the DirEntry.
        attrs = entry.stat(follow_symlinks=False).st_file_attributes
    else:
        # A bit slower because we re-read the stats from the file path.
        attrs = stat(entry.path, follow_symlinks=False).st_file_attributes

    # Construct the new attribute mask.
    newattrs = attrs

    def setattrib(attr, value):
        # Helper: set/clear a single attribute bit; None means "leave alone".
        nonlocal newattrs
        # Use '{0:032b}'.format(number) to understand what this does.
        if value is True:
            newattrs = newattrs | attr
        elif value is False:
            newattrs = newattrs & ~attr

    setattrib(A, a)
    setattrib(S, s)
    setattrib(H, h)
    setattrib(R, r)
    setattrib(I, i if i is None else not i)  # Inverted: the bit means "NOT content indexed".
    # Optionally add more attributes here.
    # See https://docs.python.org/3/library/stat.html#stat.FILE_ATTRIBUTE_ARCHIVE

    # Write the new attributes only if they changed.
    if newattrs != attrs:
        if not kernel32.SetFileAttributesW(entry.path, newattrs):
            raise WinError(get_last_error())

    # BUG FIX: the original had an unreachable duplicated stat-reading section
    # and a stray `return dir_list` (an undefined name) after this return;
    # that dead code has been removed.
    return ( bool(newattrs & A),
             bool(newattrs & S),
             bool(newattrs & H),
             bool(newattrs & R),
             not bool(newattrs & I) )  # Inverted: bit set == not indexed.
def get_folders(path=-1, show_hidden=False):
    """List sub-folder names of *path*, skipping hidden ones by default.

    path        -- directory to scan; -1 (sentinel) means the current directory.
    show_hidden -- when True, folders with the Windows 'hidden' attribute
                   are included as well.
    """
    if path == -1:
        path = os.getcwd()
    folders = []
    kernel32 = WinDLL('kernel32', use_last_error=True)
    for entry in scandir(path):
        # Only the 'hidden' flag matters here; the rest are ignored.
        _, _, hidden, _, _ = read_or_write_attribs(kernel32, entry)
        if entry.is_dir() and (show_hidden or not hidden):
            folders.append(entry.name)
    return folders
def main():
    """Demonstrate get_folders() excluding, then including, hidden folders."""
    HWND = win32gui.GetForegroundWindow() # Get Command Console Handle.
    win32gui.MoveWindow(HWND,100,50,650,750,False) # Size and Position Command Console.
    CLS() # Clear Console Screen.
    line_len = 36
    # Example 1: C Drive, Exclude Hidden Folders
    print(''.join(['\n','All Folders Not Hidden:'.center(line_len),'\n ',('-'*line_len).center(line_len)]))
    dirs = get_folders('c:\\') # Get a list of folders on the c: drive, exclude hidden.
    for elm in dirs:print(' ',elm) # Show the folder names.
    print('','='*line_len)
    # Example 2: C Drive, Include Hidden Folders
    print(" All Folders Including Hidden\n "+"-"*line_len)
    dirs = get_folders('c:\\', show_hidden = True) # Get a list of folders on the c: drive, including hidden.
    for elm in dirs: print(' ',elm) # Show the folder names.
    print('','-'*line_len)

# ==========================
if __name__ == "__main__":
    main()
    input(' Press [Enter] to Quit: ')
    CLS()
I have an almost working SConstruct file. I'm not using any SConscript files currently, and would prefer not to need any in my source repositories (git, not SCons).
Quick summary -- my problem occurs when changing some arguments, then returning to the previous arguments, the same files are rebuilt.
I run scons -f Builder_repo/SConstruct 'NameOfTargetLibrary.b' to build a library, NameOfTargetLibrary.b from NameOfTargetLibrary.src.
<lib>.b should be placed in a location that depends on various flags (Debug/Release, 32/64 bit, platform(from list)) like so:
topdir
|\-- Builder_repo (containing SConstruct, site_scons\...)
|\-- Lib1 (contains lib1.src, bunch of source files)
|\-- Lib2 (lib2.src, lib2 sources)
\--- BuiltLibs
|\-- Windows
| |\-- Release_32
| | |\-- lib1.b
| | |\-- lib2.b
| | \--- libn.b
| |\-- Debug_64
| | |\-- lib1.b
| | |\-- lib2.b
| | \--- libn.b
| \--- (Debug_32, Release_64)
\--- (etc, other targets, currently just one)
The command line is something like (split to multiple lines for readability, but only one line in SCons/cmdLine)
"abspath to exe" "static path and args" --x64 --
"abspath(lib1.src)" "abspath(BuiltLibs)"
"abspath(BuiltLibs/Windows/Release_64)"
"flags for debug/release, target, bitness"
The 'working' SConstruct uses a tool with a generate(env) something like:
construct target directory (e.g. BuiltLibs\Windows\Release_32) Store in env.
search for .src files
get containing directory (using os.path.dirname)
add to env.Repositories(dirname(lib.src))
tgt = env.BLDR(<TARGETDIR>/lib.b, lib.src)
env.Alias(lib.b, tgt)
The Builder then uses an Emitter to add to the source list any <TARGETDIR>/libx.b files on which lib.src depends (read from a source file). These could instead be added as just libx.b if preferable?
The Generator parses the input target and source lists to form the command line, which it returns. With the current configuration, target and source are both relative paths, so probably the Repository calls are unnecessary.
When I run
scons -f Builder_repo\SConstruct 'lib2.b' DEBUG=0 BITNESS=32 TRGT=Windows
(lib2.src depends on lib1.b, due to the emitter), the correct lib1.b and lib2.b are built and placed in BuiltLibs\Windows\Release_32\lib{1,2}.b.
If I repeat the command, then nothing is built and 'lib2.b is up to date'.
Then, I try scons -f <..> 'lib2.b' DEBUG=1 <other args same>. Both libraries are built and placed in BuiltLibs\Windows\Debug_32\lib{1,2}.b as expected.
When I then try the first command again (DEBUG=0) I expect nothing to be built (the lib1.b, lib2.b are still up to date - no sources changed and the previously built files are still in Release_32) but instead they are rebuilt.
I tried to solve this problem by returning a reduced command line when for_signature is true, such that the value returned in that case is more like:
"abspath to exe" "static path and args" --
"abspath(lib1.src)" "abspath(BuiltLibs)" "version string"
where "version string" is something not affected by the debug/release, 32/64, platform flags (but does change with the source code). This made seemingly no difference.
I tried some variations on this using env.VariantDir(<TARGETDIR>, '.', duplicate=0) and then tgt = env.BLDR(lib.b, Lib1/lib.src), env.Alias(<TARGETDIR>/lib.b, tgt) or similar, but I haven't managed to improve anything (some configurations just made it always rebuilt, others made it so the dependencies couldn't be found and SCons errored.
How should I be doing this?
SConstruct:
import os

# Timestamp-based up-to-date checks, as in make.
# NOTE(review): combined with globbed source files this caused the spurious
# rebuilds discussed below -- removing it was part of the eventual fix.
Decider('make')
# Build nothing unless a target is named on the command line.
Default(None)

# Add command line arguments with default values.
# These can be specified as, for example, LV_TARGET=cRIO
cmdVars = Variables(None, ARGUMENTS)
cmdVars.AddVariables(
    EnumVariable('LV_TARGET', 'Choose the target for LabVIEW packages',
                 'Windows', allowed_values=('Windows', 'cRIO')),
    BoolVariable('DEBUG', 'Set to 1 to build a debug-enabled package', 0),
    EnumVariable('BITNESS', 'Choose the bitness for LabVIEW packages',
                 '32', allowed_values=('32', '64')),
    EnumVariable('LV_VER', 'Choose the version of LabVIEW to use',
                 '2017', allowed_values=('2017',))
)

# Define a list of source extensions
src_exts = ['.vi', '.ctl', '.lvlib', '.vim', '.vit']
# The PPL tool (site_scons) receives the PPL output dir and the extension list.
env = Environment(variables = cmdVars, ENV = os.environ, tools=['PPL'], PPLDIR='PPLs', SRC_EXTS=' '.join(src_exts))
init.py for the PPL tool:
""" SCons.Tool.PPL
Tool-specific initialization for compilation of lvlibp files from lvlib files,
using the Wiresmith 'labview-cli.exe' and the LabVIEW code stored in the
PPL_Builder GitHub repository.
This module should not usually be imported directly.
It can be imported using a line in SConstruct or SConscript like
env = Environment(tools=['PPL'])
"""
# A reference for this code can be found at
# https://github.com/SCons/scons/wiki/ToolsForFools
# which describes the creation of a Tool for JALv2 compilation.
import SCons.Builder
from SCons.Script import GetOption
import SCons.Node
import SCons.Util
import os.path
import textwrap
import re
import contextlib
import subprocess
# Add warning classes
class ToolPPLWarning(SCons.Warnings.Warning):
    """Base warning class for the PPL tool."""
    pass

class LabVIEW_CLI_ExeNotFound(ToolPPLWarning):
    """Raised (via StopError) when the labview-cli executable cannot be found."""
    pass

SCons.Warnings.enableWarningClass(ToolPPLWarning)

# Set to True for extra diagnostic output from this tool.
__verbose = False
class LV_BuildPaths:
    """Container for the build paths and configuration flags used by the
    PPL compilation tool.

    Attributes:
        hwTarget, hwTargetDir -- hardware target name and its directory name
        debugOpt, debugFlag, debugString -- debug setting as raw/int/str forms
        bitness, bitnessFlag -- '32'/'64' and the matching CLI flag
        lv_ver -- LabVIEW version year
        pplDir, storageDir, copyDir, topDataDir, dataDir -- build layout;
        compiled PPL files are searched for in storageDir.
    """

    def __init__(self, env):
        # Hardware target: cRIO builds land in a device-specific sub-path.
        self.hwTarget = env.get('LV_TARGET')
        extra = ""
        if self.hwTarget == "cRIO":
            self.hwTargetDir = "cRIO-9045"
            extra = os.path.join('home', 'lvuser', 'natinst', 'bin')
        else:
            self.hwTargetDir = self.hwTarget
        # Debug/release configuration in three convenient forms.
        self.debugOpt = env.get('DEBUG')
        self.debugFlag = int(self.debugOpt)
        self.debugString = "Debug" if self.debugOpt else "Release"
        # Bitness: only 64-bit needs an explicit CLI flag.
        self.bitness = env.get('BITNESS')
        self.bitnessFlag = '--x64' if self.bitness == '64' else ''
        # LabVIEW version year.
        self.lv_ver = env.get('LV_VER')
        # Build directory layout; PPL files should be searched for in storageDir.
        self.pplDir = os.path.normpath(env.get('PPLDIR', 'PPLs'))
        self.storageDir = os.path.join(self.pplDir, self.hwTargetDir, f'{self.debugString}_{self.bitness}', extra)
        self.copyDir = os.path.join(self.pplDir, self.hwTargetDir, extra)
        self.topDataDir = os.path.join(self.pplDir, 'Data')
        self.dataDir = os.path.join(self.copyDir, 'Data')

    def __str__(self):
        # dedent strips the common indentation so the output starts at column 0.
        return textwrap.dedent(f"""\
            The directories are as follows...
            PPL Dir: {self.pplDir}
            Storage Dir: {self.storageDir}
            Copy Dir: {self.copyDir}""")
def _print_info(message):
    """Print *message* unless SCons was invoked with --quiet/-s/--silent."""
    if GetOption('silent'):
        return
    print(message)
def _detectCLI(env):
    """Locate the labview-cli executable installed by Wiresmith's VIPackage.

    Order of preference: an explicit LV_CLI setting in the environment,
    then the first 'labview-cli' found on the PATH.
    Raises SCons.Errors.StopError when neither is available.
    """
    try:
        # An explicit LV_CLI setting in the environment wins.
        _print_info(f"labview-cli defined in the environment at {env['LV_CLI']}")
        return env['LV_CLI']
    except KeyError:
        pass
    cli = env.WhereIs('labview-cli')
    if cli:
        _print_info(f"Found labview-cli at {cli}")
        return cli
    # BUG FIX: removed the unreachable `return None` that followed this raise.
    raise SCons.Errors.StopError(
        LabVIEW_CLI_ExeNotFound,
        "Could not detect the labview-cli executable")
@contextlib.contextmanager
def pushd(new_dir):
    """Context manager: chdir into *new_dir*, restoring the previous cwd on exit.

    BUG FIX: the decorator was written as the comment `#contextlib.contextmanager`
    (almost certainly a lost '@' from formatting); without it, `with pushd(...)`
    fails because a bare generator is not a context manager. A try/finally is
    added so the original directory is restored even if the body raises.
    """
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(previous_dir)
def _getHash(env, dir):
    """Return the output of `git describe` run inside *dir*, or '' when no
    git executable is configured.

    NOTE(review): `dir` shadows the builtin of the same name.
    NOTE(review): env['GIT_EXE'] is only set by _detectGit() when git was
    found -- if _detectGit() was never called this raises KeyError; confirm.
    """
    if env['GIT_EXE']:
        with pushd(dir):
            #cmdLine = env['git_showref']   # alternative command kept for reference
            cmdLine = env['git_describe']
            return subprocess.run(cmdLine, shell=True, capture_output=True, text=True).stdout
    return ''
def _detectGit(env):
    """ Search for a git executable. This is not required for usage """
    git = None
    try:
        # If defined in the environment, use this
        _print_info(f"git executable defined in the environment at {env['GIT_EXE']}")
        git = env['GIT_EXE']
    except KeyError:
        pass
    cli = env.WhereIs('git')
    if cli:
        _print_info(f"Found git at {cli}")
        git = cli
    # NOTE(review): a git found on PATH overrides an explicit GIT_EXE setting,
    # which contradicts the "use this" comment above -- confirm this is intended.
    if git:
        hash_len = 12
        # Store the executable and the pre-built command lines for later use.
        env['GIT_EXE'] = f"'{git}'" # I edited this line compared to the version in the repository, but I don't think it's relevant.
        env['git_describe'] = f'"{git}" describe --dirty="*" --long --tags --always --abbrev={hash_len}'
        env['git_showref'] = f'"{git}" show-ref --hash={hash_len} --head head'
    return None
#
# Builder, Generator and Emitter
#
def _ppl_generator(source, target, env, for_signature):
    """ This function generates the command line to build the PPL.
    It should expect to receive a target as a relative path
    ['<SD>/A.lvlibp'], and source will be either None, or
    ['<src>/A.lvlib'].
    When for_signature == 0, the PPL will actually be built.
    """
    # Get these parameters properly
    run_vi = os.path.abspath(os.path.join('.','PPL_Builder','Call_Builder_Wiresmith.vi'))
    cliOpts = ''
    package_ver = "0.0.0.0#sconsTest"
    # These are extracted from the environment
    cli = env['LV_CLI']
    bp = env['LV_Dirs']
    ver = bp.lv_ver
    pplDir = f'{os.path.abspath(bp.pplDir)}'
    storageDir = f'{os.path.abspath(bp.storageDir)}'
    # Dependencies are parsed for the command line. They are already dependencies of the target.
    # source[0] is the .lvlib; any further entries are dependency PPLs added by the emitter.
    pplSrcs = source[1:]
    depsString = ""
    if pplSrcs:
        if __verbose:
            _print_info("Adding PPL dependencies: %s" % [ str(ppl) for ppl in pplSrcs ])
        depsString = " ".join([f'"{os.path.basename(ppl.get_string(for_signature))}"' for ppl in pplSrcs])
    cmdLine = f'"{cli}" --lv-ver {ver} {bp.bitnessFlag} {run_vi} {cliOpts} -- '
    lvlib_relpath = str(source[0])
    lvlib_abspath = os.path.abspath(lvlib_relpath)
    # NOTE(review): the git query runs every time this generator is invoked,
    # including signature computation -- see the closing remarks of the answer.
    git_ver = _getHash(env, os.path.dirname(lvlib_abspath))
    print("git version is " + str(git_ver).strip())
    argsLine = f'"{lvlib_abspath}" "{pplDir}" "{storageDir}" {bp.debugFlag} {bp.hwTarget} "{package_ver}" {depsString}'
    if not for_signature:
        _print_info(f"Making {lvlib_abspath}")
    return cmdLine + argsLine
    #return cmdLine + argsLine
def _ppl_emitter(target, source, env):
    """ Appends any dependencies found in the .mk file to the list of sources.
    The target should be like [<SD>/A.lvlibp],
    and the source should be like [<src>/A.lvlib]
    """
    if not source:
        return target, source
    exts_tuple = tuple(env['SRC_EXTS'].split(' '))
    # Every source-code file under the library's folder becomes a dependency.
    src_files = _get_other_deps(source, exts_tuple)
    if __verbose:
        _print_info("Adding " + str(src_files) + " as dependencies")
    env.Depends(target, src_files)
    # PPLs named in the .mk file are appended as extra sources of the target.
    depsList, nodeDepsList = _get_ppl_deps(str(source[0]), env)
    if nodeDepsList:
        source += [os.path.normpath(os.path.join(env['LV_Dirs'].storageDir, str(pplNode))) for pplNode in nodeDepsList]
    return target, source
# The Builder that compiles a .lvlib into a .lvlibp via the generated command line.
_ppl_builder = SCons.Builder.Builder(generator = _ppl_generator, emitter = _ppl_emitter)

def lvlibpCreator(env, target, source=None, *args, **kw):
    """ A pseudo-Builder for the labview-cli executable
    to build .lvlibp files from .lvlib sources, with
    accompanying dependency checks on appropriate source files
    Anticipate this being called via env.PPL('<SD>/A.lvlibp'),
    where target is a string giving a relative path, or
    env.PPL('<SD>/A.lvlibp', '<src>/A.lvlib')
    """
    # NOTE(review): bPaths is fetched but not used in this function -- confirm.
    bPaths = env['LV_Dirs']
    # Ensure that if source exists, it is a list
    if source and not SCons.Util.is_List(source):
        source = [source]
    if __verbose:
        _print_info(f"Target = {target}")
        if source:
            _print_info("Sources = %s" % [ str(s) for s in source])
    if __verbose:
        _print_info("args: %s" % [ str(s) for s in args ])
        _print_info("kw: %s" % str(kw.items()))
    # Delegate to the underlying Builder.
    tgt = _ppl_builder.__call__(env, target, source, **kw)
    return tgt
def _scanForLvlibs(env, topdir=None):
    """Walk *topdir* (default: current directory) for .lvlib files and
    register a PPL build plus a short alias for each one found.

    For every library this adds its directory as an SCons Repository,
    declares the build <storageDir>/<name>.lvlibp from <name>.lvlib,
    and aliases it to the bare file name '<name>.lvlibp'.
    """
    if not topdir:
        topdir = '.'
    bPaths = env['LV_Dirs']
    lvlibList = []
    for root, dirs, files in os.walk(topdir):
        # IDIOM FIX: use endswith() rather than slicing x[-6:] to test the extension.
        lvlibList += [os.path.join(root, name) for name in files if name.endswith('.lvlib')]
    for lib in lvlibList:
        # Set up the possibility of building the lvlib
        (srcDir, libnameWithExt) = os.path.split(lib)
        # Add the source repository
        if __verbose:
            _print_info("Adding repository at: " + srcDir)
        env.Repository(srcDir)
        # Add the build instruction; the target lives in the storage directory.
        lvlibpName = libnameWithExt + 'p'
        tgt = env.PPL(os.path.normpath(os.path.join(bPaths.storageDir, lvlibpName)), lib)
        if __verbose:
            _print_info(f"Adding alias from {libnameWithExt+'p'} to {str(tgt)}")
        env.Alias(lvlibpName, tgt)
def _get_ppl_deps(lvlib, env):
lvlib_s = str(lvlib)
lvlib_name = os.path.basename(lvlib_s)
mkFile = lvlib_s.replace('.lvlib','.mk')
if os.path.isfile(mkFile):
# load dependencies from file
depVarName = lvlib_name.replace(' ',r'\+').replace('.lvlib','_Deps')
f = open(mkFile, "r")
content = f.readlines() # Read all lines (not just first)
depsList = []
for line in content:
matchedDeps = re.match(depVarName+r'[ ]?:=[ ]?(.*)$', line)
if matchedDeps:
listDeps = matchedDeps.group(1).replace(r'\ ','+').split(' ')
depsList = ['"' + elem.replace('+', ' ') + '"' for elem in listDeps]
nodeList = [ env.File(elem.replace('+', ' ')) for elem in listDeps]
return (depsList, nodeList)
raise RuntimeError("Found a .mk file ({mkFile}) but could not parse it to get dependencies.")
#print(f"No .mk file for {lvlib_name}")
return ('', None)
def _get_other_deps(source, exts):
    """Collect every file under the directory of source[0] whose name ends
    with one of *exts* (a tuple of extension strings)."""
    parent_dir = os.path.dirname(str(source[0]))
    if __verbose:
        _print_info(f"Searching {parent_dir} for source files...")
        _print_info(f"Acceptable extensions are {exts}")
    found = []
    for root, dirs, files in os.walk(parent_dir):
        for fname in files:
            if fname.endswith(exts):
                found.append(os.path.join(root, fname))
    return found
def generate(env):
    '''Add builders and construction variables to the Environment.'''
    # Fail early (StopError) if labview-cli cannot be located.
    env['LV_CLI'] = _detectCLI(env)
    # Expose the pseudo-builder as env.PPL(...).
    env.AddMethod(lvlibpCreator, "PPL")
    # git is optional; _detectGit stores command lines in env when found.
    _detectGit(env)
    bp = LV_BuildPaths(env)
    _print_info(bp)
    env['LV_Dirs'] = bp
    # Search for all lvlib files
    _scanForLvlibs(env)
def exists(env):
    """Tool existence check: truthy when the labview-cli executable is found."""
    return _detectCLI(env)
As briefly described in the comments, the reason for the rebuilds was the use of Decider('make') (i.e. checking by timestamp) with the effective globbing of source files catching an autogenerated file.
This was easily seen when running scons --debug=explain as suggested by bdbaddog in the comments to the question.
Although slightly brittle, the simplest solution is to modify the emitter, leaving the following (see the ---> mark) :
def _ppl_emitter(target, source, env):
    """ Appends any dependencies found in the .mk file to the list of sources.
    The target should be like [<SD>/A.lvlibp],
    and the source should be like [<src>/A.lvlib]
    """
    if not source:
        return target, source
    exts_tuple = tuple(env['SRC_EXTS'].split(' '))
    src_files = _get_other_deps(source, exts_tuple)
    # The '--->' below is the answer's highlight marker (not code): the
    # autogenerated "Get PPL Version.vi" is filtered out so its changing
    # timestamp no longer triggers rebuilds.
    --->filtered_files = list(filter(lambda x: "Get PPL Version.vi" not in x, src_files))
    if __verbose:
        _print_info("Adding " + str(filtered_files) + " as dependencies")
    env.Depends(target, filtered_files)
    depsList, nodeDepsList = _get_ppl_deps(str(source[0]), env)
    if nodeDepsList:
        source += [os.path.normpath(os.path.join(env['LV_Dirs'].storageDir, str(pplNode))) for pplNode in nodeDepsList]
    return target, source
By removing this file, the target no longer has an explicit dependency on the generated file (this is independent of the Decider call).
Additionally removing the Decider('make') line from the SConstruct file allows the entire source repository to be deleted and redownloaded without triggering rebuilds.
As a side note, the Git-specific code was also removed and placed inside the code called by the Builder - in this way, it is additionally (to the reduction of code benefits) only called if required for a rebuild (rather than every time SCons runs).
I want to get the content of a file that it's storing configuration.
I'm using the following code:
repos = [f for f in gh.iter_user_repos(git_org, branch)]
modules = {}
modules_infos = {}
for a in repos:
    # Get the list of folders in the repo.
    if not a or not a.contents('.', branch):
        continue
    # Get the modules in each folder.
    # NOTE(review): .iteritems() is Python 2 only; on Python 3 use .items().
    for cont in a.contents('.', branch).iteritems():
        if '.' not in cont[0] and cont[0] not in EXCEPTION_FOLDER:
            modules.update({cont[1].name: cont[1].html_url})
            if not cont[1] or not a.contents(cont[1].name, branch):
                continue
            for sub_folder in a.contents(cont[1].name, branch).iteritems():
                if sub_folder[0] == 'configurations.txt':
                    # SECURITY: eval() on repository-supplied content executes
                    # arbitrary code; prefer ast.literal_eval if the file holds
                    # only Python literals -- confirm the config format.
                    vals = eval(base64.b64decode(sub_folder[1].content))
                    modules_infos.update({cont[1].name: [vals.get('name', False),
                                                         vals.get('version', False),]})
But sub_folder[1].content was empty, so I couldn't get my information.
Did I miss anything?
i'm trying to execute this file test.py from command line:
from brpy import init_brpy
import requests # or whatever http request lib you prefer
import MagicalImageURLGenerator # made up

# br_loc is /usr/local/lib by default,
# you may change this by passing a different path to the shared objects
br = init_brpy(br_loc='/path/to/libopenbr')
br.br_initialize_default()
br.br_set_property('algorithm','CatFaceRecognitionModel') # also made up
br.br_set_property('enrollAll','true')

# NOTE(review): file handle is never closed; acceptable for a short script.
mycatsimg = open('mycats.jpg', 'rb').read() # cat picture not provided =^..^=
mycatstmpl = br.br_load_img(mycatsimg, len(mycatsimg))
query = br.br_enroll_template(mycatstmpl)
nqueries = br.br_num_templates(query)

scores = []
for imurl in MagicalImageURLGenerator():
    # load and enroll image from URL
    img = requests.get(imurl).content
    tmpl = br.br_load_img(img, len(img))
    targets = br.br_enroll_template(tmpl)
    ntargets = br.br_num_templates(targets)
    # compare and collect scores
    scoresmat = br.br_compare_template_lists(targets, query)
    for r in range(ntargets):
        for c in range(nqueries):
            scores.append((imurl, br.br_get_matrix_output_at(scoresmat, r, c)))
    # clean up - no memory leaks
    br.br_free_template(tmpl)
    br.br_free_template_list(targets)

# print top 10 match URLs
# NOTE(review): this ascending sort keeps the 10 *lowest* scores; if a higher
# score means a better match, sort with reverse=True -- confirm score semantics.
scores.sort(key=lambda s: s[1])
for s in scores[:10]:
    print(s[0])

# clean up - no memory leaks
br.br_free_template(mycatstmpl)
br.br_free_template_list(query)
br.br_finalize()
This script file is in /myfolder/, while the brpy library is in /myfolder/scripts/brpy.
The brpy folder contains 3 files: "face_cluster_viz.py", "html_viz.py" and "__init__.py".
When I try to execute this file from the command line it shows an error:
NameError: name 'init_brpy' is not defined
Why? What am I doing wrong? Is it possible to execute this script from the command line?
Thanks
The problem is the following line:
br = init_brpy(br_loc='/path/to/libopenbr')
You have to set your path of the openbr library.
I was wondering if there was a faster way to implement a function that returns a case-sensitive path in python. One of the solutions I came up with works with both linux and windows, but requires that I iterate os.listdir, which can be slow.
This solution works fine for an application and context that does not need plenty of speed:
def correctPath(start, path):
    'Returns a unix-type case-sensitive path, works in windows and linux'
    # NOTE: Python 2 code (uses unicode()).
    start = unicode(start)
    path = unicode(path)
    rebuilt = ''
    if path[-1] == '/':
        path = path[:-1]
    current = start
    corrections = 0
    for segment in path.split('\\'):
        # Case-insensitively match this segment against the directory listing.
        matched = None
        for entry in os.listdir(current):
            if segment.lower() == entry.lower():
                if segment != entry:
                    corrections += 1
                current = os.path.join(current, entry)
                matched = os.path.join(rebuilt, entry)
                break
        if not matched:
            return None
        rebuilt = matched
    return rebuilt, corrections  # (corrected path, number of corrections)
>>> correctPath('C:\\Windows', 'SYSTEM32\\CmD.EXe')
(u'System32\\cmd.exe', 2)
This however, will not be as fast when the context is gathering filenames from a large 50,000+ entry database.
One method would be to create a dict tree for each directory. Match the dict tree with the directory parts of the path, and if a key-miss occurs, perform an os.listdir to find and create a dict entry for the new directory and remove the unused parts or keep a variable counter as a way to assign a "lifetime" to each directory.
The following is a slight re-write of your own code with three modifications: checking if the filename is already correct before matching, processing the listing to lowercase before testing, using index to find the relevant 'true case' file.
def corrected_path(start, path):
    """Return a case-corrected relative path and the number of corrections.

    Like correctPath, but matches against a lowercased copy of the
    directory listing using list.index() instead of an inner loop.

    The exact-name membership test against os.listdir (rather than the
    original os.path.exists check) is deliberate: exists() matches
    case-insensitively on Windows and macOS, which silently accepted a
    wrongly-cased component — the very thing this function exists to fix.

    Parameters:
        start: existing base directory to resolve against.
        path:  backslash-separated relative path whose casing may be wrong.

    Returns:
        (corrected, corrections) on success, where corrected uses the real
        on-disk casing; False if a component is not found.
    """
    # os.fspath replaces the Python-2-only unicode() calls (NameError on
    # Python 3) and additionally accepts pathlib.Path arguments.
    start = os.fspath(start)
    path = os.fspath(path)
    if path.endswith('/'):
        path = path[:-1]
    corrected = ''
    cd = start
    corrections = 0
    for part in path.split('\\'):
        listing = os.listdir(cd)
        if part in listing:
            # Exact on-disk name — already correct.
            real = part
        else:
            folded = [name.lower() for name in listing]
            if part.lower() not in folded:
                return False  # this path element isn't found
            real = listing[folded.index(part.lower())]  # real on-disk name
            corrections += 1
        cd = os.path.join(cd, real)
        corrected = os.path.join(corrected, real)
    return corrected, corrections
I'm not sure if this will be much faster, though there is a little less testing going on, plus the 'already-correct' catch at the beginning may help.
An extended version with case-insensitive caching to pull out the corrected path:
import os,re
def corrected_paths(start, pathlist):
    """Case-correct a batch of paths, caching fixes across the batch.

    Batch wrapper around the per-component correction logic that keeps a
    cache mapping lowercased uncorrected prefixes to their corrected
    absolute form, so shared path prefixes in *pathlist* are only
    listdir()-ed once.

    Parameters:
        start:    existing base directory to resolve against.
        pathlist: iterable of relative paths ('/' or '\\' separated) whose
                  casing may be wrong.

    Returns:
        (corrected_list, corrections_count) where each list entry is the
        corrected path joined onto start; False if any component of any
        path cannot be found.
    """
    start = os.fspath(start)
    # Strip a single trailing unix separator, as the single-path version does.
    cleaned = [p[:-1] if p.endswith('/') else p for p in map(os.fspath, pathlist)]
    # cache: lowercased uncorrected absolute prefix -> corrected absolute prefix
    cache = {}
    results = []
    corrections = 0
    # NOTE: the original pattern '(/+|\+)' split on literal '+' characters
    # ('\+' is an escaped plus in a regex), not on backslashes, and its
    # capturing group leaked separator tokens into the part list — which
    # then reset cd to the root via os.path.join(cd, '/').  r'[/\\]+'
    # splits on runs of either separator and keeps only real components.
    separators = re.compile(r'[/\\]+')
    for path in cleaned:
        cd = start
        parts = [p for p in separators.split(path) if p]
        # Cache pre-pass: find the longest already-corrected prefix,
        # walking backwards so the first hit is the longest match.
        for n in range(len(parts)):
            prefix = os.path.join(cd, *parts[:len(parts) - n]).lower()
            if prefix in cache:
                cd = cache[prefix]  # jump straight to the corrected prefix
                parts = parts[len(parts) - n:]  # only the unmatched tail remains
                break
        # Fall back to walking the remaining components on disk.  Exact
        # membership in listdir (not os.path.exists, which is
        # case-insensitive on Windows/macOS) guarantees a real case match.
        for part in parts:
            listing = os.listdir(cd)
            if part in listing:
                real = part
            else:
                folded = [name.lower() for name in listing]
                if part.lower() not in folded:
                    print("Error %s not in folder %s" % (part.lower(), folded))
                    return False  # this path element isn't found
                real = listing[folded.index(part.lower())]
                corrections += 1
            # Cache every resolved level (corrected or already correct) so
            # later paths sharing this prefix skip the listdir entirely.
            cache[os.path.join(cd, part).lower()] = os.path.join(cd, real)
            cd = os.path.join(cd, real)
        results.append(cd)
    return results, corrections
On an example run for a set of paths, this reduces the number of listdirs considerably (this is obviously dependent on how alike your paths are):
corrected_paths('/Users/', ['mxF793/ScRiPtS/meTApaTH','mxF793/ScRiPtS/meTApaTH/metapAth/html','mxF793/ScRiPtS/meTApaTH/metapAth/html/css','mxF793/ScRiPts/PuBfig'])
([u'/Users/mxf793/Scripts/metapath', u'/Users/mxf793/Scripts/metapath/metapath/html', u'/Users/mxf793/Scripts/metapath/metapath/html/css', u'/Users/mxf793/Scripts/pubfig'], 14)
([u'/Users/mxf793/Scripts/metapath', u'/Users/mxf793/Scripts/metapath/metapath/html', u'/Users/mxf793/Scripts/metapath/metapath/html/css', u'/Users/mxf793/Scripts/pubfig'], 5)
While working on this I realised that on Mac OS X Python matches paths case-insensitively, so the test for existence always succeeds. In that case the listdir can be shifted up to replace it.