I'm working on a program that searches all the files in my database and groups them based on what number is in the file name (from 001 to 100).
The only problem is that Python formats the number 1 as '1' rather than '001', but '001' is the exact string in the file name, so since I'm building regular expressions from the numbers, the search doesn't match the files the way I want it to.
Would really appreciate some help! Here's my code so far:
import sys
import os
import re
import glob
time_data = open("time_data.txt", "w")
space_data = open("space_data.txt", "w")
folder_list = ['/Users/fenyolab/Downloads/root images/pet week img seq - removed 621 and after - ch1 registered', 'C:/Users/fenyolab/Downloads/root images/0329 to 033116 - WERSCR regen - STELLAR - registered', 'C:/Users/fenyolab/Downloads/root images/0406 to 040816 - H2BIAAWOX regen - GOOD - pt II - REGISTERED']
def stack_at_time_point(direc, time_point):
    time_list = []
    for x in glob.glob('%s/*' % direc):
        if re.search("t.*%s_z" % time_point, x) != None and re.search('_c1.*', x) != None:
            time_list.append(x)
    for i in time_list:
        time_data.write("%s\n" % i)

def stack_at_zlocation(direc, location):
    location_list = []
    for x in glob.glob('%s/*' % direc):
        if re.search("_z.*%s_." % location, x) != None and re.search('_c1.*', x) != None:
            location_list.append(x)
    for i in location_list:
        space_data.write("%s\n" % i)

for i in folder_list:
    for x in xrange(100):
        stack_at_zlocation(i, x)
        space_data.write("\n\n\n")
        stack_at_time_point(i, x)
        time_data.write("\n\n\n")

space_data.close()
time_data.close()
print "Done."
The regex "_z.*%s_." % location matches _z023_ if the specified location is 23, but if the specified location is 1, the program will return _z001_, _z011_, _z021_, _z031_, _z041_ ... _z091_.
You are using printf-style string formatting, which lets you specify a field width and leading zeros.
Replace your %s with %03d:
re.search("t.*%03d_z" % time_point, x)
and
re.search("_z.*%03d_." % location, x)
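For example, with a made-up filename that follows the t..._z..._c1 pattern from the question, the zero-padded pattern only matches the intended slice:
import re
name = "img_t001_z001_c1.tif"   # hypothetical filename, just for illustration
print re.search("_z.*%03d_." % 1, name)    # matches: pattern becomes _z.*001_.
print re.search("_z.*%03d_." % 11, name)   # None: 011 does not appear in the name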
I suggest that you build an index keyed on the (flexible-length) number portion and use it to look up the appropriate file. E.g.:
>>> import re
>>>
>>> locations = ['_z{0:03d}_'.format(x) for x in range(1,101)]
>>>
>>> def create_zindex(names):
... reg = re.compile('_z(\d+)_')
... result = {}
... for name in names:
... m = reg.search(name)
... if not m:
... print "Can't find z index in {0!r}".format(name)
... continue
... zindex = int(m.groups()[0])
... if zindex in result:
... print "Duplicate z-index {0} - {1}".format(name,result[zindex])
... continue
... result[zindex] = name
... return result
...
>>> print locations
['_z001_', '_z002_', '_z003_', '_z004_', '_z005_', '_z006_', '_z007_', '_z008_', '_z009_', '_z010_', '_z011_', '_z012_', '_z013_', '_z014_', '_z015_', '_z016_', '_z017_', '_z018_', '_z019_', '_z020_', '_z021_', '_z022_', '_z023_', '_z024_', '_z025_', '_z026_', '_z027_', '_z028_', '_z029_', '_z030_', '_z031_', '_z032_', '_z033_', '_z034_', '_z035_', '_z036_', '_z037_', '_z038_', '_z039_', '_z040_', '_z041_', '_z042_', '_z043_', '_z044_', '_z045_', '_z046_', '_z047_', '_z048_', '_z049_', '_z050_', '_z051_', '_z052_', '_z053_', '_z054_', '_z055_', '_z056_', '_z057_', '_z058_', '_z059_', '_z060_', '_z061_', '_z062_', '_z063_', '_z064_', '_z065_', '_z066_', '_z067_', '_z068_', '_z069_', '_z070_', '_z071_', '_z072_', '_z073_', '_z074_', '_z075_', '_z076_', '_z077_', '_z078_', '_z079_', '_z080_', '_z081_', '_z082_', '_z083_', '_z084_', '_z085_', '_z086_', '_z087_', '_z088_', '_z089_', '_z090_', '_z091_', '_z092_', '_z093_', '_z094_', '_z095_', '_z096_', '_z097_', '_z098_', '_z099_', '_z100_']
>>> print create_zindex(locations)
{1: '_z001_', 2: '_z002_', 3: '_z003_', 4: '_z004_', 5: '_z005_', 6: '_z006_', 7: '_z007_', 8: '_z008_', 9: '_z009_', 10: '_z010_', 11: '_z011_', 12: '_z012_', 13: '_z013_', 14: '_z014_', 15: '_z015_', 16: '_z016_', 17: '_z017_', 18: '_z018_', 19: '_z019_', 20: '_z020_', 21: '_z021_', 22: '_z022_', 23: '_z023_', 24: '_z024_', 25: '_z025_', 26: '_z026_', 27: '_z027_', 28: '_z028_', 29: '_z029_', 30: '_z030_', 31: '_z031_', 32: '_z032_', 33: '_z033_', 34: '_z034_', 35: '_z035_', 36: '_z036_', 37: '_z037_', 38: '_z038_', 39: '_z039_', 40: '_z040_', 41: '_z041_', 42: '_z042_', 43: '_z043_', 44: '_z044_', 45: '_z045_', 46: '_z046_', 47: '_z047_', 48: '_z048_', 49: '_z049_', 50: '_z050_', 51: '_z051_', 52: '_z052_', 53: '_z053_', 54: '_z054_', 55: '_z055_', 56: '_z056_', 57: '_z057_', 58: '_z058_', 59: '_z059_', 60: '_z060_', 61: '_z061_', 62: '_z062_', 63: '_z063_', 64: '_z064_', 65: '_z065_', 66: '_z066_', 67: '_z067_', 68: '_z068_', 69: '_z069_', 70: '_z070_', 71: '_z071_', 72: '_z072_', 73: '_z073_', 74: '_z074_', 75: '_z075_', 76: '_z076_', 77: '_z077_', 78: '_z078_', 79: '_z079_', 80: '_z080_', 81: '_z081_', 82: '_z082_', 83: '_z083_', 84: '_z084_', 85: '_z085_', 86: '_z086_', 87: '_z087_', 88: '_z088_', 89: '_z089_', 90: '_z090_', 91: '_z091_', 92: '_z092_', 93: '_z093_', 94: '_z094_', 95: '_z095_', 96: '_z096_', 97: '_z097_', 98: '_z098_', 99: '_z099_', 100: '_z100_'}
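Once the index is built, looking up any z-position is a plain dictionary access, for example:
>>> zindex = create_zindex(locations)
>>> zindex[23]
'_z023_'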
I'm trying to change the color of one of the actors, but the call SetColor(colors.GetColor3d("Silver")) doesn't seem to have any effect.
I also tried b.GetMapper().ScalarVisibilityOff(), but that doesn't work either.
import vtk
colors = vtk.vtkNamedColors()
def crear_superficie(*puntos):
    points = vtk.vtkPoints()
    polygon = vtk.vtkPolygon()
    polygon.GetPointIds().SetNumberOfIds(len(puntos))
    for i, p in enumerate(puntos):
        points.InsertNextPoint(*p)
        polygon.GetPointIds().SetId(i, i)
    polygons = vtk.vtkCellArray()
    polygons.InsertNextCell(polygon)
    # Create a PolyData
    polygonPolyData = vtk.vtkPolyData()
    polygonPolyData.SetPoints(points)
    polygonPolyData.SetPolys(polygons)
    # Create a mapper and actor
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputData(polygonPolyData)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().EdgeVisibilityOn()
    actor.GetProperty().SetLineWidth(2)
    actor.GetProperty().SetColor(colors.GetColor3d("Banana"))
    return actor

def main():
    a = crear_superficie((0, 0, 0), (0, 6, 0), (15, 10, 0), (30, 6, 0), (30, 0, 0))
    b = crear_superficie((0, 6, 0), (15, 10, 0), (15, 10, 60), (0, 6, 60))
    # Add the polygon to a list of polygons
    # Visualize
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetWindowName("Polygon")
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    renderer.AddActor(a)
    renderer.AddActor(b)
    renderer.SetBackground(colors.GetColor3d("Silver"))
    renderWindow.Render()
    renderWindowInteractor.Start()
    b.GetProperty().SetColor(colors.GetColor3d("Silver"))

if __name__ == '__main__':
    main()
EDIT:
After calling Render as Nico suggested, I get this warning:
2020-06-01 12:45:40.413 ( 5.598s) [ ] vtkOpenGLState.cxx:1380 WARN| Hardware does not support the number of textures defined.
2020-06-01 12:45:40.467 ( 5.652s) [ ] vtkOpenGLState.cxx:1380 WARN| Hardware does not support the number of textures defined.
2020-06-01 12:45:40.484 ( 5.669s) [ ] vtkShaderProgram.cxx:437 ERR| vtkShaderProgram (000002380CB1D770): 1: #version 150
2: #ifndef GL_ES
3: #define highp
4: #define mediump
5: #define lowp
6: #endif // GL_ES
7: #define attribute in
8: #define varying out
9:
10:
11: /*=========================================================================
12:
13: Program: Visualization Toolkit
14: Module: vtkPolyDataVS.glsl
15:
16: Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
17: All rights reserved.
18: See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
19:
20: This software is distributed WITHOUT ANY WARRANTY; without even
21: the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
22: PURPOSE. See the above copyright notice for more information.
23:
24: =========================================================================*/
25:
26: in vec4 vertexMC;
27:
28:
29:
30: // frag position in VC
31: out vec4 vertexVCVSOutput;
32:
33: // optional normal declaration
34: //VTK::Normal::Dec
35:
36: // extra lighting parameters
37: //VTK::Light::Dec
38:
39: // Texture coordinates
40: //VTK::TCoord::Dec
41:
42: // material property values
43: //VTK::Color::Dec
44:
45: // clipping plane vars
46: //VTK::Clip::Dec
47:
48: // camera and actor matrix values
49: uniform mat4 MCDCMatrix;
50: uniform mat4 MCVCMatrix;
51:
52: // Apple Bug
53: //VTK::PrimID::Dec
54:
55: // Value raster
56: //VTK::ValuePass::Dec
57:
58: // picking support
59: //VTK::Picking::Dec
60:
61: void main()
62: {
63: //VTK::Color::Impl
64:
65: //VTK::Normal::Impl
66:
67: //VTK::TCoord::Impl
68:
69: //VTK::Clip::Impl
70:
71: //VTK::PrimID::Impl
72:
73: vertexVCVSOutput = MCVCMatrix * vertexMC;
74: gl_Position = MCDCMatrix * vertexMC;
75:
76:
77: //VTK::ValuePass::Impl
78:
79: //VTK::Light::Impl
80:
81: //VTK::Picking::Impl
82: }
2020-06-01 12:45:41.157 ( 6.341s) [ ] vtkShaderProgram.cxx:438 ERR| vtkShaderProgram (000002380CB1D770): Could not create shader object.
VTK works in a lazy-evaluation mode to preserve performance: you have to request a render manually after making modifications if you want to see them.
So add a renderWindow.Render() call after the SetColor(colors.GetColor3d("Silver")) line.
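For example, at the end of main() in the code above (a minimal sketch; renderWindow and b are the names from the question):
b.GetProperty().SetColor(colors.GetColor3d("Silver"))
renderWindow.Render()  # trigger a redraw so the new color actually shows up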
What is the best way to split files into n groups with a similar total file size, given a DataFrame with a column of file names and a column of file sizes? While searching, this sounded similar to the knapsack problem, except there are no hard capacity limits. Any solution that is quick and produces groups of any length whose totals are close to the average (whether under or over) would be a great improvement.
My first attempt (t1) creates groups by counting off rows in a circular (round-robin) order. The next attempt (t2) sorts the DataFrame by size first, in the hope of preventing one group from getting a clump of large files, but the approach is otherwise the same as t1. There are usually ~300 files total, so I was not sure whether it is practical to evaluate all possible combinations or whether there is a better approach.
from itertools import repeat, chain
from math import ceil
import pandas as pd
source_dict = {'file_name_': {0: 'file_0', 1: 'file_1', 2: 'file_2', 3: 'file_3', 4: 'file_4', 5: 'file_5', 6: 'file_6', 7: 'file_7', 8: 'file_8', 9: 'file_9'
, 10: 'file_10', 11: 'file_11', 12: 'file_12', 13: 'file_13', 14: 'file_14', 15: 'file_15', 16: 'file_16', 17: 'file_17', 18: 'file_18'
, 19: 'file_19', 20: 'file_20', 21: 'file_21', 22: 'file_22', 23: 'file_23', 24: 'file_24', 25: 'file_25', 26: 'file_26', 27: 'file_27'
, 28: 'file_28', 29: 'file_29', 30: 'file_30', 31: 'file_31', 32: 'file_32', 33: 'file_33', 34: 'file_34', 35: 'file_35', 36: 'file_36'
, 37: 'file_37', 38: 'file_38', 39: 'file_39', 40: 'file_40', 41: 'file_41', 42: 'file_42', 44: 'file_44', 45: 'file_45', 46: 'file_46'
, 47: 'file_47', 48: 'file_48', 49: 'file_49', 50: 'file_50'}
, 'file_size': {0: 3407245, 1: 3973920, 2: 7408640, 3: 4086426, 4: 12795600, 5: 2155039, 6: 9514856, 7: 13190235, 8: 32043703, 9: 4936240, 10: 9591964
, 11: 70153435, 12: 5106282, 13: 212414, 14: 24998146, 15: 11605646, 16: 2427516, 17: 23634036, 18: 169983, 19: 7011305, 20: 2106828
, 21: 3420304, 22: 11254, 23: 1271220, 24: 1164562, 25: 83613105, 26: 1030701, 27: 366948, 28: 7014895, 29: 8274642, 30: 2731629
, 31: 1596299, 32: 524, 33: 302, 34: 42332100, 35: 5441036, 36: 40633457, 37: 34680208, 38: 123505, 39: 15905009, 40: 52071678
, 41: 10624966, 42: 15425993, 44: 27673986, 45: 144988223, 46: 62619919, 47: 21562386, 48: 10620299, 49: 254661, 50: 232406680}}
sampleSizesDF = pd.DataFrame(source_dict)
desired_groups = 4 # multiprocessing.cpu_count()
group_size = ceil(sampleSizesDF.file_name_.count() / desired_groups)
max_length = sampleSizesDF.file_name_.count() # upper bound for list
# trial 1, count off and group
my_groups = list(chain(*repeat(list(range(0,desired_groups)), group_size)))[:max_length]
sampleSizesDF['pGroup_t1'] = my_groups
# trial 2, sort + trial 1
sampleSizesDF.sort_values('file_size', inplace = True)
sampleSizesDF['pGroup_t2'] = my_groups
pGroupDistDF = pd.concat([
sampleSizesDF.groupby('pGroup_t1').agg({'file_size': 'sum'})
, sampleSizesDF.groupby('pGroup_t2').agg({'file_size': 'sum'})
]
, axis=1)
pGroupDistDF.columns = ['t1', 't2']
pGroupDistDF = pGroupDistDF.merge(pd.DataFrame(pGroupDistDF.values, columns=['t1_dist', 't2_dist']).apply(lambda x: x/x.sum()), left_index=True, right_index=True)
presentation_order = ['t1', 't1_dist', 't2', 't2_dist']
pGroupDistDF[presentation_order]
          t1   t1_dist         t2   t2_dist
0  304015174  0.281916  291719748  0.270514
1  470551775  0.436347  396619142  0.367788
2  134901157  0.125095  183490246  0.170152
3  168921844  0.156643  206560814  0.191546
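For comparison, a common heuristic for this kind of balancing (not one of the attempts above, just a sketch using the same sampleSizesDF and desired_groups) is to sort the files by size, largest first, and always assign the next file to the group with the smallest running total:
# Greedy sketch: put each file (largest first) into the currently lightest group.
totals = [0] * desired_groups
assignment = {}
for name, size in sampleSizesDF.sort_values('file_size', ascending=False)[['file_name_', 'file_size']].itertuples(index=False):
    group = totals.index(min(totals))   # group with the smallest total so far
    assignment[name] = group
    totals[group] += size
sampleSizesDF['pGroup_greedy'] = sampleSizesDF.file_name_.map(assignment)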
The following code plots an interactive figure where I can toggle specific lines on/off. This works perfectly when I'm working in an IPython Notebook.
import pandas as pd
import numpy as np
from itertools import cycle
import matplotlib.pyplot as plt, mpld3
from matplotlib.widgets import CheckButtons
import matplotlib.patches
import seaborn as sns
%matplotlib nbagg
sns.set(style="whitegrid")
df = pd.DataFrame({'freq': {0: 0.01, 1: 0.02, 2: 0.029999999999999999, 3: 0.040000000000000001, 4: 0.050000000000000003, 5: 0.059999999999999998, 6: 0.070000000000000007, 7: 0.080000000000000002, 8: 0.089999999999999997, 9: 0.10000000000000001, 10: 0.01, 11: 0.02, 12: 0.029999999999999999, 13: 0.040000000000000001, 14: 0.050000000000000003, 15: 0.059999999999999998, 16: 0.070000000000000007, 17: 0.080000000000000002, 18: 0.089999999999999997, 19: 0.10000000000000001, 20: 0.01, 21: 0.02, 22: 0.029999999999999999, 23: 0.040000000000000001, 24: 0.050000000000000003, 25: 0.059999999999999998, 26: 0.070000000000000007, 27: 0.080000000000000002, 28: 0.089999999999999997, 29: 0.10000000000000001}, 'kit': {0: 'B', 1: 'B', 2: 'B', 3: 'B', 4: 'B', 5: 'B', 6: 'B', 7: 'B', 8: 'B', 9: 'B', 10: 'A', 11: 'A', 12: 'A', 13: 'A', 14: 'A', 15: 'A', 16: 'A', 17: 'A', 18: 'A', 19: 'A', 20: 'C', 21: 'C', 22: 'C', 23: 'C', 24: 'C', 25: 'C', 26: 'C', 27: 'C', 28: 'C', 29: 'C'}, 'SNS': {0: 91.198979591799997, 1: 90.263605442199989, 2: 88.818027210899999, 3: 85.671768707499993, 4: 76.23299319729999, 5: 61.0969387755, 6: 45.1530612245, 7: 36.267006802700003, 8: 33.0782312925, 9: 30.739795918400002, 10: 90.646258503400006, 11: 90.306122449, 12: 90.178571428600009, 13: 89.498299319699996, 14: 88.435374149599994, 15: 83.588435374200003, 16: 75.212585034, 17: 60.969387755100001, 18: 47.278911564600001, 19: 37.627551020399999, 20: 90.986394557800011, 21: 90.136054421799997, 22: 89.540816326499993, 23: 88.690476190499993, 24: 86.479591836799997, 25: 82.397959183699996, 26: 73.809523809499993, 27: 63.180272108800004, 28: 50.935374149700003, 29: 41.241496598699996}, 'FPR': {0: 1.0953616823100001, 1: 0.24489252678500001, 2: 0.15106142277199999, 3: 0.104478605177, 4: 0.089172822253300005, 5: 0.079856258734300009, 6: 0.065881413455800009, 7: 0.059892194050699996, 8: 0.059892194050699996, 9: 0.0578957875824, 10: 0.94097291541899997, 11: 0.208291741532, 12: 0.14773407865800001, 13: 0.107805949291, 14: 0.093165635189999998, 15: 0.082518134025399995, 16: 0.074532508152000007, 17: 0.065881413455800009, 18: 0.062554069341799995, 19: 0.061888600519100001, 20: 0.85313103081100006, 21: 0.18899314567100001, 22: 0.14107939043000001, 23: 0.110467824582, 24: 0.099820323417899995, 25: 0.085180009316599997, 26: 0.078525321088700001, 27: 0.073201570506399985, 28: 0.071870632860800004, 29: 0.0705396952153}})
tableau20 = ["#6C6C6C", "#92D050", "#FFC000"]
tableau20 = cycle(tableau20)
kits = ["A","B", "C"]
color = iter(["#6C6C6C", "#92D050", "#FFC000"])
fig = plt.figure(figsize=(12,8))
for kit in kits:
    colour = next(color)
    for i in df.groupby('kit'):
        grouped_df = pd.DataFrame(np.array(i[1]), columns=['freq', 'SNS', 'FPR', 'kit'])
        if grouped_df.kit.tolist()[1] == kit:
            x = [float(value) for i, value in enumerate(grouped_df.FPR)]
            y = [float(value) for i, value in enumerate(grouped_df.SNS)]
            x, y = (list(x) for x in zip(*sorted(zip(x, y))))
            label = grouped_df['kit'].tolist()[1]
            p = plt.plot(x, y, "-o", label=label, color=colour)

labels = [label.get_text() for label in plt.legend().texts]
plt.legend().set_visible(False)

for i, value in enumerate(labels):
    exec('label%s="%s"' % (i, value))
for i in range(len(labels)):
    exec('l%s=fig.axes[0].lines[i]' % (i))

rax = plt.axes([0.92, 0.7, 0.2, 0.2], frameon=False)
check = CheckButtons(rax, (labels), ('True ' * len(labels)))
for i, rec in enumerate(check.rectangles):
    rec.set_facecolor(tableau20.next())

def func(label):
    for i in range(len(labels)):
        if label == eval('label%s' % (i)): eval('l%s.set_visible(not l%s.get_visible())' % (i, i))
    plt.draw()

check.on_clicked(func)
plt.show()
The problem is, I need to export the notebook as HTML to share with colleagues who know nothing about Python. How can I export the notebook to HTML and have it keep the interactive (toggle) functionality, which it currently loses? Thanks!
Maybe you don't need to export the Jupyter notebook to HTML at all; instead, share the notebook link with the other people and they can visit the URL in their browser.
A Jupyter notebook plugin can help you do this more efficiently: jupyter/dashboards. It's maintained by the official Jupyter team, it lets you share your notebook like a report, and you can control which cells are displayed and where each cell appears. Worth a try!
I have a program that uses a lot of files (photos, text files, and other things). I would like to organize them by putting the files into a separate folder, but I don't know how to do this. So my question is: how can I move the files and still have Python find them? I have not been able to find other answers relating to this. Thanks. If you need to see some of the code, here it is:
If I want to open my text files, I use this.
text_file = open("takingInventory.txt", "r")
takingInventoryInfo = text_file.read()
text_file.close()
When I want to open a picture, I use this.
image = Image.open("pgraniteRecipe.png")
If I understand your question right, the os module can do what you need. Just declare the path of the new folder you created, then join that folder path with the name of the file.
import os
new_folder = "Path/to/new/folder"
text_file = open(os.path.join(new_folder,"takingInventory.txt"), "r")
takingInventoryInfo = text_file.read()
text_file.close()
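The same pattern works for the image (assuming PIL's Image is imported as in the question):
# os.path.join builds the full path from the folder and the file name
image = Image.open(os.path.join(new_folder, "pgraniteRecipe.png"))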
All you need is to supply the full path to the file:
with open("full/path/to/takingInventory.txt") as text_file:
....
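For example, filled in with the read from the question, that would look like this (the with block also closes the file for you, so the explicit close() is no longer needed):
with open("full/path/to/takingInventory.txt") as text_file:
    takingInventoryInfo = text_file.read()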
If you are creating a Python application for distribution, you can create a package structure that lets Python find any file relative to the folders in your package. But if a user moves or renames a file or folder, you are in trouble: Python has no way of knowing that a user has moved or renamed anything.
You can use a try/except to open the files and do whatever is needed in the case where the file is not available anymore:
try:
    f = open("missing.file")
except IOError as e:
    print(e.errno)
    # do whatever is relevant to warn the user here
There are specific error codes for different IOErrors, so you can be more specific about what action to take based on the errno returned:
import errno
print(errno.errorcode)
{1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO', 6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN', 12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY', 17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR', 22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY', 27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK', 32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG', 37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG', 43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST', 48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE', 53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT', 59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR', 64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV', 69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT', 74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG', 79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC', 84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK', 89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT', 93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP', 96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE', 99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET', 103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN', 107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT', 111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY', 115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM', 119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT', 123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY', 127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED', 130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
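For example, to react only to a missing file (ENOENT) and re-raise anything else (a sketch):
import errno
try:
    f = open("missing.file")
except IOError as e:
    if e.errno == errno.ENOENT:
        print("File was moved or deleted - warn the user here")
    else:
        raise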
The bottom line is that even after creating a good package structure, there is no magic way to know whether a user has deleted, moved, or renamed any of your package files.
If you are writing a Python package to be distributed and have a file structure like so:
base/
    __init__.py
    apples.py
    bananas.py
    cherries.py
    main.py
    pictures/
        # a whole bunch of pictures
In your __init__.py file, you can define a variable called base_dir, like so:
import os

base_dir = os.path.dirname(os.path.abspath(__file__))
When run, base_dir will be the full path of the folder that __init__.py is in. For example, on my computer, if the init file were located in some random folder on my desktop, I could import base_dir into my other scripts and it would read:
>>> base_dir
'/users/zinedine/desktop/python/base'
I can then use os.path.join as needed like this (if going to the pictures folder):
pictures_dir = os.path.join(base_dir, "pictures")
>>> pictures_dir
'/users/zinedine/desktop/python/base/pictures'
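You can then build any file path off base_dir so it works no matter what the current working directory is; for example, for the text file from the question (a sketch, assuming takingInventory.txt lives inside the package folder):
# The path is built from the package location, not from the current working directory.
with open(os.path.join(base_dir, "takingInventory.txt")) as text_file:
    takingInventoryInfo = text_file.read()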
If you know which folder a file should be in and you are concerned that some files might be missing, you can use the glob module to check.
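For example (a sketch using the pictures_dir from above):
import glob
# An empty list here means no PNG files were found in the pictures folder.
png_files = glob.glob(os.path.join(pictures_dir, "*.png"))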
For a specific exception type (let's say IOError), how can I extract the complete list of errnos and descriptions, like this:
Errno 2: No such file or directory
Errno 122: Disk quota exceeded
...
Since the error codes differ by platform, and the user's language may differ as well, it is usually best to print the exception in the normal fashion.
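For example, printing the exception already includes the errno and its message:
try:
    open("missing.file")
except IOError as e:
    print(e)    # e.g. [Errno 2] No such file or directory: 'missing.file'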
However, if you really want the list:
Edit:
For Python 2:
import os
import errno
print {i:os.strerror(i) for i in sorted(errno.errorcode)}
For Python 3:
import os
import errno
from pprint import pprint
pprint( {i:os.strerror(i) for i in sorted(errno.errorcode)} )
Prints (on OS X):
{1: 'Operation not permitted', 2: 'No such file or directory',
3: 'No such process', 4: 'Interrupted system call',
5: 'Input/output error', 6: 'Device not configured',
7: 'Argument list too long', 8: 'Exec format error',
9: 'Bad file descriptor', 10: 'No child processes',
11: 'Resource deadlock avoided', 12: 'Cannot allocate memory',
13: 'Permission denied', 14: 'Bad address', 15: 'Block device required',
16: 'Resource busy', 17: 'File exists', 18: 'Cross-device link',
19: 'Operation not supported by device', 20: 'Not a directory',
21: 'Is a directory', 22: 'Invalid argument',
23: 'Too many open files in system', 24: 'Too many open files',
25: 'Inappropriate ioctl for device', 26: 'Text file busy',
27: 'File too large', 28: 'No space left on device', 29: 'Illegal seek',
30: 'Read-only file system', 31: 'Too many links', 32: 'Broken pipe',
33: 'Numerical argument out of domain', 34: 'Result too large',
35: 'Resource temporarily unavailable', 36: 'Operation now in progress',
37: 'Operation already in progress', 38: 'Socket operation on non-socket',
39: 'Destination address required', 40: 'Message too long',
41: 'Protocol wrong type for socket', 42: 'Protocol not available',
43: 'Protocol not supported', 44: 'Socket type not supported',
46: 'Protocol family not supported',
47: 'Address family not supported by protocol family',
48: 'Address already in use', 49: "Can't assign requested address",
50: 'Network is down', 51: 'Network is unreachable',
52: 'Network dropped connection on reset',
53: 'Software caused connection abort', 54: 'Connection reset by peer',
55: 'No buffer space available', 56: 'Socket is already connected',
57: 'Socket is not connected', 58: "Can't send after socket shutdown",
59: "Too many references: can't splice", 60: 'Operation timed out',
61: 'Connection refused', 62: 'Too many levels of symbolic links',
63: 'File name too long', 64: 'Host is down', 65: 'No route to host',
66: 'Directory not empty', 68: 'Too many users',
69: 'Disc quota exceeded', 70: 'Stale NFS file handle',
71: 'Too many levels of remote in path', 77: 'No locks available',
78: 'Function not implemented',
84: 'Value too large to be stored in data type', 90: 'Identifier removed',
91: 'No message of desired type', 92: 'Illegal byte sequence',
94: 'Bad message', 95: 'EMULTIHOP (Reserved)',
96: 'No message available on STREAM', 97: 'ENOLINK (Reserved)',
98: 'No STREAM resources', 99: 'Not a STREAM', 100: 'Protocol error',
101: 'STREAM ioctl timeout', 102: 'Operation not supported on socket'}
I fear those come straight from the standard C library, so you'll have to look them up in your system documentation (glibc, Microsoft, UNIX…).
As others have said, you should check your system's <errno.h>.
If you want to do it in Python:
import errno
print errno.errorcode
The output would be:
{1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO', 6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EDEADLK', 12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY', 17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR', 22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY', 27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK', 32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EAGAIN', 36: 'EINPROGRESS', 37: 'EALREADY', 38: 'ENOTSOCK', 39: 'EDESTADDRREQ', 40: 'EMSGSIZE', 41: 'EPROTOTYPE', 42: 'ENOPROTOOPT', 43: 'EPROTONOSUPPORT', 44: 'ESOCKTNOSUPPORT', 46: 'EPFNOSUPPORT', 47: 'EAFNOSUPPORT', 48: 'EADDRINUSE', 49: 'EADDRNOTAVAIL', 50: 'ENETDOWN', 51: 'ENETUNREACH', 52: 'ENETRESET', 53: 'ECONNABORTED', 54: 'ECONNRESET', 55: 'ENOBUFS', 56: 'EISCONN', 57: 'ENOTCONN', 58: 'ESHUTDOWN', 59: 'ETOOMANYREFS', 60: 'ETIMEDOUT', 61: 'ECONNREFUSED', 62: 'ELOOP', 63: 'ENAMETOOLONG', 64: 'EHOSTDOWN', 65: 'EHOSTUNREACH', 66: 'ENOTEMPTY', 68: 'EUSERS', 69: 'EDQUOT', 70: 'ESTALE', 71: 'EREMOTE', 77: 'ENOLCK', 78: 'ENOSYS', 84: 'EOVERFLOW', 90: 'EIDRM', 91: 'ENOMSG', 92: 'EILSEQ', 94: 'EBADMSG', 95: 'EMULTIHOP', 96: 'ENODATA', 97: 'ENOLINK', 98: 'ENOSR', 99: 'ENOSTR', 100: 'EPROTO', 101: 'ETIME', 102: 'EOPNOTSUPP'}
Look for errno.h on your system.
Thanks #the-wolf: I couldn't remember where the constants came from (os.strerror) and decided to place this here for future reference.
def error_codes():
    from os import strerror as SE
    from errno import errorcode as EC
    for k in EC.keys():
        print("error: [Errno %i] %s [%s]" % (k, SE(k), EC[k]))
        #print("{%3i: %s}"%(k,EC[k]))
    del SE, EC
    try: del k
    except: pass

# Just print it
error_codes()