File Not Found Error Django - python

Hello, I'm new to Django and I'm trying to make a web app. I have a working back end, but the problem is that it only runs on the CLI and I have to turn it into a web app.
import os
import natsort
import numpy as np
from django.shortcuts import render
# pl and sl are this project's own helper modules and are assumed to be importable here.

def testing(request):
    ksize = 6
    somsize = 10
    csvname = "input.csv"
    testcap = "testing.pcap"
    pl.csv5("chap/a", testcap)
    # Collect every .csv file produced in the chap directory
    tmparr = []
    for filename in os.listdir("chap"):
        if filename.endswith(".csv"):
            tmparr.append(filename)
    tmparr.sort()
    visual_list = natsort.natsorted(tmparr)
    # Train the SOM and cluster its weights
    csv = sl.opencsv(csvname)
    norm = sl.normalize(csv)
    weights = sl.som(norm, somsize)
    label = sl.kmeans(ksize, weights)
    #for x in range(2, 21):
    #    label = sl.kmeans(x, weights)
    #    print("K is", x, "Score is ", label[1])
    lblarr = np.reshape(label, (somsize, somsize))
    #sl.dispcolor(lblarr)
    classess = sl.cluster_coloring(weights, norm, csv)
    classpercluster = sl.determine_cluster(classess, lblarr, ksize)
    classpercent = sl.toperc(classpercluster)
    print(classpercent)
    #print(classpercluster)
    # Render one hit-map image per csv file
    for x in visual_list:
        temp = "chap/" + x
        tests = sl.opencsv(temp)
        print(tests)
        hits = sl.som_hits(weights, tests)
        name = "img/" + x + ".png"
        sl.disp(lblarr, name, hits)
    return render(request, 'visualization/detail.html')
I get the error "The system cannot find the path specified: 'chap'". I'm not sure whether I should put the chap folder inside the templates folder or in the app folder. Thank you in advance!

It looks like you're using relative paths here. Change them to absolute paths built from the module's own location:
dirpath = os.path.dirname(os.path.abspath(__file__))
chap_dirpath = os.path.join(dirpath, "chap")
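For example, here is a minimal sketch of how the view module could resolve the folder (assuming the chap directory sits next to the view file, e.g. views.py, inside the Django app):

import os

# Absolute path of the directory containing this module (e.g. the app's views.py).
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Assumption: the 'chap' folder lives alongside this module inside the app.
CHAP_DIR = os.path.join(BASE_DIR, "chap")

def list_chap_csvs():
    # Return the CSV file names found inside the chap directory.
    return [f for f in os.listdir(CHAP_DIR) if f.endswith(".csv")]

The same CHAP_DIR prefix can then be used wherever the view currently builds "chap/..." paths.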

Related

Avoid existing folders and bring only folders that don't exist

I have the code below, which downloads attachments into parent_directory over an API connection.
Problem: the code works, but it gets stuck whenever a folder already exists.
Question: how can I make this code skip existing folders? If the folder exists, it should do nothing and just move on to the next iteration of the loop.
import pandas as pd
import os
import zipfile

parent_directory = "folderpath"
csv_file_dir = "myfilepath.csv"
user = "API_username"
key = "API_password"

os.chdir(parent_directory)
bdr_data = pd.read_csv(csv_file_dir)
api_first = "… " + user + ":" + key + "…"

for index, row in bdr_data.iterrows():
    #print(row['url_attachment'])
    name = row['Ref_Num']
    os.makedirs(parent_directory + name)
    os.chdir(parent_directory + name)
    url = api_first + row['url_attachment'] + " -o attachments.zip"
    os.system(url)
    os.chdir(parent_directory)
You can do it like this.
for index, row in bdr_data.iterrows():
    name = row['Ref_Num']
    child_dir = parent_directory + name
    if os.path.exists(child_dir):             # check if the folder already exists
        print(f'{child_dir} already exists')  # you may want to know what was skipped
        continue                              # skip this iteration
    os.makedirs(child_dir)                    # folder not found, so do what you need
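If you only need the makedirs call itself to tolerate an existing folder (and re-downloading into it is acceptable), Python 3.2+ also offers exist_ok, which removes the need for the explicit check:

os.makedirs(child_dir, exist_ok=True)  # no error if the directory already exists

Note that this alone will not skip the download for that folder; the explicit check above is what lets you continue to the next row.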

Iterate through many directories in Python [Python3]

So I have been working on a program that needs to run through each file in a particular directory and do things depending on the file. That bit is done (code below); however, I really need to expand this so that I can pass in as many directories as needed and have the program cycle through them all sequentially. My code is as follows (apologies for the really clunky code):
# Module-level imports assumed by this method:
import os
import numpy as np
from astropy.io import fits   # or pyfits, depending on the project

def createTemplate( self, doType = 0, bandNameL = None, bandName430 = None ):
    '''
    Loads the archive of each file in a directory.
    Depending on the choice made on initialization, a template will be
    created for one band (of the user's choosing) or all bands.
    Templates are saved in self.directory (either CWD or whatever the user
    wishes) as 1D numpy arrays, .npy. If arguments are not provided for the
    template names (without the .npy suffix), default names will be used.
    Usage of the doType parameter: 0 (default) does both bands and returns a tuple.
                                   1 does only the L-band.
                                   2 does only the 430-band.
    '''
    print( "Beginning template creation..." )

    # Initialize the file names if given
    nameL = str( bandNameL ) + ".npy"
    name430 = str( bandName430 ) + ".npy"

    # Set the templates to empty arrays
    self.templateProfile430, self.templateProfileL = [], []

    # Set the call counters for the creation scripts to 0
    self._templateCreationScript430.__func__.counter = 0
    self._templateCreationScriptL.__func__.counter = 0

    # Cycle through each file in the stored directory
    for file in os.listdir( self.directory ):

        # Store the file on the class for use elsewhere
        self.file = file

        # Check whether the file is a fits file
        if self.file.endswith( ".fits" ) or self.file.endswith( ".refold" ):

            # Check if the file is a calibration file (not included in the template)
            if self.file.find( 'cal' ) == -1:

                # Open the fits file header
                hdul = fits.open( self.directory + self.file )

                # Get the frequency band used in the observation.
                frequencyBand = hdul[0].header[ 'FRONTEND' ]

                # Close the header once it's been used or the program becomes very slow.
                hdul.close()

                # Check which band the fits file belongs to
                if frequencyBand == 'lbw' or frequencyBand == 'L_Band':
                    if doType == 0 or doType == 1:
                        self.templateProfileL = self._templateCreationScriptL()

                        # Check if a save name was provided
                        if bandNameL == None:
                            np.save( self.directory + "Lbandtemplate.npy", self.templateProfileL )
                        else:
                            np.save( self.directory + nameL, self.templateProfileL )
                    else:
                        print( "L-Band file" )

                elif frequencyBand == '430':
                    if doType == 0 or doType == 2:
                        self.templateProfile430 = self._templateCreationScript430()

                        # Check if a save name was provided
                        if bandName430 == None:
                            np.save( self.directory + "430bandtemplate.npy", self.templateProfile430 )
                        else:
                            np.save( self.directory + name430, self.templateProfile430 )
                    else:
                        print( "430-Band file" )

                else:
                    print( "Frontend is neither L-Band, nor 430-Band..." )

            else:
                print( "Skipping calibration file..." )

        else:
            print( "{} is not a fits file...".format( self.file ) )

    # Decide what to return based on doType
    if doType == 0:
        print( "Template profiles created..." )
        return self.templateProfileL, self.templateProfile430
    elif doType == 1:
        print( "L-Band template profile created..." )
        return self.templateProfileL
    else:
        print( "430-Band template profile created..." )
        return self.templateProfile430
So currently it works perfectly for one directory; I just need to know how to modify it for multiple directories. Thank you to anyone who can help.
EDIT: self.directory is initialised in the class initialisation, so maybe there's something that needs to be changed there instead:
class Template:
    '''
    Class for the creation, handling and analysis of pulsar template profiles.
    Templates can be created for each frequency band of data in a folder which
    can either be the current working directory or a folder of the user's
    choosing.
    '''

    def __init__( self, directory = None ):
        # Check if the directory was supplied by the user. If not, use current working directory.
        if directory == None:
            self.directory = str( os.getcwd() )
        else:
            self.directory = str( directory )
Here is how you can run your logic in different directories:
>>> import os
>>> path = './python'
>>> for name in os.listdir(path):
...     newpath = path + '/' + name
...     if os.path.isdir(newpath):
...         for filename in os.listdir(newpath):
...             # do the work
...             filepath = newpath + '/' + filename
...             print(filepath)
...
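Applied to the class in the question, a minimal sketch (the directory names below are placeholders, not from the original code) is simply to create one Template per directory and reuse createTemplate unchanged:

directories = [ "/data/session1/", "/data/session2/" ]   # placeholder paths, note the trailing slash

for d in directories:
    # One instance per directory; the existing method then works as before.
    template = Template( directory = d )
    template.createTemplate( doType = 0 )

The trailing slash matters because createTemplate concatenates self.directory + self.file directly.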

folder gets created but no files in it

I have a DigitalOcean droplet running Linux. On this VM I have a Flask web server, and in the __init__.py script I have these two functions:
import os
# app is the Flask application object created elsewhere in __init__.py;
# get_date_time() is the project's own helper that returns a timestamp string.

@app.route('/ds')
def ds():
    fp = "/home/ds/gabo/"
    pre = 'das_'
    req_value = create_vlc_command(pre, fp)
    os.system(req_value)
    return req_value

def create_vlc_command(pre_fn, full_path):
    pre_sc_path = '--scene-path='
    file_name = pre_fn + get_date_time()
    vid_settings = ("--rate=1 --video-filter=scene "
                    "--vout=dummy --run-time=3 "
                    "--scene-format=png --scene-ratio=24 "
                    "--scene-prefix=") + file_name
    scene_path = pre_sc_path + full_path
    req_value = ("vlc {arg_name} {vid_settings} {scene_path} "
                 "{quit}").format(arg_name="htttp:examplexxx.com",
                                  vid_settings=vid_settings,
                                  scene_path=scene_path,
                                  quit='vlc://quit')
    return req_value
When I hit Ctrl+R on droplet-ip/ds I can see the req_value, and the folder gets created on the VM, but I don't see any pictures in that folder. Yet when I run the same req_value later in the terminal, the pictures do get created.
The command on the page:
vlc http://exampleamplexxx.com/dsa.m3u8 --video-filter=scene --vout=dummy --run-time=3 --scene-format=png --scene-ratio=24 --scene-prefix=pi_pic_lc_2017_12_09_15_58_30 --scene-path=/home/ds/gabo/ vlc://quit
Is something wrong with the os.system call, or what am I doing wrong?

Calculate Each dropbox folder size recursively using python api

EDIT: I want to calculate the size of each folder, not just the entire Dropbox size... My code works fine for the whole Dropbox size.
I am having difficulty calculating each folder's size with the Dropbox Python API, since Dropbox reports a folder's size as zero.
Here's my code so far, but it's giving me the wrong answer:
def main(dp_path):
    a = client.metadata(dp_path)
    size_local = 0
    for x in a['contents']:
        if x['is_dir'] == False:
            global size
            size += int(x['bytes'])
            size_local += int(x['bytes'])
            #print "Total size so far :" + str(size/(1024.00*1024.00)) + " Mb..."
        if x['is_dir'] == True:
            a = main(str(x['path']))
            print str(x['path']) + " size==" + str(a/(1024.00*1024.00)) + " Mb..."
    return size_local + size

if __name__ == '__main__':
    global size
    size = 0
    main('/')
    print str(size/(1024.00*1024.00)) + " Mb"
EDIT 2: It seems I misunderstood the question. Here's code that prints out the sizes of each folder (in order of decreasing size):
from dropbox.client import DropboxClient
from collections import defaultdict

client = DropboxClient('<YOUR ACCESS TOKEN>')

sizes = {}
cursor = None
while cursor is None or result['has_more']:
    result = client.delta(cursor)
    for path, metadata in result['entries']:
        sizes[path] = metadata['bytes'] if metadata else 0
    cursor = result['cursor']

foldersizes = defaultdict(lambda: 0)
for path, size in sizes.items():
    segments = path.split('/')
    for i in range(1, len(segments)):
        folder = '/'.join(segments[:i])
        if folder == '': folder = '/'
        foldersizes[folder] += size

for folder in reversed(sorted(foldersizes.keys(), key=lambda x: foldersizes[x])):
    print '%s: %d' % (folder, foldersizes[folder])
EDIT: I had a major bug in the second code snippet (the delta one), and I've now tested all three and found them all to report the same number.
This works:
from dropbox.client import DropboxClient

client = DropboxClient('<YOUR ACCESS TOKEN>')

def size(path):
    return sum(
        f['bytes'] if not f['is_dir'] else size(f['path'])
        for f in client.metadata(path)['contents']
    )

print size('/')
But it's much more efficient to use /delta:
sizes = {}
cursor = None
while cursor is None or result['has_more']:
    result = client.delta(cursor)
    for path, metadata in result['entries']:
        sizes[path] = metadata['bytes'] if metadata else 0
    cursor = result['cursor']

print sum(sizes.values())
And if you truly just need to know the overall usage for the account, you can just do this:
quota_info = client.account_info()['quota_info']
print quota_info['normal'] + quota_info['shared']
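As a side note, DropboxClient and /delta belong to the old v1 Python SDK, which has since been deprecated. A rough sketch of the same overall-usage lookup with the v2 SDK (the token is a placeholder) would be:

import dropbox

dbx = dropbox.Dropbox('<YOUR ACCESS TOKEN>')
usage = dbx.users_get_space_usage()       # returns a SpaceUsage object
print('%.2f Mb used' % (usage.used / (1024.0 * 1024.0)))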

Google Docs Api Python Prevent collections resources to be seen in Home

I'm trying, with the Google Docs API (Python), to create collections and subcollections and to upload files into a created subcollection.
First question:
Everything is OK with the code below; the hierarchy is correct (subfolder1 under folder1, the uploaded file under subfolder1). The only issue is that the subfolder and the file are also visible in Home for the end user.
I would like only the top-level collection to be visible in Home.
Is there a way to prevent the resources (sub-collections and files) from being displayed in Home?
Note: I have tried the following alternatives but still get the same result:
1) the 'collection=' parameter of create_resource gives the same result
2) ClientLogin and two-legged OAuth give the same result
Second question:
Is it possible to set the description field?
import gdata.data
import gdata.docs.client
import gdata.acl.data
import gdata.docs.data
GAPPS_OAUTH_CONSUMER_KEY = "xxxx"
GAPPS_OAUTH_CONSUMER_SECRET = "xxxxx"
GAPPS_ADMIN_ACCOUNT = "x"
GAPPS_CLIENT_LOGIN_LOGIN='xxxxx'
GAPPS_CLIENT_LOGIN_PWD='xxxxx'
GAPPS_CLIENT_LOGIN_APP='xxxxxx'
filepath = 'C:\\Users\\xxxxx\\Pictures\\'
filename = 'xxxxxx.png'
path = filepath + filename
client = gdata.docs.client.DocsClient()
client.ssl = True
#client.ClientLogin(GAPPS_CLIENT_LOGIN_LOGIN, GAPPS_CLIENT_LOGIN_PWD, GAPPS_CLIENT_LOGIN_APP)
client.auth_token = gdata.gauth.TwoLeggedOAuthHmacToken(GAPPS_OAUTH_CONSUMER_KEY, GAPPS_OAUTH_CONSUMER_SECRET, GAPPS_ADMIN_ACCOUNT)
# create a folder
collection1 = gdata.docs.data.Resource('folder', title = 'Script Folder')
collection1 = client.create_resource(collection1)
# create a sub-folder in collection1
subcollection1 = gdata.docs.data.Resource('folder', title = 'Script Sub Folder')
subcollection1 = client.create_resource(subcollection1)
res = client.move_resource(subcollection1, collection = collection1, keep_in_collections = False)
# Upload the resource in subcollection1
doc = gdata.docs.data.Resource(type = 'file', title = filename)
media = gdata.data.MediaSource()
media.SetFileHandle(path, 'application/octet-stream')
create_uri = gdata.docs.client.RESOURCE_UPLOAD_URI + '?convert=false'
doc = client.CreateResource(doc, create_uri = create_uri, media = media)
print 'Created, and uploaded:', doc.title.text, doc.resource_id.text
client.move_resource(doc, collection = subcollection1, keep_in_collections = False)
Here is the solution:
subcollection1 = gdata.docs.data.Resource('folder', title = 'Script Sub Folder')
subcollection1.AddCategory(gdata.docs.data.LABELS_NS, gdata.docs.data.LABELS_NS + "#" +gdata.docs.data.HIDDEN_LABEL, gdata.docs.data.HIDDEN_LABEL)
subcollection1 = client.create_resource(subcollection1)
There is another much simpler approach.
subcollection1 = client.create_resource(subcollection1,collection=collection1)
With this approach, the Script Sub Folder never appears in your root folder.
