I am running a simple Python web server (SimpleHTTPServer) on my Linux machine. I wrote a Python program to download all the files hosted on that server to my Windows machine, but for some reason the program throws FileNotFoundError even though the directory exists and I have provided an absolute path.
Here is the code: https://drive.google.com/file/d/1CDrueDJcbu2z1XeeB_iYv0zmfIX1cCkx/view?usp=sharing
It works correctly on Linux but has trouble on Windows. Thanks.
import requests
import argparse
from sys import argv
from urllib.parse import unquote
import os
from time import time
import random
from colorama import Fore, Style
import platform
def formatFiles(name):
    name = name[13:-9]
    nameLen = len(name) - 2
    nameLen = int(nameLen/2)
    name = name[:nameLen]
    return name
# Creating a Temporary Folder to Download all the files in it
def fileCreate(saveFolder):
    random.seed(int(time()))
    text = ""
    for x in range(5):
        y = random.randrange(65, 91)
        text += chr(y)
    saveFolder += text
    os.popen("mkdir {}".format(saveFolder))
    print("Temp Directory {} created to save files/folders".format(text))
    return saveFolder
def winDows(endPoint, banner):
    resp = requests.get(endPoint, allow_redirects=True)
    resp = resp.text.split("\n")
    resp = list(map(unquote, resp[10:-5]))  # URL decoding using unquote
    resp = list(map(formatFiles, resp))
    for dir in resp:
        tempPath = ""
        tempEndpoint = endPoint[len(serverURL):]  # Getting directory structure by removing IP:PORT in URL
        tempPath = "\\".join(tempEndpoint.split("/"))  # Removing / and adding \\ for Windows path
        print(banner + dir)
        tdir = dir
        if(dir[-1] == "/"):
            if( dir.split(" ")!=1 ):  # If the directory name has spaces
                tdir = dir[:-1]
                tdir = "\""+tdir+"\""+"\\"
            os.popen("mkdir "+saveFolder+"\\"+tempPath+tdir)
            r = winDows(endPoint+dir, banner[:-4]+" |___")
        else:
            data = open(saveFolder+"\\"+tempPath+dir, "wb")
            fileData = requests.get(endPoint+dir, allow_redirects=True)
            data.write(fileData.content)
            data.close()
    return 1
parser = argparse.ArgumentParser()
sideBanner = " |___ "
parser.add_argument("ip", help = "IP address of FTP Server", type=ip_valid)
parser.add_argument("port" , help = "FTP Server Port you want to access", type=port_valid)
parser.add_argument("dst", help="Destination Path to save your files")
args = parser.parse_args()
ip = argv[1]
port = argv[2]
saveFolder = argv[3]
serverURL = "http://"+ip+":"+port+"/"
saveFolder = fileCreate(saveFolder)
print("Destination Folder - {}".format(saveFolder))
if(platform.system() == "Linux"):
    linuX(serverURL, sideBanner)
else:
    winDows(serverURL, sideBanner)
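For what it's worth, one difference between the two platforms (an assumption about the likely cause, not a confirmed diagnosis): os.popen("mkdir ...") hands directory creation to a shell asynchronously, and Windows quoting and backslash rules differ from Linux, so the target folder may not exist yet when open(..., "wb") runs. A small sketch of a more portable way to create the directories, using os.makedirs and os.path.join (ensure_dir is a new helper name introduced here, not part of the script above):

import os

def ensure_dir(saveFolder, *parts):
    # os.path.join picks the right separator on Windows and Linux,
    # and os.makedirs creates the directory synchronously;
    # exist_ok=True avoids an error if it already exists.
    path = os.path.join(saveFolder, *parts)
    os.makedirs(path, exist_ok=True)
    return path

# hypothetical usage inside winDows(): create the directory first,
# e.g. ensure_dir(saveFolder, tempPath, tdir.strip('"')),
# then open the file for writing inside that directory.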
I have a problem with Python input.
I'm creating a Python sneakers bot. I have a CLI that, when opened, shows the .txt files in the directory and then asks you, via input, which ones you want to use to start your task. [1]
I implemented watchdog observers that watch my directory for files being added or modified; when a file is modified, the watchdog script refreshes the CLI, but the input [1] the user was asked is still active. I need to stop that input after the screen gets cleared. How can I make this possible?
Here is my code:
def proxieschoice():
    import findfiles
    findfiles.my_observer.start()
    proxiesfile = 0
    proxynamelist = {}
    print('------------------------------')
    for file in glob.glob("*.txt"):
        proxiesfile = proxiesfile + 1
        with open(file) as f:
            count = sum(1 for _ in f)
        proxynamelist[proxiesfile] = file
        print(f"[{Fore.BLUE}{proxiesfile}{Style.RESET_ALL}] {file} [{count} proxies]")
    print('------------------------------')
    try:
        prox = int(input(f"{Fore.BLUE}>> {Style.RESET_ALL} Which proxies you want to use? "))
    except ValueError:
        print('Invalid Input')
        proxieschoice()
Here is findfiles.py:
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import converse
patterns = ["*"]
ignore_patterns = None
ignore_directories = False
case_sensitive = True
my_event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)
def on_created(event):
    converse.cleanscreen()
    converse.proxieschoice()

def on_deleted(event):
    converse.cleanscreen()
    converse.proxieschoice()

def on_modified(event):
    converse.cleanscreen()
    converse.proxieschoice()

def on_moved(event):
    converse.cleanscreen()
    converse.proxieschoice()
my_event_handler.on_created = on_created
my_event_handler.on_deleted = on_deleted
my_event_handler.on_modified = on_modified
my_event_handler.on_moved = on_moved
path = "."
go_recursively = True
my_observer = Observer()
my_observer.schedule(my_event_handler, path, recursive=go_recursively)
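One possible direction (a rough sketch under the assumption of a POSIX terminal, not a drop-in fix): rather than letting the watchdog callbacks call proxieschoice() while the old input() is still blocking, have them only set a flag, and replace the blocking input() with a small polling loop that gives up and lets the menu redraw whenever the flag is set. The names prompt_with_refresh and files_changed below are introduced here for illustration.

import select
import sys
import threading

files_changed = threading.Event()  # the watchdog handlers call files_changed.set() instead of proxieschoice()

def prompt_with_refresh(prompt, poll_interval=0.5):
    """Return the user's answer, or None if the file list changed while waiting."""
    print(prompt, end="", flush=True)
    while True:
        if files_changed.is_set():
            files_changed.clear()
            return None  # caller clears the screen and redraws the menu
        # wait briefly for keyboard input without blocking forever (POSIX only)
        ready, _, _ = select.select([sys.stdin], [], [], poll_interval)
        if ready:
            return sys.stdin.readline().strip()

proxieschoice() would then call prompt_with_refresh() instead of input() and simply restart itself when it gets None back.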
I want to write a Python script that grabs the bing.com wallpaper and saves it.
The URLs of these wallpapers look like:
http://www.bing.com/az/hprichbg/rb/EuropeESA_DE-DE7849418832_1920x1080.jpg
http://www.bing.com/az/hprichbg/rb/CanisLupus_DE-DE11366975292_1920x1080.jpg
http://www.bing.com/az/hprichbg/rb/HouseBoats_DE-DE8695714746_1920x1080.jpg
Is there a way to find the image URL of today's wallpaper automatically?
Based on a few of the useful answers in this related SO question, here's a simple Python script to fetch the Bing photo of the day:
import requests
import json
BING_URI_BASE = "http://www.bing.com"
BING_WALLPAPER_PATH = "/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US"
# open the Bing HPImageArchive URI and ask for a JSON response
resp = requests.get(BING_URI_BASE + BING_WALLPAPER_PATH)
if resp.status_code == 200:
    json_response = json.loads(resp.content)
    wallpaper_path = json_response['images'][0]['url']
    filename = wallpaper_path.split('/')[-1]
    wallpaper_uri = BING_URI_BASE + wallpaper_path

    # open the actual wallpaper uri, and write the response as an image on the filesystem
    response = requests.get(wallpaper_uri)
    if response.status_code == 200:  # check the wallpaper request, not the first response
        with open(filename, 'wb') as f:
            f.write(response.content)
    else:
        raise ValueError("[ERROR] non-200 response from Bing server for '{}'".format(wallpaper_uri))
else:
    raise ValueError("[ERROR] non-200 response from Bing server for '{}'".format(BING_URI_BASE + BING_WALLPAPER_PATH))
This will write a file such as TurtleTears_EN-US7942276596_1920x1080.jpg to the same directory where the script is executed. Of course, you can tweak a whole bunch of things here, but it gets the job done reasonably easily.
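If you want more than just today's photo, the same HPImageArchive endpoint accepts larger n and non-zero idx values (both already appear in the scripts in this thread); a small sketch reusing the names from the script above, assuming the response keeps the same 'images' structure:

# fetch the three most recent photos instead of just today's
resp = requests.get(BING_URI_BASE + "/HPImageArchive.aspx?format=js&idx=0&n=3&mkt=en-US")
if resp.status_code == 200:
    for image in resp.json()['images']:
        wallpaper_uri = BING_URI_BASE + image['url']
        filename = image['url'].split('/')[-1]
        # save each picture next to the script, like the single-image version does
        with open(filename, 'wb') as f:
            f.write(requests.get(wallpaper_uri).content)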
Grab it and save it in a folder by using this code:
import datetime
from urllib.request import urlopen, urlretrieve
from xml.dom import minidom
import os
import sys
def join_path(*args):
    # Takes a list of values or multiple values and returns a valid path.
    if isinstance(args[0], list):
        path_list = args[0]
    else:
        path_list = args
    val = [str(v).strip(' ') for v in path_list]
    return os.path.normpath('/'.join(val))
dir_path = os.path.dirname(os.path.realpath(__file__))
save_dir = join_path(dir_path, 'images')
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
def set_wallpaper(pic_path):
    if sys.platform.startswith('win32'):
        cmd = 'REG ADD \"HKCU\Control Panel\Desktop\" /v Wallpaper /t REG_SZ /d \"%s\" /f' % pic_path
        os.system(cmd)
        os.system('rundll32.exe user32.dll, UpdatePerUserSystemParameters')
        print('Wallpaper is set.')
    elif sys.platform.startswith('linux'):
        os.system(''.join(['gsettings set org.gnome.desktop.background picture-uri file://', pic_path]))
        print('Wallpaper is set.')
    else:
        print('OS not supported.')
        return
    return
def download_old_wallpapers(minus_days=False):
    """Uses download_wallpaper(use_wallpaper=False) to download the last 20 wallpapers.
    If minus_days is given an integer, a specific day in the past will be downloaded.
    """
    if minus_days:
        download_wallpaper(idx=minus_days, use_wallpaper=False)
        return
    for i in range(0, 20):  # max 20
        download_wallpaper(idx=i, use_wallpaper=False)
def download_wallpaper(idx=0, use_wallpaper=True):
    # Getting the XML File
    try:
        usock = urlopen(''.join(['http://www.bing.com/HPImageArchive.aspx?format=xml&idx=',
                                 str(idx), '&n=1&mkt=ru-RU']))  # ru-RU, because they always have 1920x1200 resolution
    except Exception as e:
        print('Error while downloading #', idx, e)
        return
    try:
        xmldoc = minidom.parse(usock)
    # This is raised when there is trouble finding the image url.
    except Exception as e:
        print('Error while processing XML index #', idx, e)
        return
    # Parsing the XML File
    for element in xmldoc.getElementsByTagName('url'):
        url = 'http://www.bing.com' + element.firstChild.nodeValue
        # Get Current Date as fileName for the downloaded Picture
        now = datetime.datetime.now()
        date = now - datetime.timedelta(days=int(idx))
        pic_path = join_path(save_dir, ''.join([date.strftime('bing_wp_%d-%m-%Y'), '.jpg']))
        if os.path.isfile(pic_path):
            print('Image of', date.strftime('%d-%m-%Y'), 'already downloaded.')
            if use_wallpaper:
                set_wallpaper(pic_path)
            return
        print('Downloading: ', date.strftime('%d-%m-%Y'), 'index #', idx)
        # Download and Save the Picture
        # Get a higher resolution by replacing the file name
        urlretrieve(url.replace('_1366x768', '_1920x1200'), pic_path)
        # Set Wallpaper if wanted by user
        if use_wallpaper:
            set_wallpaper(pic_path)

if __name__ == "__main__":
    download_wallpaper()
Alternatively, if you already have the wallpaper URLs collected in a list, you can save each one with urlretrieve (Python 2 urllib shown here; in Python 3 it lives in urllib.request):
for number, url in enumerate(list_of_urls):
    urllib.urlretrieve(url, 'Image {}.jpg'.format(number + 1))
I am using the smb module to connect to an SMB server. I am unable to figure out what exactly I need to do to copy files from the SMB share to my local drive, as I am using a Linux machine.
import tempfile
from smb.SMBConnection import SMBConnection
from nmb.NetBIOS import NetBIOS
conn = SMBConnection('salead',
'repo#2k12',
'192.168.14.1',
'SERVER',
use_ntlm_v2=True)
assert conn.connect('192.168.1.41', 139)
if conn:
    print "successful", conn
else:
    print "failed to connect"
If anyone can help me out, it would be a great help. Thanks.
According to the documentation, SMBConnection.retrieveFile() is the function you are searching for.
Example:
# UNTESTED
conn = SMBConnection('salead',
'repo#2k12',
'192.168.14.1',
'SERVER',
use_ntlm_v2 = True)
assert conn.connect('192.168.1.41', 139)
with open('local_file', 'wb') as fp:
    conn.retrieveFile('share', '/path/to/remote_file', fp)
Documentation: http://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html
Example (in Japanese): http://symfoware.blog68.fc2.com/blog-entry-999.html
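If you also need to find out what is on the share before copying, pysmb's SMBConnection.listPath() returns the directory entries; a minimal sketch combining it with retrieveFile() (the client name, share name, and directory below are placeholders, not values from the question):

from smb.SMBConnection import SMBConnection

conn = SMBConnection('salead', 'repo#2k12', 'my-client-name', 'SERVER', use_ntlm_v2=True)
assert conn.connect('192.168.1.41', 139)

share = 'share'           # placeholder share name
remote_dir = '/some/dir'  # placeholder directory on that share

# listPath() returns SharedFile objects; skip sub-directories (including "." and "..")
for entry in conn.listPath(share, remote_dir):
    if entry.isDirectory:
        continue
    with open(entry.filename, 'wb') as fp:
        conn.retrieveFile(share, remote_dir + '/' + entry.filename, fp)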
The documentation referenced above by @Rob should get you there. Here is an idea that works, using an abstracted class:
> pip install pysmb
import subprocess
from smb.SMBConnection import SMBConnection  # import the class itself, not just the module

class SMBClient:
    def __init__(self, ip, username, password, servername, share_name):
        self._ip = ip
        self._username = username
        self._password = password
        self._port = 445
        self._share_name = share_name
        self._servername = servername
        self._server = ''

    def _connect(self):
        """ Connect and authenticate to the SMB share. """
        self._server = SMBConnection(username=self._username,
                                     password=self._password,
                                     my_name=self._get_localhost(),
                                     remote_name=self._servername,
                                     use_ntlm_v2=True)
        self._server.connect(self._ip, port=self._port)

    def _download(self, files: list):
        """ Download files from the remote share. """
        for file in files:
            with open(file, 'wb') as file_obj:
                self._server.retrieveFile(service_name=self._share_name,
                                          path=file,
                                          file_obj=file_obj)

    def _get_localhost(self):
        self._host = subprocess.Popen(['hostname'], stdout=subprocess.PIPE).communicate()[0].strip()
        return self._host  # return the hostname so _connect() can use it as my_name
Then, all you need to do is this:
filename = [the file you want to download]
smb_client = SMBClient(ip='192.168.14.1', username='salead', password='repo#2k12', servername='SERVER', share_name='SHARE_NAME')
smb_client._connect()
response = smb_client._download(filename)
Main Documentation: https://pysmb.readthedocs.io/en/latest/index.html
SMBConnection Documentation: https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html
In my case, it was necessary to copy files from one remote machine to another remote machine. Both remote machines run Windows.
import tempfile
from smb.SMBConnection import SMBConnection
client_machine_name = 'mylocalmachinename'
# first domain settings
userID_1 = 'admin_1'
password_1 = '123'
server_name_1 = 'SERVER_1'
server_ip_1 = '192.168.1.1'
# path to remote file in windows format we want to read
# "D:\myfiles\dir\somefile.txt". In this case, full access
# to disk d is open. But if you have published only a separate
# catalog, in our example "myfiles", then:
# share_resource_1 = 'myfiles'
# path_to_file_1 = '/dir/somefile.txt'
share_resource_1 = 'd'
path_to_file_1 = '/myfiles/dir/somefile.txt'
# second domain settings
userID_2 = 'admin_2'
password_2 = '123'
server_name_2 = 'SERVER_2'
server_ip_2 = '192.168.1.2'
# path to remote file we want to save in windows format "D:\myfiles\dir\somefile_copy.txt"
share_resource_2 = 'd'
path_to_file_2 = '/myfiles/dir/somefile_copy.txt'
conn_1 = SMBConnection(
userID_1, password_1, client_machine_name, server_name_1, use_ntlm_v2 = True
)
conn_2 = SMBConnection(
userID_2, password_2, client_machine_name, server_name_2, use_ntlm_v2 = True
)
conn_1.connect(server_ip_1, 139)
conn_2.connect(server_ip_2, 139)
with tempfile.NamedTemporaryFile() as file_obj:
    conn_1.retrieveFile(share_resource_1, path_to_file_1, file_obj)
    # Note that the file obj is positioned at the end-of-file,
    # so you might need to perform a file_obj.seek() if you need
    # to read from the beginning
    file_obj.seek(0)
    conn_2.storeFile(share_resource_2, path_to_file_2, file_obj)
I'm very new to Ubuntu/Python/Bash/Gnome in general, so I still feel like there's a chance I'm doing something wrong, but it's been 3 days now without success...
Here's what the script is supposed to do:
* [✓] Download 1 random image from wallbase.cc
* [✓] Save it to the same directory that the script is running from
* [x] Set it as the wallpaper
Two attempts are made to set the wallpaper, using different commands, and NEITHER works when run from the script. There is a print statement (2nd line from the bottom) that spits out the correct terminal command: I can copy and paste the printed result into a terminal and it works fine, it just doesn't work when it's executed in the script.
#!/usr/bin/env python
import urllib2
import os
from gi.repository import Gio
response = urllib2.urlopen("http://wallbase.cc/random/12/eqeq/1366x768/0.000/100/32")
page_source = response.read()
thlink_pos = page_source.find("ico-X")
address_start = (page_source.find("href=\"", thlink_pos) + 6)
address_end = page_source.find("\"", address_start + 1)
response = urllib2.urlopen(page_source[address_start:address_end])
page_source = response.read()
bigwall_pos = page_source.find("bigwall")
address_start = (page_source.find("src=\"", bigwall_pos) + 5)
address_end = page_source.find("\"", address_start + 1)
address = page_source[address_start:address_end]
slash_pos = address.rfind("/") + 1
pic_name = address[slash_pos:]
bashCommand = "wget " + page_source[address_start:address_end]
os.system(bashCommand)
print "Does my new image exists?", os.path.exists(os.getcwd() + "/" + pic_name)
#attempt 1
settings = Gio.Settings.new("org.gnome.desktop.background")
settings.set_string("picture-uri", "file://" + os.getcwd() + "/" + pic_name)
settings.apply()
#attempt 2
bashCommand = "gsettings set org.gnome.desktop.background picture-uri file://" + os.getcwd() + "/" + pic_name
print bashCommand
os.system(bashCommand)
settings.apply()
You've successfully changed your settings, but they're still left unapplied. Try calling:
settings.apply()
after setting the "picture-uri" string.
It works for me (Ubuntu 12.04).
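In other words, the minimal sequence is the same Gio.Settings calls your script already makes, with apply() at the end (the image path below is a placeholder):

from gi.repository import Gio

settings = Gio.Settings.new("org.gnome.desktop.background")
settings.set_string("picture-uri", "file:///home/user/Pictures/wall.jpg")  # placeholder path
settings.apply()  # without this the new value may never reach the desktop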
I've modified your script (unrelated to your error):
#!/usr/bin/python
"""Set desktop background using random images from http://wallbase.cc
It uses `gi.repository.Gio.Settings` to set the background.
"""
import functools
import itertools
import logging
import os
import posixpath
import random
import re
import sys
import time
import urllib
import urllib2
import urlparse
from collections import namedtuple
from bs4 import BeautifulSoup # $ sudo apt-get install python-bs4
from gi.repository.Gio import Settings # pylint: disable=F0401,E0611
DEFAULT_IMAGE_DIR = os.path.expanduser('~/Pictures/backgrounds')
HTMLPAGE_SIZE_MAX = 1 << 20 # bytes
TIMEOUT_MIN = 300 # seconds
TIMEOUT_DELTA = 30 # jitter
# "Anime/Manga", "Wallpapers/General", "High Resolution Images"
CATEGORY_W, CATEGORY_WG, CATEGORY_HR = range(1, 4)
PURITY_SFW, PURITY_SKETCHY, PURITY_NSFW, PURITY_DEFAULT = 4, 2, 1, 0
DAY_IN_SECONDS = 86400
UrlRetreiveResult = namedtuple('UrlRetreiveResult', "path headers")
def set_background(image_path, check_exist=True):
    """Change desktop background to image pointed by `image_path`.
    """
    if check_exist:  # make sure we can read it (at this time)
        with open(image_path, 'rb') as f:
            f.read(1)

    # prepare uri
    path = os.path.abspath(image_path)
    if isinstance(path, unicode):  # quote() doesn't like unicode
        path = path.encode('utf-8')
    uri = 'file://' + urllib.quote(path)

    # change background
    bg_setting = Settings.new('org.gnome.desktop.background')
    bg_setting.set_string('picture-uri', uri)
    bg_setting.apply()
def url2filename(url):
    """Return basename corresponding to url.
    >>> url2filename('http://example.com/path/to/file?opt=1')
    'file'
    """
    urlpath = urlparse.urlsplit(url).path  # pylint: disable=E1103
    basename = posixpath.basename(urllib.unquote(urlpath))
    if os.path.basename(basename) != basename:
        raise ValueError  # refuse 'dir%5Cbasename.ext' on Windows
    return basename
def download(url, dirpath, extensions=True, filename=None):
    """Download url to dirpath.

    Use basename of the url path as a filename.
    Create destination directory if necessary.
    Use `extensions` to require the file to have an extension or any
    of in a given sequence of extensions.

    Return (path, headers) on success.
    Don't retrieve url if path exists (headers are None in this case).
    """
    if not os.path.isdir(dirpath):
        os.makedirs(dirpath)
        logging.info('created directory %s', dirpath)

    # get filename from the url
    filename = url2filename(url) if filename is None else filename
    if os.path.basename(filename) != filename:
        logging.critical('filename must not have path separator in it "%s"',
                         filename)
        return

    if extensions:
        # require the file to have an extension
        root, ext = os.path.splitext(filename)
        if root and len(ext) > 1:
            # require the extension to be in the list
            try:
                it = iter(extensions)
            except TypeError:
                pass
            else:
                if ext not in it:
                    logging.warn(("file extension is not in the list"
                                  " url=%s"
                                  " extensions=%s"),
                                 url, extensions)
                    return
        else:
            logging.warn("file has no extension url=%s", url)
            return

    # download file
    path = os.path.join(dirpath, filename)
    logging.info("%s\n%s", url, path)
    if os.path.exists(path):  # don't retrieve if path exists
        logging.info('path exists')
        return UrlRetreiveResult(path, None)
    try:
        return UrlRetreiveResult(*urllib.urlretrieve(url, path,
                                                     _print_download_status))
    except IOError:
        logging.warn('failed to download {url} -> {path}'.format(
            url=url, path=path))
def _print_download_status(block_count, block_size, total_size):
    logging.debug('%10s bytes of %s', block_count * block_size, total_size)
def min_time_between_calls(min_delay):
    """Enforce minimum time delay between calls."""
    def decorator(func):
        lastcall = [None]  # emulate nonlocal keyword

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if lastcall[0] is not None:
                delay = time.time() - lastcall[0]
                if delay < min_delay:
                    _sleep(min_delay - delay)
            lastcall[0] = time.time()
            return func(*args, **kwargs)
        return wrapper
    return decorator
@min_time_between_calls(5)
def _makesoup(url):
    try:
        logging.info(vars(url) if isinstance(url, urllib2.Request) else url)
        page = urllib2.urlopen(url)
        soup = BeautifulSoup(page.read(HTMLPAGE_SIZE_MAX))
        return soup
    except (IOError, OSError) as e:
        logging.warn('failed to return soup for %s, error: %s',
                     getattr(url, 'get_full_url', lambda: url)(), e)
class WallbaseImages:
    """Given parameters it provides image urls to download."""

    def __init__(self,
                 categories=None,          # default; sequence of CATEGORY_*
                 resolution_exactly=True,  # False means 'at least'
                 resolution=None,          # all; (width, height)
                 aspect_ratios=None,       # all; sequence eg, [(5,4),(16,9)]
                 purity=PURITY_DEFAULT,    # combine with |
                 thumbs_per_page=None,     # default; an integer
                 ):
        """See usage below."""
        self.categories = categories
        self.resolution_exactly = resolution_exactly
        self.resolution = resolution
        self.aspect_ratios = aspect_ratios
        self.purity = purity
        self.thumbs_per_page = thumbs_per_page

    def _as_request(self):
        """Create a urllib2.Request() using given parameters."""
        # make url
        if self.categories is not None:
            categories = "".join(str(n) for n in (2, 1, 3)
                                 if n in self.categories)
        else:  # default
            categories = "0"
        if self.resolution_exactly:
            at_least_or_exactly_resolution = "eqeq"
        else:
            at_least_or_exactly_resolution = "gteq"
        if self.resolution is not None:
            resolution = "{width:d}x{height:d}".format(
                width=self.resolution[0], height=self.resolution[1])
        else:
            resolution = "0x0"
        if self.aspect_ratios is not None:
            aspect_ratios = "+".join("%.2f" % (w / float(h),)
                                     for w, h in self.aspect_ratios)
        else:  # default
            aspect_ratios = "0"
        purity = "{0:03b}".format(self.purity)
        thumbs = 20 if self.thumbs_per_page is None else self.thumbs_per_page
        url = ("http://wallbase.cc/random/"
               "{categories}/"
               "{at_least_or_exactly_resolution}/{resolution}/"
               "{aspect_ratios}/"
               "{purity}/{thumbs:d}").format(**locals())
        logging.info(url)

        # make post data
        data = urllib.urlencode(dict(query='', board=categories, nsfw=purity,
                                     res=resolution,
                                     res_opt=at_least_or_exactly_resolution,
                                     aspect=aspect_ratios,
                                     thpp=thumbs))
        req = urllib2.Request(url, data)
        return req

    def __iter__(self):
        """Yield background image urls."""
        # find links to bigwall pages
        # css-like: #thumbs div[class="thumb"] \
        #           a[class~="thlink" and href^="http://"]
        soup = _makesoup(self._as_request())
        if not soup:
            logging.warn("can't retrieve the main page")
            return
        thumbs_soup = soup.find(id="thumbs")
        for thumb in thumbs_soup.find_all('div', {'class': "thumb"}):
            bigwall_a = thumb.find('a', {'class': "thlink",
                                         'href': re.compile(r"^http://")})
            if bigwall_a is None:
                logging.warn("can't find thlink link")
                continue  # try the next thumb

            # find image url on the bigwall page
            # css-like: #bigwall > img[alt and src^="http://"]
            bigwall_soup = _makesoup(bigwall_a['href'])
            if bigwall_soup is not None:
                bigwall = bigwall_soup.find(id='bigwall')
                if bigwall is not None:
                    img = bigwall.find('img',
                                       src=re.compile(r"(?i)^http://.*\.jpg$"),
                                       alt=True)
                    if img is not None:
                        url = img['src']
                        filename = url2filename(url)
                        if filename.lower().endswith('.jpg'):
                            yield url, filename  # successfully found image url
                        else:
                            logging.warn('suspicious url "%s"', url)
                        continue
            logging.warn("can't parse bigwall page")
def main():
    level = logging.INFO
    if '-d' in sys.argv:
        sys.argv.remove('-d')
        level = logging.DEBUG

    # configure logging
    logging.basicConfig(format='%(levelname)s: %(asctime)s %(message)s',
                        level=level, datefmt='%Y-%m-%d %H:%M:%S %Z')

    if len(sys.argv) > 1:
        backgrounds_dir = sys.argv[1]
    else:
        backgrounds_dir = DEFAULT_IMAGE_DIR

    # infinite loop: Press Ctrl+C to interrupt it
    # NOTE: here's some arbitrary logic: modify for your needs e.g., break
    #       after the first image found
    timeout = TIMEOUT_MIN  # seconds
    for i in itertools.cycle(xrange(timeout, DAY_IN_SECONDS)):
        found = False
        try:
            for url, filename in WallbaseImages(
                    categories=[CATEGORY_WG, CATEGORY_HR, CATEGORY_W],
                    purity=PURITY_SFW,
                    thumbs_per_page=60):
                res = download(url, backgrounds_dir, extensions=('.jpg',),
                               filename=filename)
                if res and res.path:
                    found = True
                    set_background(res.path)
                # don't hammer the site
                timeout = max(TIMEOUT_MIN, i % DAY_IN_SECONDS)
                _sleep(random.randint(timeout, timeout + TIMEOUT_DELTA))
        except Exception:  # pylint: disable=W0703
            logging.exception('unexpected error')
            _sleep(timeout)
        else:
            if not found:
                logging.error('failed to retrieve any images')
                _sleep(timeout)
                timeout = (timeout * 2) % DAY_IN_SECONDS

def _sleep(timeout):
    """Add logging to time.sleep() call."""
    logging.debug('sleep for %s seconds', timeout)
    time.sleep(timeout)
main()
I tried to implement a Python script that used the PIL library to write text on an image and then update the Gnome background "picture-uri" to point to that image using the Gio class. The script would ping-pong between two images, always modifying the one not in use and then attempting to "switch" by updating the Settings. I did this to avoid any flicker, since modifying the current background directly drops it out temporarily. When calling the script directly from the shell I rarely saw any issue, but in the cron job it simply wouldn't update on the pong. I used both sync and apply and would wait several minutes before trying to switch the images. It didn't work. I also tried running cron as the user (su -c "cmd" user), and that didn't work either.
I finally gave up on the ping-pong approach when I noticed that Gnome will detect any change in the background file and update. So I dropped the ping-pong method and went to a temp file that I just copy over the current background using the shutil library. Works like a charm.
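A minimal sketch of that final approach (both paths are placeholders; the point, as described above, is that Gnome notices when the file behind picture-uri changes and redraws the background):

import shutil

tmp_image = "/tmp/next_wallpaper.png"              # freshly rendered image, e.g. from PIL
current_background = "/home/user/.wallpaper.png"   # the file picture-uri already points at

# overwrite the current background in place; Gnome picks up the change automatically
shutil.copyfile(tmp_image, current_background)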
I'm trying, with the Google Docs API (Python), to create collections and sub-collections and to upload files into a created sub-collection.
First question:
Everything is fine with the code below and the hierarchy is correct (subfolder1 under folder1, uploaded file under subfolder1), but the sub-folder and the file are also visible in the Home view for the end user.
I would like to see only the top-level collection in the Home view.
Is there a way to prevent the resources (sub-collections and files) from being displayed in the Home view?
Note: I have tried the following alternatives but still get the same result:
1) the 'collection=' parameter of create_resource gives the same result
2) ClientLogin or two-legged OAuth gives the same result
Second question:
Is it possible to set the description field?
import gdata.data
import gdata.docs.client
import gdata.acl.data
import gdata.docs.data
GAPPS_OAUTH_CONSUMER_KEY = "xxxx"
GAPPS_OAUTH_CONSUMER_SECRET = "xxxxx"
GAPPS_ADMIN_ACCOUNT = "x"
GAPPS_CLIENT_LOGIN_LOGIN='xxxxx'
GAPPS_CLIENT_LOGIN_PWD='xxxxx'
GAPPS_CLIENT_LOGIN_APP='xxxxxx'
filepath = 'C:\\Users\\xxxxx\\Pictures\\'
filename = 'xxxxxx.png'
path = filepath + filename
client = gdata.docs.client.DocsClient()
client.ssl = True
#client.ClientLogin(GAPPS_CLIENT_LOGIN_LOGIN, GAPPS_CLIENT_LOGIN_PWD, GAPPS_CLIENT_LOGIN_APP)
client.auth_token = gdata.gauth.TwoLeggedOAuthHmacToken(GAPPS_OAUTH_CONSUMER_KEY, GAPPS_OAUTH_CONSUMER_SECRET, GAPPS_ADMIN_ACCOUNT)
# create a folder
collection1 = gdata.docs.data.Resource('folder', title = 'Script Folder')
collection1 = client.create_resource(collection1)
# create a sub-folder in collection1
subcollection1 = gdata.docs.data.Resource('folder', title = 'Script Sub Folder')
subcollection1 = client.create_resource(subcollection1)
res = client.move_resource(subcollection1, collection = collection1, keep_in_collections = False)
# Upload the resource in subcollection1
doc = gdata.docs.data.Resource(type = 'file', title = filename)
media = gdata.data.MediaSource()
media.SetFileHandle(path, 'application/octet-stream')
create_uri = gdata.docs.client.RESOURCE_UPLOAD_URI + '?convert=false'
doc = client.CreateResource(doc, create_uri = create_uri, media = media)
print 'Created, and uploaded:', doc.title.text, doc.resource_id.text
client.move_resource(doc, collection = subcollection1, keep_in_collections = False)
Here is the solution:
subcollection1 = gdata.docs.data.Resource('folder', title = 'Script Sub Folder')
subcollection1.AddCategory(gdata.docs.data.LABELS_NS, gdata.docs.data.LABELS_NS + "#" +gdata.docs.data.HIDDEN_LABEL, gdata.docs.data.HIDDEN_LABEL)
subcollection1 = client.create_resource(subcollection1)
There is another much simpler approach.
subcollection1 = client.create_resource(subcollection1,collection=collection1)
With this approach, the Script Sub Folder never appears in your root folder.