commit c27e977c8c (branch: master)
parent 4d9871494b
Author: unknown
Date: 2016-05-10 01:00:29 -07:00
16 changed files with 455 additions and 103 deletions

AHK/clipboard.ahk Normal file

@ -0,0 +1,7 @@
#NoEnv ; Recommended for performance and compatibility with future AutoHotkey releases.
SendMode Input ; Recommended for new scripts due to its superior speed and reliability.
SetWorkingDir %A_ScriptDir% ; Ensures a consistent starting directory.
; CTRL+SPACE pastes the clipboard as if it was typed manually.
^SPACE:: SendInput % RegExReplace(Clipboard, "\r\n?|\n\r?", "`n")

View File

@ -0,0 +1,6 @@
#NoEnv ; Recommended for performance and compatibility with future AutoHotkey releases.
SendMode Input ; Recommended for new scripts due to its superior speed and reliability.
SetWorkingDir %A_ScriptDir% ; Ensures a consistent starting directory.
; USE CTRL + SPACE TO TOGGLE THE EFFECT ON AND OFF
^SPACE:: Winset, Alwaysontop, , A

View File

@ -2,6 +2,8 @@
SendMode Input ; Recommended for new scripts due to its superior speed and reliability.
SetWorkingDir %A_ScriptDir% ; Ensures a consistent starting directory.
; Shift-T causes the mousewheel to scroll down.
; I used this to throw lots of dosh in Killing Floor.
+T::
While GetKeyState("t", "P")
{

Bytestring/README.md Normal file

@ -0,0 +1,49 @@
Bytestring
==========
Given an integer number of bytes, return a string that best represents it:
>>> import bytestring
>>> bytestring.bytestring(1)
'1.000 b'
>>> bytestring.bytestring(100)
'100.000 b'
>>> bytestring.bytestring(1024)
'1.000 KiB'
>>> bytestring.bytestring(2 ** 10)
'1.000 KiB'
>>> bytestring.bytestring(2 ** 20)
'1.000 MiB'
>>> bytestring.bytestring(2 ** 30)
'1.000 GiB'
>>> bytestring.bytestring(2 ** 40)
'1.000 TiB'
>>> bytestring.bytestring(123456789)
'117.738 MiB'
>>> bytestring.bytestring(753429186)
'718.526 MiB'
>>> bytestring.bytestring(7534291860)
'7.017 GiB'
>>> bytestring.bytestring(75342918600)
'70.169 GiB'
Given a string, return the number of bytes it represents:
>>> bytestring.parsebytes('100')
100.0
>>> bytestring.parsebytes('1k')
1024.0
>>> bytestring.parsebytes('1kb')
1024.0
>>> bytestring.parsebytes('1kib')
1024.0
>>> bytestring.parsebytes('200 mib')
209715200.0
>>> bytestring.parsebytes('2 GB')
2147483648.0
>>> bytestring.parsebytes('0.5 GIB')
536870912.0
>>> bytestring.parsebytes('512M')
536870912.0
>>> bytestring.parsebytes('99 Y')
1.1968365614184829e+26
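The two functions should round-trip, which is handy for normalizing user-supplied size strings; a quick sketch (not part of the module's own examples):
>>> bytestring.bytestring(bytestring.parsebytes('1.5 gb'))
'1.500 GiB'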

Bytestring/bytestring.py Normal file

@ -0,0 +1,65 @@
BYTE = 1
KIBIBYTE = 1024 * BYTE
MIBIBYTE = 1024 * KIBIBYTE
GIBIBYTE = 1024 * MIBIBYTE
TEBIBYTE = 1024 * GIBIBYTE
PEBIBYTE = 1024 * TEBIBYTE
EXIBYTE = 1024 * PEBIBYTE
ZEBIBYTE = 1024 * EXIBYTE
YOBIBYTE = 1024 * ZEBIBYTE

UNIT_STRINGS = {
    BYTE: 'b',
    KIBIBYTE: 'KiB',
    MIBIBYTE: 'MiB',
    GIBIBYTE: 'GiB',
    TEBIBYTE: 'TiB',
    PEBIBYTE: 'PiB',
    EXIBYTE: 'EiB',
    ZEBIBYTE: 'ZiB',
    YOBIBYTE: 'YiB',
}

def bytestring(bytes):
    possible_units = sorted(UNIT_STRINGS.keys(), reverse=True)

    # Choose which magnitude to use as the divisor.
    if bytes < 1:
        appropriate_unit = 1
    else:
        for unit in possible_units:
            if bytes >= unit:
                appropriate_unit = unit
                break

    size_unit_string = UNIT_STRINGS[appropriate_unit]
    size_string = '%.3f %s' % ((bytes / appropriate_unit), size_unit_string)
    return size_string

def parsebytes(string):
    import re
    string = string.lower().replace(' ', '')

    # Pull the leading numeric portion (digits and dots) out of the string.
    matches = re.findall('((\\.|\\d)+)', string)
    if len(matches) == 0:
        raise ValueError('No numbers found')
    if len(matches) > 1:
        raise ValueError('Too many numbers found')

    byte_value = matches[0][0]
    if not string.startswith(byte_value):
        raise ValueError('Number is not at start of string')

    # From here on, `string` is just the unit text, if any.
    string = string.replace(byte_value, '')
    byte_value = float(byte_value)
    if string == '':
        return byte_value

    # Accept the full unit ('kib'), its first letter ('k'),
    # or the form without the 'i' ('kb').
    reversed_units = {value.lower(): key for (key, value) in UNIT_STRINGS.items()}
    for (unit_string, multiplier) in reversed_units.items():
        if string in (unit_string, unit_string[0], unit_string.replace('i', '')):
            break
    else:
        raise ValueError('Could not determine byte value of %s' % string)

    return byte_value * multiplier

View File

@ -2,7 +2,7 @@ from PIL import Image
import os
import sys
close_enough_threshold = 90
close_enough_threshold = 10
filename = sys.argv[1]
try:
close_enough_threshold = int(sys.argv[2])
@ -17,12 +17,11 @@ def close_enough(a, b):
def deletterbox(filename):
image = Image.open(filename)
trim_top(image)
for x in range(4):
image = trim_top(image)
image = image.rotate(90)
#(base, ext) = os.path.splitext(filename)
#filename = base + 'X' + ext
image = image.rotate(90, expand=True)
(base, ext) = os.path.splitext(filename)
filename = base + 'X' + ext
image.save(filename, quality=100)
def trim_top(image):

View File

@ -11,10 +11,11 @@ DIGEST:
> opendirdl digest http://website.com/directory/ <flags>
flags:
-f | --fullscan : When included, perform HEAD requests on all files, to
know the size of the entire directory.
-dv "x.db" | --databasename "x.db" : Use a custom database filename. By default, databases
are named after the web domain.
-f | --fullscan:
When included, perform HEAD requests on all files, to know the size of the entire directory.
-db "x.db" | --databasename "x.db":
Use a custom database filename. By default, databases are named after the web domain.
DOWNLOAD:
Download the files whose URLs are enabled in the database.
@ -22,12 +23,15 @@ DOWNLOAD:
> opendirdl download website.com.db <flags>
flags:
-o "x" | --outputdir "x" : Save the files to a custom directory, "x". By default,
files are saved to a folder named after the web domain.
-ow | --overwrite : When included, download and overwrite files even if they
already exist in the output directory.
-bps 100 | --bytespersecond 100 : Ratelimit yourself to downloading at 100 BYTES per second.
The webmaster will appreciate this.
-o "x" | --outputdir "x":
Save the files to a custom directory, "x". By default, files are saved to a folder named
after the web domain.
-ow | --overwrite:
When included, download and overwrite files even if they already exist in the output directory.
-bps 100 | --bytespersecond 100:
Ratelimit yourself to downloading at 100 BYTES per second. The webmaster will appreciate this.
KEEP_PATTERN:
Enable URLs which match a regex pattern. Matches are based on the percent-encoded strings!
@ -46,10 +50,9 @@ LIST_BASENAMES:
> opendirdl list_basenames website.com.db <flags>
flags:
-o "x.txt" | --outputfile "x.txt" : Output the results to a file instead of stdout. This is
useful if the filenames contain special characters that
crash Python, or are so long that the console becomes
unreadable.
-o "x.txt" | --outputfile "x.txt":
Output the results to a file instead of stdout. This is useful if the filenames contain
special characters that crash Python, or are so long that the console becomes unreadable.
MEASURE:
Sum up the filesizes of all enabled URLs.
@ -57,25 +60,27 @@ MEASURE:
> opendirdl measure website.com.db <flags>
flags:
-f | --fullscan : When included, perform HEAD requests on any URL whose size is not known.
If this flag is not included, and some file's size is unknown, you will
receive a note.
-f | --fullscan:
When included, perform HEAD requests on any URL whose size is not known. If this flag is
not included, and some file's size is unknown, you will receive a printed note.
'''
# Module names preceded by two hashes indicate modules that are imported inside
# a function, because they are not used anywhere else and we don't want to waste
# time importing them on every run.
import sys
sys.path.append('C:\\git\\else\\ratelimiter'); import ratelimiter
import argparse
## import bs4
## import hashlib
import os
import ratelimiter
## import re
import requests
import shutil
import sqlite3
## import sys
## tkinter
import urllib.parse
@ -196,8 +201,7 @@ class Downloader:
# Ignore this value of `root`, because we might have a custom outputdir.
root = self.outputdir
folder = os.path.join(root, folder)
if not os.path.exists(folder):
os.makedirs(folder)
os.makedirs(folder, exist_ok=True)
fullname = os.path.join(folder, basename)
temporary_basename = hashit(url, 16) + '.oddltemporary'
temporary_fullname = os.path.join(folder, temporary_basename)

View File

@ -49,9 +49,7 @@ def start(path, objectives=[32], subfolder="pixel", outpath=""):
print('Unlisted "%s": not .jpg or .png' % name)
break
if not os.path.exists(outpath):
print('Creating directory: ' + outpath)
os.makedirs(outpath)
os.makedirs(outpath, exist_ok=True)
for name in images:
filepath = path + name

QuickTips/continue.md Normal file

@ -0,0 +1,51 @@
Continue
========
Discards the current iteration, and restarts the loop using the next item.
>>> for x in range(6):
...     if x == 3:
...         continue
...     print(x)
...
0
1
2
4
5
#### Continue is great for cleaning code with lots of conditions:
##### Without continue:
for submission in submissions:
    if submission.author is not None:
        if submission.over_18 is False:
            if 'suggestion' in submission.title.lower():
                print('Found:', submission.id)
&nbsp;
for submission in submissions:
    if submission.author is not None and submission.over_18 is False and 'suggestion' in submission.title.lower():
        print('Found:', submission.id)
##### With continue:
for submission in submissions:
    if submission.author is None:
        continue
    if submission.over_18:
        continue
    if 'suggestion' not in submission.title.lower():
        continue
    print('Found:', submission.id)
The mentality changes from "keep only the items with the right properties" to "discard the items with the wrong properties".
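The same discard-early pattern works in `while` loops too; here is a minimal, self-contained sketch (a made-up queue where `None` marks entries to skip):

import collections

queue = collections.deque([1, None, 2, None, 3])
total = 0
while len(queue) > 0:
    item = queue.popleft()
    if item is None:
        # Discard placeholder entries without indenting the rest of the loop body.
        continue
    total += item
print(total)  # 6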

RateMeter/ratemeter.py Normal file

@ -0,0 +1,69 @@
import collections
import math
import time


class RateMeter:
    def __init__(self, span):
        '''
        This class is used to calculate a rolling average of
        units per second over `span` seconds.

        Minimum span is 1 second.

        Set `span` to None to calculate units per second over the lifetime of
        the object after the first digest, rather than over a span.
        This saves the effort of tracking timestamps. Don't just use a large number!
        '''
        if span is not None and span < 1:
            raise ValueError('Span must be >= 1')

        self.sum = 0
        self.span = span
        self.tracking = collections.deque()
        self.first_digest = None

    def digest(self, value):
        now = math.ceil(time.time())
        self.sum += value
        if self.span is None:
            if self.first_digest is None:
                self.first_digest = now
            return

        # Drop entries that have fallen out of the window.
        earlier = now - self.span
        while len(self.tracking) > 0 and self.tracking[0][0] < earlier:
            (timestamp, pop_value) = self.tracking.popleft()
            self.sum -= pop_value

        # Accumulate into the current second's entry, or start a new one.
        if len(self.tracking) == 0 or self.tracking[-1][0] != now:
            self.tracking.append([now, value])
        else:
            self.tracking[-1][1] += value

    def report(self):
        '''
        Return a tuple containing the running sum, the time span
        over which the rate is being calculated, and the rate in
        units per second.

        (sum, time_interval, rate)
        '''
        # Flush the old values, ensure self.first_digest exists.
        self.digest(0)
        if self.span is None:
            now = math.ceil(time.time())
            time_interval = now - self.first_digest
        else:
            # No risk of IndexError because the digest(0) ensures we have
            # at least one entry.
            time_interval = self.tracking[-1][0] - self.tracking[0][0]
        if time_interval == 0:
            return (self.sum, 0, self.sum)

        rate = self.sum / time_interval
        time_interval = round(time_interval, 3)
        rate = round(rate, 3)
        return (self.sum, time_interval, rate)
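A brief usage sketch, not part of the module: feed observed chunk sizes into `digest()` and poll `report()` for `(sum, interval, rate)`. It assumes the class is importable as `ratemeter`, as speedtest.py below does.

import time
import ratemeter

meter = ratemeter.RateMeter(span=5)
for chunk in (1024, 2048, 4096):
    meter.digest(chunk)
    time.sleep(1)
(total, interval, rate) = meter.report()
print('%d units over %s seconds = %s units/second' % (total, interval, rate))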

RateMeter/speedtest.py Normal file

@ -0,0 +1,39 @@
import bytestring
import downloady
import ratemeter
import requests
import time

URL = 'http://cdn.speedof.me/sample32768k.bin?r=0.8817502672426312'

METER = ratemeter.RateMeter(span=10)
METER_2 = ratemeter.RateMeter(span=None)

class G:
    pass

g = G()
g.total = 0
g.start = None
g.last = int(time.time())

def callback_progress(bytes_downloaded, bytes_total):
    if g.start is None:
        g.start = time.time()
    percent = 100 * bytes_downloaded / bytes_total
    percent = '%07.3f%%:' % percent
    chunk = bytes_downloaded - g.total
    g.total = bytes_downloaded
    METER.digest(chunk)
    METER_2.digest(chunk)
    now = round(time.time(), 1)
    if now > g.last:
        g.last = now
        percent = percent.rjust(9, ' ')
        rate = bytestring.bytestring(METER.report()[2]).rjust(15, ' ')
        rate2 = bytestring.bytestring(METER_2.report()[2]).rjust(15, ' ')
        elapsed = str(round(now-g.start, 1)).rjust(10, ' ')
        print(percent, rate, rate2, elapsed, end='\r', flush=True)
        #print(METER.report(), METER_2.report())

print(URL)
print('Progress'.rjust(9, ' '), 'bps over 10s'.rjust(15, ' '), 'bps overall'.rjust(15, ' '), 'elapsed'.rjust(10, ' '))
downloady.download_file(URL, 'nul', callback_progress=callback_progress)

View File

@ -13,4 +13,8 @@ A couple of tools for copying files and directories.
- walk_generator now yields absolute filenames since copy_dir no longer needs to process exclusions, and that was the only reason walk_generator used to yield them in parts.
2016 03 04
- Created a FilePath class to cache os.stat data, which should reduce the number of unnecessary filesystem calls.
2016 03 18
- Added `glob.escape` to `get_path_casing`.
- Added callbacks for some extra debug output.

View File

@ -47,6 +47,10 @@ class SpinalError(Exception):
pass
class FilePath:
'''
Class for consolidating lots of `os.path` operations,
and caching `os.stat` results.
'''
def __init__(self, path):
self.path = os.path.abspath(path)
self._stat = None
@ -59,7 +63,7 @@ class FilePath:
return self.path.__hash__()
def __repr__(self):
return repr(self.path)
return 'FilePath(%s)' % repr(self.path)
@property
def isdir(self):
@ -92,12 +96,18 @@ class FilePath:
return self._stat
def type_getter(self, attr, resolution):
if getattr(self, attr) is None:
'''
Try to return the cached type. Call resolution(self.stat.st_mode) if
we don't have the stat data yet.
'''
value = getattr(self, attr)
if value is None:
if self.stat is False:
return False
else:
setattr(self, attr, resolution(self.stat.st_mode))
return getattr(self, attr)
value = resolution(self.stat.st_mode)
setattr(self, attr, value)
return value
def bytes_to_unit_string(bytes):
@ -114,13 +124,13 @@ def callback_exclusion(name, path_type):
'''
Example of an exclusion callback function.
'''
print('Excluding', name)
print('Excluding', path_type, name)
def callback_v1(fpobj, written_bytes, total_bytes):
'''
Example of a copy callback function.
Prints "fpobj written/total (percent%)"
Prints "filename written/total (percent%)"
'''
filename = fpobj.path.encode('ascii', 'replace').decode()
if written_bytes >= total_bytes:
@ -158,12 +168,14 @@ def copy_dir(
destination_new_root=None,
bytes_per_second=None,
callback_directory=None,
callback_exclusion=None,
callback_file=None,
callback_permission_denied=None,
callback_verbose=None,
dry_run=False,
exclude_directories=None,
exclude_filenames=None,
exclusion_callback=None,
files_per_second=None,
overwrite_old=True,
precalcsize=False,
):
@ -183,6 +195,8 @@ def copy_dir(
`new_root(source, destination_new_root)`.
Thus, this path acts as a root and the rest of the path is matched.
`destination` and `destination_new_root` are mutually exclusive.
bytes_per_second:
Restrict file copying to this many bytes per second. Can be an integer
or an existing Ratelimiter object.
@ -194,6 +208,13 @@ def copy_dir(
This function will be called after each file copy with three parameters:
name of file copied, number of bytes written to destination so far,
total bytes needed (from precalcsize).
If `precalcsize` is False, this function will receive written bytes
for both written and total, showing 100% always.
Default = None
callback_exclusion:
Passed directly into `walk_generator`.
Default = None
@ -209,6 +230,11 @@ def copy_dir(
Default = None
callback_verbose:
If provided, this function will be called with some operation notes.
Default = None
dry_run:
Do everything except the actual file copying.
@ -224,8 +250,9 @@ def copy_dir(
Default = None
exclusion_callback:
Passed directly into `walk_generator`.
files_per_second:
Maximum number of files to be processed per second. Helps to keep CPU usage
low.
Default = None
@ -251,7 +278,7 @@ def copy_dir(
# Prepare parameters
if not is_xor(destination, destination_new_root):
m = 'One and only one of `destination` and '
m += '`destination_new_root` can be passed'
m += '`destination_new_root` can be passed.'
raise ValueError(m)
source = str_to_fp(source)
@ -261,6 +288,9 @@ def copy_dir(
destination = new_root(source, destination_new_root)
destination = str_to_fp(destination)
callback_directory = callback_directory or do_nothing
callback_verbose = callback_verbose or do_nothing
if is_subfolder(source, destination):
raise RecursiveDirectory(source, destination)
@ -275,20 +305,17 @@ def copy_dir(
else:
total_bytes = 0
if isinstance(bytes_per_second, ratelimiter.Ratelimiter):
limiter = bytes_per_second
elif bytes_per_second is not None:
limiter = ratelimiter.Ratelimiter(allowance_per_period=bytes_per_second, period=1)
else:
limiter = None
bytes_per_second = limiter_or_none(bytes_per_second)
files_per_second = limiter_or_none(files_per_second)
# Copy
written_bytes = 0
walker = walk_generator(
source,
callback_exclusion=callback_exclusion,
callback_verbose=callback_verbose,
exclude_directories=exclude_directories,
exclude_filenames=exclude_filenames,
exclusion_callback=exclusion_callback,
)
for (source_abspath) in walker:
# Terminology:
@ -304,15 +331,15 @@ def copy_dir(
raise DestinationIsDirectory(destination_abspath)
destination_location = os.path.split(destination_abspath.path)[0]
if not os.path.isdir(destination_location):
os.makedirs(destination_location)
os.makedirs(destination_location, exist_ok=True)
copied = copy_file(
source_abspath,
destination_abspath,
bytes_per_second=limiter,
bytes_per_second=bytes_per_second,
callback=callback_file,
callback_permission_denied=callback_permission_denied,
callback_verbose=callback_verbose,
dry_run=dry_run,
overwrite_old=overwrite_old,
)
@ -320,11 +347,13 @@ def copy_dir(
copiedname = copied[0]
written_bytes += copied[1]
if callback_directory is not None:
if precalcsize is False:
callback_directory(copiedname, written_bytes, written_bytes)
else:
callback_directory(copiedname, written_bytes, total_bytes)
if precalcsize is False:
callback_directory(copiedname, written_bytes, written_bytes)
else:
callback_directory(copiedname, written_bytes, total_bytes)
if files_per_second is not None:
files_per_second.limit(1)
return [destination, written_bytes]
@ -334,6 +363,7 @@ def copy_file(
destination_new_root=None,
bytes_per_second=None,
callback=None,
callback_verbose=None,
dry_run=False,
overwrite_old=True,
callback_permission_denied=None,
@ -377,6 +407,11 @@ def copy_file(
Default = None
callback_verbose:
If provided, this function will be called with some operation notes.
Default = None
dry_run:
Do everything except the actual file copying.
@ -404,19 +439,16 @@ def copy_file(
destination = new_root(source, destination_new_root)
destination = str_to_fp(destination)
callback = callback or do_nothing
callback_verbose = callback_verbose or do_nothing
if not source.isfile:
raise SourceNotFile(source)
if destination.isdir:
raise DestinationIsDirectory(destination)
if isinstance(bytes_per_second, ratelimiter.Ratelimiter):
limiter = bytes_per_second
elif bytes_per_second is not None:
limiter = ratelimiter.Ratelimiter(allowance_per_period=bytes_per_second, period=1)
else:
limiter = None
bytes_per_second = limiter_or_none(bytes_per_second)
# Determine overwrite
if destination.stat is not False:
@ -437,11 +469,11 @@ def copy_file(
source_bytes = source.size
destination_location = os.path.split(destination.path)[0]
if not os.path.exists(destination_location):
os.makedirs(destination_location)
os.makedirs(destination_location, exist_ok=True)
written_bytes = 0
try:
callback_verbose('Opening handles.')
source_file = open(source.path, 'rb')
destination_file = open(destination.path, 'wb')
except PermissionError as exception:
@ -460,30 +492,37 @@ def copy_file(
destination_file.write(data_chunk)
written_bytes += data_bytes
if limiter is not None:
limiter.limit(data_bytes)
if bytes_per_second is not None:
bytes_per_second.limit(data_bytes)
if callback is not None:
callback(destination, written_bytes, source_bytes)
callback(destination, written_bytes, source_bytes)
# Fin
callback_verbose('Closing handles.')
source_file.close()
destination_file.close()
callback_verbose('Copying metadata')
shutil.copystat(source.path, destination.path)
return [destination, written_bytes]
def do_nothing(*args):
'''
Used by other functions as the default callback.
'''
return
def get_path_casing(path):
'''
Take what is perhaps incorrectly cased input and get the path's actual
casing according to the filesystem.
Thank you
Thank you:
Ethan Furman http://stackoverflow.com/a/7133137/5430534
xvorsx http://stackoverflow.com/a/14742779/5430534
'''
p = str_to_fp(path)
path = p.path
path = glob.escape(path)
(drive, subpath) = os.path.splitdrive(path)
pattern = ["%s[%s]" % (piece[:-1], piece[-1]) for piece in subpath.split(os.sep)[1:]]
pattern = os.sep.join(pattern)
@ -504,10 +543,8 @@ def get_dir_size(path):
raise SourceNotDirectory(path)
total_bytes = 0
for (directory, filename) in walk_generator(path):
filename = os.path.join(directory, filename)
filesize = os.path.getsize(filename)
total_bytes += filesize
for filepath in walk_generator(path):
total_bytes += filepath.size
return total_bytes
@ -525,6 +562,15 @@ def is_xor(*args):
'''
return [bool(a) for a in args].count(True) == 1
def limiter_or_none(value):
    '''
    Convert a bytes-per-second number into a Ratelimiter, pass an existing
    Ratelimiter through unchanged, or return None if no limit was given.
    '''
    if isinstance(value, ratelimiter.Ratelimiter):
        limiter = value
    elif value is not None:
        limiter = ratelimiter.Ratelimiter(allowance_per_period=value, period=1)
    else:
        limiter = None
    return limiter
def new_root(filepath, root):
'''
Prepend `root` to `filepath`, drive letter included. For example:
@ -557,13 +603,24 @@ def str_to_fp(path):
def walk_generator(
path,
callback_exclusion=None,
callback_verbose=None,
exclude_directories=None,
exclude_filenames=None,
exclusion_callback=None,
):
'''
Yield (location, filename) from the file tree similar to os.walk.
Example value: ('C:\\Users\\Michael\\Music', 'song.mp3')
Yield FilePath objects from the file tree similar to os.walk.
callback_exclusion:
This function will be called when a file or directory is excluded with
two parameters: the path, and 'file' or 'directory'.
Default = None
callback_verbose:
If provided, this function will be called with some operation notes.
Default = None
exclude_filenames:
A set of filenames that will not be copied. Entries can be absolute
@ -579,12 +636,6 @@ def walk_generator(
to exclude all matches. For example:
{'C:\\folder', 'thumbnails'}
Default = None
exclusion_callback:
This function will be called when a file or directory is excluded with
two parameters: the path, and 'file' or 'directory'.
Default = None
'''
if exclude_directories is None:
@ -593,8 +644,8 @@ def walk_generator(
if exclude_filenames is None:
exclude_filenames = set()
if exclusion_callback is None:
exclusion_callback = lambda *x: None
callback_exclusion = callback_exclusion or do_nothing
callback_verbose = callback_verbose or do_nothing
exclude_filenames = {normalize(f) for f in exclude_filenames}
exclude_directories = {normalize(f) for f in exclude_directories}
@ -602,11 +653,11 @@ def walk_generator(
path = str_to_fp(path).path
if normalize(path) in exclude_directories:
exclusion_callback(path, 'directory')
callback_exclusion(path, 'directory')
return
if normalize(os.path.split(path)[1]) in exclude_directories:
exclusion_callback(path, 'directory')
callback_exclusion(path, 'directory')
return
directory_queue = collections.deque()
@ -616,7 +667,9 @@ def walk_generator(
# Thank you for your cooperation.
while len(directory_queue) > 0:
location = directory_queue.popleft()
callback_verbose('listdir: %s' % location)
contents = os.listdir(location)
callback_verbose('received %d items' % len(contents))
directories = []
for base_name in contents:
@ -624,24 +677,25 @@ def walk_generator(
if os.path.isdir(absolute_name):
if normalize(absolute_name) in exclude_directories:
exclusion_callback(absolute_name, 'directory')
callback_exclusion(absolute_name, 'directory')
continue
if normalize(base_name) in exclude_directories:
exclusion_callback(absolute_name, 'directory')
callback_exclusion(absolute_name, 'directory')
continue
directories.append(absolute_name)
else:
if normalize(base_name) in exclude_filenames:
exclusion_callback(absolute_name, 'file')
callback_exclusion(absolute_name, 'file')
continue
if normalize(absolute_name) in exclude_filenames:
exclusion_callback(absolute_filename, 'file')
callback_exclusion(absolute_name, 'file')
continue
yield(str_to_fp(absolute_name))
# Extendleft causes them to get reversed, so flip it first.
directories.reverse()
directory_queue.extendleft(directories)
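For reference, a hedged usage sketch of the copy_dir parameters documented above (callback_exclusion, files_per_second, and the integer form of bytes_per_second). The module name `spinal` and the keyword names `source` and `destination` are assumptions inferred from the surrounding docstrings, not confirmed by this diff.

import spinal

def print_exclusion(path, path_type):
    # callback_exclusion receives the excluded path and 'file' or 'directory'.
    print('Excluding', path_type, path)

spinal.copy_dir(
    source='C:\\source_folder',       # assumed parameter name
    destination='C:\\backup_folder',  # mutually exclusive with destination_new_root
    bytes_per_second=5 * (2 ** 20),   # integers are converted by limiter_or_none
    callback_exclusion=print_exclusion,
    exclude_filenames={'thumbs.db'},
    files_per_second=10,
)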

View File

@ -15,8 +15,9 @@ for filepath in argv:
folder = os.path.dirname(filepath)
basename = os.path.basename(filepath)
extension = os.path.splitext(basename)[1]
newname = [random.choice(string.ascii_letters) for x in range(16)]
newname = [random.choice(string.ascii_lowercase) for x in range(9)]
newname = ''.join(newname)
newname = '%s\\%s%s' % (folder, newname, extension)
os.rename(filepath, newname)
print('%s -> %s' % (filepath, newname))
newname = newname + extension
newname = os.path.join(folder, newname)
#os.rename(filepath, newname)
print('%s -> %s' % (filepath, newname))

View File

@ -58,8 +58,8 @@ last_request = 0
if DOWNLOAD_DIRECTORY != '':
if DOWNLOAD_DIRECTORY[-1] not in ['/', '\\']:
DOWNLOAD_DIRECTORY += '\\'
if not os.path.exists(DOWNLOAD_DIRECTORY):
os.makedirs(DOWNLOAD_DIRECTORY)
os.makedirs(DOWNLOAD_DIRECTORY, exist_ok=True)
class StatusExc(Exception):
pass
@ -67,8 +67,8 @@ class StatusExc(Exception):
def download_file(url, localname, headers={}):
localname = os.path.join(DOWNLOAD_DIRECTORY, localname)
dirname = os.path.split(localname)[0]
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
if dirname != '':
os.makedirs(dirname, exist_ok=True)
if 'twimg' in url:
localname = localname.replace(':large', '')
localname = localname.replace(':small', '')
@ -188,8 +188,7 @@ def handle_imgur(url, albumid='', customname=None):
if IMGUR_ALBUMFOLDERS:
if not os.path.exists(DOWNLOAD_DIRECTORY + albumid):
os.makedirs(DOWNLOAD_DIRECTORY + albumid)
os.makedirs(DOWNLOAD_DIRECTORY + albumid, exist_ok=True)
localpath = '%s\\%s' % (albumid, name)
else:
@ -352,13 +351,18 @@ def handle_youtube(url, customname=None):
def handle_generic(url, customname=None):
print('Generic')
try:
remote_name = url.split('/')[-1]
if customname:
name = customname
else:
name = url.split('/')[-1]
name = remote_name
base = name.split('.')[0]
ext = name.split('.')[-1]
if '.' in name:
ext = name.split('.')[-1]
elif '.' in remote_name:
ext = remote_name.split('.')[-1]
if ext in [base, '']:
ext = 'html'
print(base)