Appease some flake8 linting.

Ethan Dalool 2020-01-31 20:53:29 -08:00
parent 3ff49e1a44
commit 0cb646fda0
12 changed files with 24 additions and 65 deletions
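
For context, the hunks below fall into a handful of recurring flake8-style cleanups: parenthesized expressions instead of backslash line continuations, a space after ':' in dict comprehensions, membership tests spelled 'not in', no space before call parentheses, '# comment' instead of '#comment', unused imports dropped, and commented-out debug prints deleted. A minimal standalone illustration of those patterns (the rule codes are my best-guess mapping and are not stated in the commit):

import string

# Parentheses instead of a trailing backslash for line continuation.
alphabet = string.ascii_letters
mixed_case = (
    any(c in string.ascii_uppercase for c in alphabet) and
    any(c in string.ascii_lowercase for c in alphabet)
)

# E231: whitespace after ':' in dict comprehensions.
alpha_dict = {character: index for (index, character) in enumerate(alphabet)}

# E211: no whitespace before the call parentheses.
in_range = 255 in range(0, 256)

# E713: membership tests spelled "x not in y" rather than "not x in y".
has_sep = '/' not in 'filename.txt'

# E265: block comments start with "# " rather than "#".
print(mixed_case, alpha_dict['a'], in_range, has_sep)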

View file

@@ -19,8 +19,10 @@ def from_base(number, base, alphabet=None):
if number.count('.') > 1:
raise ValueError('Too many decimal points')
mixed_case = any(c in string.ascii_uppercase for c in alphabet) and \
any(c in string.ascii_lowercase for c in alphabet)
mixed_case = (
any(c in string.ascii_uppercase for c in alphabet) and
any(c in string.ascii_lowercase for c in alphabet)
)
if not mixed_case:
alphabet = alphabet.upper()
number = number.upper()
@@ -30,7 +32,7 @@ def from_base(number, base, alphabet=None):
differences = char_set.difference(alpha_set)
if len(differences) > 0:
raise ValueError('Unknown characters for base', base, differences)
alpha_dict = {character:index for (index, character) in enumerate(alphabet)}
alpha_dict = {character: index for (index, character) in enumerate(alphabet)}
try:
decimal_pos = number.index('.')
@@ -45,7 +47,6 @@ def from_base(number, base, alphabet=None):
if index < decimal_pos:
power -= 1
value = alpha_dict[character] * (base ** power)
#print(value)
result += value
return result
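
The hunks above touch a from_base helper that converts a string in an arbitrary base and alphabet back to an integer. A rough, self-contained sketch of the same idea (integer part only, default 0-9A-Z alphabet; this is not the project's implementation):

import string

def from_base_sketch(number, base, alphabet=string.digits + string.ascii_uppercase):
    # Map each symbol to its value, like the alpha_dict comprehension above.
    alpha_dict = {character: index for (index, character) in enumerate(alphabet)}
    result = 0
    for character in number.upper():
        result = result * base + alpha_dict[character]
    return result

assert from_base_sketch('FF', 16) == 255
assert from_base_sketch('101', 2) == 5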

View file

@@ -1,10 +1,8 @@
import re
import sys
from voussoirkit import clipext
from voussoirkit import pipeable
__VERSION__ = '0.0.1'
BYTE = 1

View file

@@ -1,13 +1,9 @@
import argparse
import os
import pyperclip
import requests
import sys
import time
import urllib
import warnings
# pip install voussoirkit
from voussoirkit import bytestring
from voussoirkit import ratelimiter
from voussoirkit import clipext
@@ -15,7 +11,7 @@ from voussoirkit import clipext
warnings.simplefilter('ignore')
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36'
}
FILENAME_BADCHARS = '*?"<>|\r\n'
@@ -72,7 +68,6 @@ def download_file(
timeout=timeout,
verify_ssl=verify_ssl,
)
#print(plan)
if plan is None:
return
@@ -291,7 +286,6 @@ class Progress1:
self.solid_char = ''
def step(self, bytes_downloaded):
#print(self.limiter.balance)
percent = bytes_downloaded / self.total_bytes
percent = min(1, percent)
if self.limiter.limit(1) is False and percent < 1:

View file

@@ -1,5 +1,4 @@
import datetime
import time
EPOCH = datetime.datetime(
year=1993,

View file

@@ -1,4 +1,3 @@
import time
ESCAPE_SEQUENCES = {
'\\': '\\',
'"': '"',
@@ -83,7 +82,6 @@ class ExpressionTree:
if child.token in OPERATORS:
childstring = '(%s)' % childstring
children.append(childstring)
#children = [str(child) for child in self.children]
if len(children) == 1:
return '%s %s' % (self.token, children[0])
@@ -106,12 +104,10 @@ class ExpressionTree:
current = cls(token=tokens[0])
for token in tokens[1:]:
##print(' '*spaces, 'cur', current, current.token)
if isinstance(token, list):
new = cls.parse(token, spaces=spaces+1)
else:
new = cls(token=token)
##print(' '*spaces, 'new', new)
if 0 == 1:
pass
@@ -175,10 +171,7 @@ class ExpressionTree:
else:
raise Exception('Expected new to be my operand or parent binary.')
##print(' '*spaces, 'fin:', current.rootmost(), '\n')
current = current.rootmost()
##print('---', current)
return current
def _evaluate(self, text, match_function=None):
@@ -187,7 +180,6 @@ class ExpressionTree:
match_function = DEFAULT_MATCH_FUNCTION
value = match_function(text, self.token)
#print(self.token, value)
return value
operator_function = OPERATOR_FUNCTIONS[self.token]
@@ -319,7 +311,6 @@ def implied_tokens(tokens):
if skip_this:
continue
#print('tk:', token, 'hu:', has_unary_operator, 'hb:', has_binary_operator, 'ho:', has_operand)
if isinstance(token, str) and token in OPERATORS:
this_binary = token in BINARY_OPERATORS
this_unary = not this_binary
@@ -370,7 +361,6 @@ def order_operations(tokens):
slice_end = None
precedence_stack = []
while index < len(tokens):
#time.sleep(0.1)
token = tokens[index]
try:
precedence = PRECEDENCE.index(token)
@@ -382,7 +372,6 @@ def order_operations(tokens):
continue
precedence_stack.append(precedence)
if token in UNARY_OPERATORS:
slice_start = index
slice_end = index + 2
@@ -394,8 +383,6 @@ def order_operations(tokens):
elif precedence_stack[-2] < precedence_stack[-1]:
slice_end = index
#print(tokens, index, token, precedence_stack, slice_start, slice_end, sep=' || ')
if slice_start is None or slice_end is None:
index += 1
continue
@@ -432,7 +419,6 @@ def sublist_tokens(tokens, _from_index=0, depth=0):
index = _from_index
while index < len(tokens):
token = tokens[index]
#print(index, token)
index += 1
if token is PAREN_OPEN:
(token, index) = sublist_tokens(tokens, _from_index=index, depth=depth+1)
@@ -466,10 +452,9 @@ def tokenize(expression):
tokens = []
for character in expression:
if in_escape:
#character = ESCAPE_SEQUENCES.get(character, '\\'+character)
in_escape = False
elif character in {'(', ')'} and not in_quotes:
elif character in {'(', ')'} and not in_quotes:
if character == '(':
sentinel = PAREN_OPEN
paren_depth += 1
@@ -509,27 +494,10 @@ def tokenize(expression):
if __name__ == '__main__':
tests = [
#'test you AND(1 OR "harrison ford") AND (where are you) AND pg',
#'(you OR "AND ME")',
#'(3 XOR 2 OR 4',
#'1 NOT OR AND (2 OR (3 OR 4) OR (5 OR 6)))',
#'3 OR (5 OR)',
#'1 AND(4 OR "5 6")OR \\(test) 2',
#'1 2 AND (3 OR 4)',
#'AND 2',
#'1 AND 2 AND ("3 7" OR 6)AND (4 OR 5)',
#'NOT 1 AND NOT (2 OR 3)',
#'1 AND 2 AND 3 AND 4',
#'NOT 1 AND 2 OR 3 OR (5 AND 6)',
#'5 OR 6 AND 7 OR 8',
#'1 OR 2 AND 3 AND 4 OR 5 AND 6 OR 7 OR 8 AND 9',
#'2 XOR 3 AND 4',
#'1 OR (2 OR 3 AND 4)',
#'NOT XOR 4 7'
'[sci-fi] OR [pg-13]',
'([sci-fi] OR [war]) AND [r]',
'[r] XOR [sci-fi]',
'"mark hamill" "harrison ford"',
'[sci-fi] OR [pg-13]',
'([sci-fi] OR [war]) AND [r]',
'[r] XOR [sci-fi]',
'"mark hamill" "harrison ford"',
]
teststrings = {
'Star Wars': '[harrison ford] [george lucas] [sci-fi] [pg] [carrie fisher] [mark hamill] [space]',

View file

@@ -14,12 +14,14 @@ class Landmark:
def barsplit(chars):
wordlist = []
wordbuff = []
def flush():
if not wordbuff:
return
word = fusk_join(wordbuff)
wordlist.append(word)
wordbuff.clear()
for item in chars:
if item == '|':
flush()
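
The hunk above only shows part of barsplit. Assuming the unshown branch appends non-'|' characters to wordbuff and that fusk_join is roughly a join, the buffer-and-flush pattern works like this standalone sketch:

def barsplit_sketch(chars):
    wordlist = []
    wordbuff = []
    def flush():
        # Skip empty buffers so runs of '|' do not produce empty words.
        if not wordbuff:
            return
        wordlist.append(''.join(wordbuff))  # stand-in for the project's fusk_join
        wordbuff.clear()
    for item in chars:
        if item == '|':
            flush()
        else:
            wordbuff.append(item)
    flush()
    return wordlist

assert barsplit_sketch('ab|cd||e') == ['ab', 'cd', 'e']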

View file

@@ -137,7 +137,7 @@ def urandom_hex(length):
def main_password(argv):
length = listget(argv, 0, DEFAULT_LENGTH)
length = listget(argv, 0, DEFAULT_LENGTH)
options = [a.lower() for a in argv[1:]]
if '-' in length:

View file

@@ -312,12 +312,10 @@ def get_path_casing(path):
imaginary_portion = input_path.absolute_path
imaginary_portion = imaginary_portion[len(cased):]
#real_portion = os.path.normcase(cased)
#imaginary_portion = imaginary_portion.replace(real_portion, '')
imaginary_portion = imaginary_portion.lstrip(os.sep)
cased = os.path.join(cased, imaginary_portion)
cased = cased.rstrip(os.sep)
if not os.sep in cased:
if os.sep not in cased:
cased += os.sep
return cased
@@ -341,7 +339,6 @@ def glob_patternize(piece):
for character in piece:
if character not in '![]':
replacement = '[%s]' % character
#print(piece, character, replacement)
piece = piece.replace(character, replacement, 1)
break
return piece

View file

@@ -1,4 +1,4 @@
#import pyperclip moved to stay lazy.
# import pyperclip moved to stay lazy.
import sys
builtin_input = input

View file

@@ -1,4 +1,3 @@
import collections
import hashlib
import logging
import os
@@ -420,9 +419,12 @@ def copy_file(
while True:
try:
data_chunk = source_handle.read(chunk_size)
except PermissionError as e:
print(source)
raise
except PermissionError as exception:
if callback_permission_denied is not None:
callback_permission_denied(source, exception)
return [destination, 0]
else:
raise
data_bytes = len(data_chunk)
if data_bytes == 0:
break
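
The new except branch lets callers handle unreadable sources through callback_permission_denied instead of letting PermissionError propagate. A minimal stand-in that mirrors that control flow (this is not the project's copy_file; read_chunk here always fails just to exercise the branch):

def read_chunk(path):
    # Stand-in for source_handle.read(); always fails so the except branch runs.
    raise PermissionError('pretend %s is unreadable' % path)

def copy_file_sketch(source, destination, callback_permission_denied=None):
    try:
        data_chunk = read_chunk(source)
    except PermissionError as exception:
        if callback_permission_denied is not None:
            callback_permission_denied(source, exception)
            return [destination, 0]
        else:
            raise
    return [destination, len(data_chunk)]

def log_permission_denied(source, exception):
    print('Skipping %s: %s' % (source, exception))

print(copy_file_sketch('locked.bin', 'copy.bin', callback_permission_denied=log_permission_denied))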

View file

@@ -111,7 +111,7 @@ def update_filler(pairs, where_key):
return (qmarks, bindings)
def hex_byte(byte):
if byte not in range (0, 256):
if byte not in range(0, 256):
raise ValueError(byte)
return hex(byte)[2:].rjust(2, '0')
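
For reference, hex_byte as shown above maps an integer in 0-255 to a two-character lowercase hex string. A quick standalone check (logic copied from the hunk, not an import of the project module):

def hex_byte(byte):
    if byte not in range(0, 256):
        raise ValueError(byte)
    return hex(byte)[2:].rjust(2, '0')

assert hex_byte(10) == '0a'
assert hex_byte(255) == 'ff'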

View file

@@ -1,5 +1,3 @@
import os
class ExistingChild(Exception):
pass