Use separate loggers per module instead of common.log.

The separate names make it easier to tell which module emitted each message while debugging.
voussoir 2021-11-14 13:48:51 -08:00
parent baded8e5fb
commit 6867d32798
4 changed files with 25 additions and 15 deletions
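
The pattern adopted throughout this commit is the standard per-module logger: each module asks for a logger named after itself instead of sharing common.log. A minimal stdlib sketch of the idea (module names like timesearch.pushshift are illustrative, and vlogging.get_logger is assumed to wrap the same mechanism):

    import logging

    # Before: every module funneled messages through one shared logger,
    # so nothing in the output said which module spoke.
    shared = logging.getLogger('timesearch')

    # After: each module owns a logger named after itself, e.g.
    # 'timesearch.pushshift', and the name travels with every record.
    log = logging.getLogger(__name__)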

common.py

@@ -4,6 +4,8 @@ import os
 import time
 import traceback
 
+from voussoirkit import vlogging
+
 VERSION = '2020.09.06.0'
 
 try:
@@ -30,7 +32,7 @@ if bot is None or bot.praw != praw:
     raise ImportError(message)
 
-log = logging.getLogger('timesearch')
+log = vlogging.get_logger(__name__)
 
 r = bot.anonymous()

livestream.py

@@ -7,6 +7,9 @@ from . import common
 from . import exceptions
 from . import tsdb
 
+from voussoirkit import vlogging
+
+log = vlogging.get_logger(__name__)
 
 def _listify(x):
     '''
@@ -32,7 +35,7 @@ def generator_printer(generator):
         clear_prev = (' ' * prev_message_length) + '\r'
         print(clear_prev + status, end='')
         prev_message_length = len(status)
-    if totalnew == 0 and common.log.level != common.logging.DEBUG:
+    if totalnew == 0 and log.level == 0 or log.level > vlogging.DEBUG:
         # Since there were no news, allow the next line to overwrite status
         print('\r', end='', flush=True)
     else:
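
One subtlety in the new condition: Python's `and` binds tighter than `or`, so the line parses as `(totalnew == 0 and log.level == 0) or (log.level > vlogging.DEBUG)`. A small illustrative snippet (not part of the commit) showing how the grouping changes the outcome when new items did arrive:

    import logging

    DEBUG = logging.DEBUG               # 10
    totalnew, level = 5, logging.INFO   # five new items, logger at INFO (20)

    # As written: 'and' binds tighter than 'or'.
    as_written = (totalnew == 0 and level == 0) or (level > DEBUG)

    # Alternative grouping: only overwrite the status line when nothing
    # is new (level 0 is logging.NOTSET, i.e. inherit from the parent).
    grouped = totalnew == 0 and (level == 0 or level > DEBUG)

    print(as_written, grouped)  # True False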
@@ -131,13 +134,13 @@ def _livestream_as_a_generator(
     common.login()
 
     if subreddit:
-        common.log.debug('Getting subreddit %s', subreddit)
+        log.debug('Getting subreddit %s', subreddit)
         (database, subreddit) = tsdb.TSDB.for_subreddit(subreddit, fix_name=True)
         subreddit = common.r.subreddit(subreddit)
         submission_function = subreddit.new if do_submissions else None
         comment_function = subreddit.comments if do_comments else None
     else:
-        common.log.debug('Getting redditor %s', username)
+        log.debug('Getting redditor %s', username)
         (database, username) = tsdb.TSDB.for_user(username, fix_name=True)
         user = common.r.redditor(username)
         submission_function = user.submissions.new if do_submissions else None
@@ -180,16 +183,16 @@ def _livestream_helper(
     results = []
 
     if submission_function:
-        common.log.debug('Getting submissions %s %s', args, kwargs)
+        log.debug('Getting submissions %s %s', args, kwargs)
         this_kwargs = copy.deepcopy(kwargs)
         submission_batch = submission_function(*args, **this_kwargs)
         results.extend(submission_batch)
 
     if comment_function:
-        common.log.debug('Getting comments %s %s', args, kwargs)
+        log.debug('Getting comments %s %s', args, kwargs)
         this_kwargs = copy.deepcopy(kwargs)
         comment_batch = comment_function(*args, **this_kwargs)
         results.extend(comment_batch)
 
-    common.log.debug('Got %d posts', len(results))
+    log.debug('Got %d posts', len(results))
     return results
 
 def livestream_argparse(args):
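
These debug calls are where the separate names pay off: with %(name)s in the log format, identical messages from different modules become distinguishable. A hedged sketch using stdlib configuration (the timesearch.* names are assumptions about the package layout):

    import logging

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(name)s %(levelname)s: %(message)s',
    )

    logging.getLogger('timesearch.livestream').debug('Getting submissions')
    logging.getLogger('timesearch.pushshift').debug('Got batch of %d items.', 100)
    # Output:
    # timesearch.livestream DEBUG: Getting submissions
    # timesearch.pushshift DEBUG: Got batch of 100 items.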

pushshift.py

@@ -15,6 +15,9 @@ import traceback
 from . import common
 
 from voussoirkit import ratelimiter
+from voussoirkit import vlogging
+
+log = vlogging.get_logger(__name__)
 
 print('Thank you Jason Baumgartner of Pushshift.io!')
 
@@ -122,7 +125,7 @@ def _pagination_core(url, params, dummy_type, lower=None, upper=None):
        else:
            break
 
-        common.log.debug('Got batch of %d items.', len(batch))
+        log.debug('Got batch of %d items.', len(batch))
        batch_ids = setify(batch)
        if len(batch_ids) == 0 or batch_ids.issubset(prev_batch_ids):
            break
@@ -141,13 +144,13 @@ def _initialize_ratelimiter():
     global ratelimit
     if ratelimit is not None:
         return
-    common.log.debug('Initializing pushshift ratelimiter.')
+    log.debug('Initializing pushshift ratelimiter.')
     url = 'https://api.pushshift.io/meta'
     response = session.get(url)
     response.raise_for_status()
     response = response.json()
     limit = response['server_ratelimit_per_minute']
-    common.log.debug('Pushshift ratelimit is %d items per minute.', limit)
+    log.debug('Pushshift ratelimit is %d requests per minute.', limit)
     ratelimit = ratelimiter.Ratelimiter(allowance=limit, period=60)
 
 def get(url, params=None):
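
The function above runs once, guarded by the module-level global and early return; afterwards every request pays ratelimit.limit() before touching the network, as the next hunk shows. A rough stand-in for such a limiter, assuming voussoirkit's Ratelimiter spreads `allowance` operations evenly over `period` seconds (the real implementation may differ):

    import time

    class Ratelimiter:
        def __init__(self, allowance, period):
            # Allow `allowance` operations per `period` seconds.
            self.min_interval = period / allowance
            self.last = float('-inf')

        def limit(self):
            # Sleep just long enough to keep calls at least
            # min_interval seconds apart.
            wait = self.min_interval - (time.monotonic() - self.last)
            if wait > 0:
                time.sleep(wait)
            self.last = time.monotonic()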
@@ -161,7 +164,7 @@ def get(url, params=None):
     for (key, val) in DEFAULT_PARAMS.items():
         params.setdefault(key, val)
 
-    common.log.debug('Requesting %s with %s', url, params)
+    log.debug('Requesting %s with %s', url, params)
     ratelimit.limit()
     response = session.get(url, params=params)
     response.raise_for_status()
@@ -238,7 +241,7 @@ def supplement_reddit_data(dummies, chunk_size=100):
     '''
     chunks = common.generator_chunker(dummies, chunk_size)
     for chunk in chunks:
-        common.log.debug('Supplementing %d items with live reddit data.', len(chunk))
+        log.debug('Supplementing %d items with live reddit data.', len(chunk))
         ids = [item.fullname for item in chunk]
         live_copies = list(common.r.info(ids))
         live_copies = {item.fullname: item for item in live_copies}
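
common.generator_chunker is presumably a helper that groups a lazy stream into lists of chunk_size, so each common.r.info(ids) call sends up to 100 fullnames at once. A plausible sketch of such a chunker (illustrative only; the real helper lives in common and may differ):

    def generator_chunker(generator, chunk_size):
        # Yield lists of up to chunk_size items without
        # materializing the whole stream in memory.
        chunk = []
        for item in generator:
            chunk.append(item)
            if len(chunk) == chunk_size:
                yield chunk
                chunk = []
        if chunk:
            yield chunk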

tsdb.py

@@ -9,7 +9,9 @@ from . import pushshift
 from voussoirkit import pathclass
 from voussoirkit import sqlhelpers
+from voussoirkit import vlogging
+
+log = vlogging.get_logger(__name__)
 
 # For backwards compatibility reasons, this list of format strings will help
 # timesearch find databases that are using the old filename style.
@@ -324,7 +326,7 @@ class TSDB:
     def insert(self, objects, commit=True):
         if not isinstance(objects, (list, tuple, types.GeneratorType)):
             objects = [objects]
-        common.log.debug('Trying to insert %d objects.', len(objects))
+        log.debug('Trying to insert %d objects.', len(objects))
 
         new_values = {
             'tsdb': self,
@@ -346,10 +348,10 @@ class TSDB:
             new_values[key] += status
 
         if commit:
-            common.log.debug('Committing insert.')
+            log.debug('Committing insert.')
             self.sql.commit()
 
-        common.log.debug('Done inserting.')
+        log.debug('Done inserting.')
         return new_values
 
     def insert_edited(self, obj, old_text):
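
The commit=True default on insert lets callers batch several calls into one transaction. A hypothetical usage sketch built only from the calls visible in this diff (db stands for a TSDB instance; the batch variables are placeholders):

    # Defer the commit until the final batch so a crash mid-run
    # rolls the whole insert back instead of leaving a partial one.
    for batch in batches[:-1]:
        db.insert(batch, commit=False)
    db.insert(batches[-1], commit=True)  # one commit at the end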