Add package logger #174

Merged · 4 commits · Jan 18, 2020
50 changes: 50 additions & 0 deletions neurotic/__init__.py
@@ -6,6 +6,56 @@
from .version import version as __version__
from .version import git_revision as __git_revision__


import os
import sys
import logging
import logging.handlers

class FileLoggingFormatter(logging.Formatter):
    """
    A custom formatter for file logging
    """
    default_msec_format = '%s.%03d'  # use period instead of comma in decimal seconds
    def __init__(self):
        super().__init__(fmt='[%(asctime)s] [%(levelname)-8s] %(message)s')

class StreamLoggingFormatter(logging.Formatter):
    """
    A custom formatter for stream logging
    """
    def format(self, record):
        if record.levelno == logging.INFO:
            self._style._fmt = '[neurotic] %(message)s'  # do not print "INFO"
        else:
            self._style._fmt = '[neurotic] %(levelname)s: %(message)s'
        return super().format(record)


# set the file path for logging
log_dir = os.path.join(os.path.expanduser('~'), '.neurotic')
if not os.path.exists(log_dir):
    os.mkdir(log_dir)
log_file = os.path.join(log_dir, 'neurotic-log.txt')

# set the default level for logging to INFO
logger = logging.getLogger(__name__)
if logger.level == logging.NOTSET:
    logger.setLevel(logging.INFO)

# write log records to a file, rotating files if it exceeds 10 MB
logger_filehandler = logging.handlers.RotatingFileHandler(filename=log_file, maxBytes=10000000, backupCount=2)
logger_filehandler.setFormatter(FileLoggingFormatter())
logger.addHandler(logger_filehandler)
logger.info('===========================') # file logger only
logger.info(f'Importing neurotic {__version__}') # file logger only

# stream log records to stderr
logger_streamhandler = logging.StreamHandler(stream=sys.stderr)
logger_streamhandler.setFormatter(StreamLoggingFormatter())
logger.addHandler(logger_streamhandler)


from .datasets import *
from .gui import *
from .scripts import *
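
Since neurotic/__init__.py configures the logger named 'neurotic' (via logging.getLogger(__name__)), the submodules changed below each create a child logger with logging.getLogger(__name__) whose records propagate up to the handlers attached here. A minimal usage sketch, relying only on standard-library logging behavior (the level choice and messages are illustrative, not part of this PR):

import logging
import neurotic  # importing the package runs __init__.py, which attaches the file and stderr handlers

# the package logger configured above; submodule loggers such as
# 'neurotic.datasets.data' propagate their records up to it
package_logger = logging.getLogger('neurotic')

# example: silence routine INFO messages while keeping warnings and errors
package_logger.setLevel(logging.WARNING)

package_logger.info('suppressed by the WARNING threshold')
package_logger.warning('still written to ~/.neurotic/neurotic-log.txt and to stderr')
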
32 changes: 20 additions & 12 deletions neurotic/datasets/data.py
@@ -16,6 +16,10 @@
from ..datasets.metadata import _abs_path
from ..elephant_tools import _butter, _isi, _peak_detection

import logging
logger = logging.getLogger(__name__)


def load_dataset(metadata, lazy=False, signal_group_mode='split-all', filter_events_from_epochs=False):
"""
Load a dataset.
@@ -142,7 +146,7 @@ def _read_data_file(metadata, lazy=False, signal_group_mode='split-all'):
    # force lazy=False if lazy is not supported by the reader class
    if lazy and not io.support_lazy:
        lazy = False
        print(f'NOTE: Not reading signals in lazy mode because Neo\'s {io.__class__.__name__} reader does not support it.')
        logger.info(f'NOTE: Not reading signals in lazy mode because Neo\'s {io.__class__.__name__} reader does not support it.')

    if 'signal_group_mode' in inspect.signature(io.read_block).parameters.keys():
        # - signal_group_mode='split-all' is the default because this ensures
@@ -247,15 +251,17 @@ def _read_annotations_file(metadata):
    # discard entries with missing or negative start times
    bad_start = df['Start (s)'].isnull() | (df['Start (s)'] < 0)
    if bad_start.any():
        print('NOTE: These rows will be discarded because their Start times are missing or negative:')
        print(df[bad_start])
        logger.warning('These rows will be discarded because their Start '
                       'times are missing or negative:\n'
                       f'{df[bad_start]}')
        df = df[~bad_start]

    # discard entries with end time preceding start time
    bad_end = df['End (s)'] < df['Start (s)']
    if bad_end.any():
        print('NOTE: These rows will be discarded because their End times precede their Start times:')
        print(df[bad_end])
        logger.warning('These rows will be discarded because their End '
                       'times precede their Start times:\n'
                       f'{df[bad_end]}')
        df = df[~bad_end]

    # compute durations
@@ -310,15 +316,17 @@ def _read_epoch_encoder_file(metadata):
    # discard entries with missing or negative start times
    bad_start = df['Start (s)'].isnull() | (df['Start (s)'] < 0)
    if bad_start.any():
        print('NOTE: These rows will be discarded because their Start times are missing or negative:')
        print(df[bad_start])
        logger.warning('These rows will be discarded because their Start '
                       'times are missing or negative:\n'
                       f'{df[bad_start]}')
        df = df[~bad_start]

    # discard entries with end time preceding start time
    bad_end = df['End (s)'] < df['Start (s)']
    if bad_end.any():
        print('NOTE: These rows will be discarded because their End times precede their Start times:')
        print(df[bad_end])
        logger.warning('These rows will be discarded because their End '
                       'times precede their Start times:\n'
                       f'{df[bad_end]}')
        df = df[~bad_end]

    # compute durations
@@ -482,7 +490,7 @@ def _apply_filters(metadata, blk):
        index = signalNameToIndex.get(sig_filter['channel'], None)
        if index is None:

            print('Warning: skipping filter with channel name {} because channel was not found!'.format(sig_filter['channel']))
            logger.warning('Skipping filter with channel name {} because channel was not found!'.format(sig_filter['channel']))

        else:

@@ -519,7 +527,7 @@ def _run_amplitude_discriminators(metadata, blk):
        index = signalNameToIndex.get(discriminator['channel'], None)
        if index is None:

            print('Warning: skipping amplitude discriminator with channel name {} because channel was not found!'.format(discriminator['channel']))
            logger.warning('Skipping amplitude discriminator with channel name {} because channel was not found!'.format(discriminator['channel']))

        else:

@@ -608,7 +616,7 @@ def _run_burst_detectors(metadata, blk):
        index = spikeTrainNameToIndex.get(detector['spiketrain'], None)
        if index is None:

            print("Warning: skipping burst detector for spike train named "
            logger.warning("Skipping burst detector for spike train named "
                           f"\"{detector['spiketrain']}\" because spike train was "
                           "not found!")

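
The pattern repeated in each file of this PR is the same: add a module-level logger = logging.getLogger(__name__) and replace print calls with logger calls at an appropriate level (info for routine notices, warning for recoverable problems such as discarded rows or skipped filters, critical for conditions that abort an operation). A hypothetical, self-contained sketch of that pattern — the helper name and DataFrame are illustrative, not code from this repository:

import logging

import pandas as pd

logger = logging.getLogger(__name__)  # resolves to e.g. 'neurotic.datasets.data' inside the package

def _drop_bad_starts(df):
    # hypothetical helper mirroring the print -> logger.warning replacement above
    bad = df['Start (s)'].isnull() | (df['Start (s)'] < 0)
    if bad.any():
        # adjacent string literals are concatenated, so the message forms a single log record
        logger.warning('These rows will be discarded because their Start '
                       'times are missing or negative:\n'
                       f'{df[bad]}')
    return df[~bad]
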
25 changes: 14 additions & 11 deletions neurotic/datasets/download.py
@@ -21,6 +21,9 @@
from urllib.request import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm
from ..datasets.ftpauth import FTPBasicAuthHandler

import logging
logger = logging.getLogger(__name__)


# install HTTP and FTP authentication handlers, the latter of which also adds
# reliable file size retrieval before downloading
@@ -36,13 +39,13 @@ def download(url, local_file, overwrite_existing=False, show_progress=True, byte
    Download a file.
    """
    if not overwrite_existing and os.path.exists(local_file):
        print(f'Skipping {os.path.basename(local_file)} (already exists)')
        logger.info(f'Skipping {os.path.basename(local_file)} (already exists)')
        return

    error = None
    error_code = None

    print(f'Downloading {os.path.basename(local_file)}')
    logger.info(f'Downloading {os.path.basename(local_file)}')
    try:
        _download_with_progress_bar(url, local_file, show_progress=show_progress, bytes_per_chunk=bytes_per_chunk)

@@ -77,28 +80,28 @@ def download(url, local_file, overwrite_existing=False, show_progress=True, byte

    if error_code == 404:
        # not found
        print(f'Skipping {os.path.basename(local_file)} (not found on server)')
        logger.critical(f'Skipping {os.path.basename(local_file)} (not found on server)')
        return

    elif error_code == 550:
        # no such file or folder, or permission denied
        print(f'Skipping {os.path.basename(local_file)} (not found on server, or user is unauthorized)')
        logger.critical(f'Skipping {os.path.basename(local_file)} (not found on server, or user is unauthorized)')
        return

    elif error_code == 10060:
        # timeout
        hostname = urllib.parse.urlparse(url).hostname
        print(f'Skipping {os.path.basename(local_file)} (timed out when connecting to {hostname})')
        logger.critical(f'Skipping {os.path.basename(local_file)} (timed out when connecting to {hostname})')
        return

    elif error_code == 11001:
        # could not reach server or resolve hostname
        hostname = urllib.parse.urlparse(url).hostname
        print(f'Skipping {os.path.basename(local_file)} (cannot connect to {hostname})')
        logger.critical(f'Skipping {os.path.basename(local_file)} (cannot connect to {hostname})')
        return

    else:
        print(f'Encountered a problem: {error}')
        logger.critical(f'Encountered a problem: {error}')
        return


@@ -250,13 +253,13 @@ def _authenticate(url):
            raise error

        if bad_login_attempts >= _max_bad_login_attempts:
            print('Unauthorized: Aborting login')
            logger.critical('Unauthorized: Aborting login')
            return False
        else:
            if bad_login_attempts == 0:
                print('Authentication required')
                logger.info('Authentication required')
            else:
                print(f'Failed login ({bad_login_attempts} of '
                logger.warning(f'Failed login ({bad_login_attempts} of '
                               f'{_max_bad_login_attempts}): Bad login credentials, or '
                               f'else user {user} does not have permission to access '
                               f'{url}')
@@ -266,7 +269,7 @@ def _authenticate(url):
        host, port = urllib.parse.splitport(netloc)
        user = input(f'User name on {host}: ')
        if not user:
            print('No user given, aborting login')
            logger.critical('No user given, aborting login')
            return False
        passwd = getpass('Password: ')
        handler.add_password(None, netloc, user, passwd)
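
Given the two formatters defined in neurotic/__init__.py, the download messages above reach stderr without a level prefix for INFO records but with one for higher levels, while the rotating log file also records a timestamp and a padded level name. A rough sketch of the resulting output (file name and timestamp are made up for illustration):

on stderr:
[neurotic] Downloading example-data.axgx
[neurotic] CRITICAL: Skipping example-data.axgx (not found on server)

in ~/.neurotic/neurotic-log.txt:
[2020-01-18 12:00:00.000] [INFO    ] Downloading example-data.axgx
[2020-01-18 12:00:00.000] [CRITICAL] Skipping example-data.axgx (not found on server)
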
3 changes: 3 additions & 0 deletions neurotic/datasets/ftpauth.py
@@ -14,6 +14,9 @@
from urllib.request import FTPHandler, HTTPPasswordMgr
from urllib.parse import splitport, splituser, unquote

import logging
logger = logging.getLogger(__name__)


class FTPBasicAuthHandler(FTPHandler):
"""
9 changes: 6 additions & 3 deletions neurotic/datasets/metadata.py
@@ -13,6 +13,9 @@

from ..datasets.download import download

import logging
logger = logging.getLogger(__name__)


class MetadataSelector():
"""
@@ -96,7 +99,7 @@ def select(self, selection):
        Select a metadata set.
        """
        if self.all_metadata is None:
            print('load metadata before selecting')
            logger.critical('Load metadata before selecting')
        elif selection not in self.all_metadata:
            raise ValueError('{} was not found in {}'.format(selection, self.file))
        else:
@@ -448,7 +451,7 @@ def _download_file(metadata, file, **kwargs):
"""

if not _is_url(metadata['remote_data_dir']):
print('metadata[remote_data_dir] is not a full URL')
logger.critical('metadata[remote_data_dir] is not a full URL')
return

if metadata[file]:
@@ -470,7 +473,7 @@ def _download_all_data_files(metadata, **kwargs):
"""

if not _is_url(metadata['remote_data_dir']):
print('metadata[remote_data_dir] is not a full URL')
logger.critical('metadata[remote_data_dir] is not a full URL')
return

for file in [k for k in metadata if k.endswith('_file')]:
48 changes: 27 additions & 21 deletions neurotic/gui/config.py
@@ -19,6 +19,10 @@
from ..datasets.metadata import _abs_path
from ..gui.epochencoder import NeuroticWritableEpochSource

import logging
logger = logging.getLogger(__name__)


pq.mN = pq.UnitQuantity('millinewton', pq.N/1e3, symbol = 'mN'); # define millinewton


@@ -60,7 +64,7 @@ def __init__(self, metadata, blk, rauc_sigs = None, lazy = False):
        self.viewer_settings = {
            'traces': {'show': True, 'disabled': False, 'reason': ''},
            'traces_rauc': {'show': False, 'disabled': False, 'reason': ''},
            'freqs': {'show': False, 'disabled': True, 'reason': 'Experimental and computationally expensive'},
            'freqs': {'show': False, 'disabled': True, 'reason': 'Disabled because feature is experimental and computationally expensive'},
            'spike_trains': {'show': True, 'disabled': False, 'reason': ''},
            'epochs': {'show': True, 'disabled': False, 'reason': ''},
            'epoch_encoder': {'show': True, 'disabled': False, 'reason': ''},
@@ -121,24 +125,26 @@ def __init__(self, metadata, blk, rauc_sigs = None, lazy = False):

        # warn about potential video sync problems
        if metadata['video_file'] is not None and metadata['video_offset'] is None:
            print('WARNING: Your video will likely be out of sync with your')
            print('data because video_offset is unspecified! Consider adding')
            print('it to your metadata.')
            logger.warning('Your video will likely be out of sync with your '
                           'data because video_offset is unspecified! '
                           'Consider adding it to your metadata.')
        if metadata['video_file'] is not None and metadata['video_jumps'] is None:
            approx_video_jumps = _estimate_video_jump_times(blk)
            if approx_video_jumps:
                print('WARNING: It seems that AxoGraph was paused at least once')
                print('during data acquisition, but video_jumps is unspecified.')
                print('This will cause your video and data to get out of sync.')
                print('Consider adding the following to your metadata:')
                print(' video_jumps:')
                for t, dur in approx_video_jumps:
                    print(f' - [{t}, {dur}]')
                print('Each ordered pair specifies the timing of a pause and')
                print('approximately how long the pause lasted in seconds. The')
                print('pause durations are only rough estimates +/- a second! You')
                print('should refine them by inspecting the video to make sure')
                print('your sync is accurate!')
                approx_video_jumps_recommendation = ' video_jumps:\n' + \
                    '\n'.join([f' - [{t}, {dur}]' for t, dur in approx_video_jumps])
                logger.warning('It seems that AxoGraph was paused at least '
                               'once during data acquisition, but video_jumps '
                               'is unspecified. This will cause your video '
                               'and data to get out of sync. Consider adding '
                               'the following to your metadata:'
                               f'\n{approx_video_jumps_recommendation}\n'
                               'Each ordered pair specifies the timing of a '
                               'pause and approximately how long the pause '
                               'lasted in seconds. The pause durations are '
                               'only rough estimates +/- a second! You should '
                               'refine them by inspecting the video to make '
                               'sure your sync is accurate!')

    def is_enabled(self, name):
        """
@@ -180,9 +186,9 @@ def show(self, name):
            if not self.viewer_settings[name]['disabled']:
                self.viewer_settings[name]['show'] = True
            else:
                print(self.viewer_settings[name]['reason'])
                logger.warning(self.viewer_settings[name]['reason'])
        else:
            print(f'"{name}" is not a viewer in viewer_settings')
            logger.critical(f'"{name}" is not a viewer in viewer_settings')

    def hide(self, name):
        """
@@ -191,7 +197,7 @@ def hide(self, name):
        if name in self.viewer_settings:
            self.viewer_settings[name]['show'] = False
        else:
            print(f'"{name}" is not a viewer in viewer_settings')
            logger.critical(f'"{name}" is not a viewer in viewer_settings')

    def show_all(self):
        """
@@ -322,7 +328,7 @@ def create_ephyviewer_window(self, theme='light', support_increased_line_width=F
                for channel in st.annotations['channels']:
                    index = plotNameToIndex.get(channel, None)
                    if index is None:
                        print('Note: Spike train {} will not be plotted on channel {} because that channel isn\'t being plotted'.format(st.name, channel))
                        logger.warning('Spike train {} will not be plotted on channel {} because that channel isn\'t being plotted'.format(st.name, channel))
                    else:
                        c.append(index)
                if c:
@@ -642,7 +648,7 @@ def _set_defaults_for_plots(metadata, blk):
    for plot in metadata['plots']:
        index = signalNameToIndex.get(plot['channel'], None)
        if index is None:
            print('Warning: removing plot with channel name "{}" because channel was not found in blk!'.format(plot['channel']))
            logger.warning('Removing plot with channel name "{}" because channel was not found in blk!'.format(plot['channel']))
        else:
            plot['index'] = index
            plot.setdefault('units', _default_units(sigs[index]))
4 changes: 4 additions & 0 deletions neurotic/gui/epochencoder.py
@@ -12,6 +12,10 @@
import pandas as pd
from ephyviewer import WritableEpochSource

import logging
logger = logging.getLogger(__name__)


class NeuroticWritableEpochSource(WritableEpochSource):
"""
A subclass of :class:`ephyviewer.datasource.epochs.WritableEpochSource` for