From c79e57ee3b22dff466d3152ba8ccaf2bb81cb9a8 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Tue, 7 Oct 2025 16:03:03 -0700 Subject: [PATCH 01/33] getting there --- pypeit/__init__.py | 57 +-------- pypeit/logger.py | 280 +++++++++++++++++++++++++++++++++++++++++++++ pypeit/pypmsgs.py | 2 +- 3 files changed, 283 insertions(+), 56 deletions(-) create mode 100644 pypeit/logger.py diff --git a/pypeit/__init__.py b/pypeit/__init__.py index d3b7fe41ba..f16e7a2a4d 100644 --- a/pypeit/__init__.py +++ b/pypeit/__init__.py @@ -9,67 +9,23 @@ import os import sys import signal -import warnings from .version import version -def short_warning(message, category, filename, lineno, file=None, line=None): - """ - Return the format for a short warning message. - """ - return ' %s: %s (%s:%s)\n' % (category.__name__, message, os.path.split(filename)[1], lineno) - -warnings.formatwarning = short_warning - -# NOTE: This is essentially a hack to deal with all the RankWarnings that numpy -# can throw during polynomial fitting. Specifically this happens frequently -# in pypeit.core.fitting.PypeItFit.fit. We should instead determine why these -# rank warnings are happening and address the root cause! -# 'default' means: "print the first occurrence of matching warnings for each -# location (module + line number) where the warning is issued" -# See: https://docs.python.org/3/library/warnings.html#warning-filter -import numpy as np -warnings.simplefilter('default', np.exceptions.RankWarning) - # Set version __version__ = version # Report current coverage __coverage__ = 0.55 -# Import and instantiate the user -# NOTE: This **MUST** come before instantiating the logger, msgs -try: - # There appears to be a bug in getpass in windows systems where the pwd - # module doesn't load - import getpass - pypeit_user = getpass.getuser() -except (ModuleNotFoundError, OSError): - pypeit_user = None -if pypeit_user is None: - try: - pypeit_user = os.getlogin() - except OSError: - pypeit_user = None -if pypeit_user is None: - # Assume the user is not defined - pypeit_user = 'unknownuser' - -# Import and instantiate the logger -# NOTE: This **MUST** be defined after __version__; i.e., pypmsgs imports pypeit -# and uses pypeit.__version__. -from pypeit import pypmsgs -msgs = pypmsgs.Messages() +from pypeit import logger +msgs = logger.get_logger() # Import and instantiate the data path parser # NOTE: This *MUST* come after msgs and __version__ are defined above from pypeit import pypeitdata dataPaths = pypeitdata.PypeItDataPaths() -# Import the close_qa method so that it can be called when a hard stop -# is requested by the user -from pypeit.core.qa import close_qa - # Send all signals to messages to be dealt with (i.e. someone hits ctrl+c) def signal_handler(signalnum, handler): """ @@ -77,16 +33,7 @@ def signal_handler(signalnum, handler): """ if signalnum == 2: msgs.info('Ctrl+C was pressed. Ending processes...') - close_qa(msgs.pypeit_file, msgs.qa_path) - msgs.close() sys.exit() signal.signal(signal.SIGINT, signal_handler) -# Ignore all warnings given by python -# TODO: I'd rather we not do this. Is there a way we can redirect -# warnings to pypeit.msgs ? -#warnings.resetwarnings() -#warnings.simplefilter('ignore') - - diff --git a/pypeit/logger.py b/pypeit/logger.py new file mode 100644 index 0000000000..5efed80426 --- /dev/null +++ b/pypeit/logger.py @@ -0,0 +1,280 @@ +""" +PypeIt logging + +Implementation heavily references loggers from astropy and sdsstools. 
+""" + +import copy +import inspect +import logging +from pathlib import Path +import re +import sys +from typing import Optional + +from IPython import embed + +import warnings +def short_warning(message, category, filename, lineno, file=None, line=None): + """ + Return the format for a short warning message. + """ + return f'{category.__name__}: {message}' + +warnings.formatwarning = short_warning + +# NOTE: This is essentially a hack to deal with all the RankWarnings that numpy +# can throw during polynomial fitting. Specifically this happens frequently +# in pypeit.core.fitting.PypeItFit.fit. We should instead determine why these +# rank warnings are happening and address the root cause! +# 'default' means: "print the first occurrence of matching warnings for each +# location (module + line number) where the warning is issued" +# See: https://docs.python.org/3/library/warnings.html#warning-filter +import numpy as np +warnings.simplefilter('default', np.exceptions.RankWarning) + +WARNING_RE = re.compile(r"^.*?\s*?(\w*?Warning): (.*)") + + +def color_text(text, color, bold=False): + msg = '\033[1;' if bold else '\033[' + return f'{msg}38;2;{color[0]};{color[1]};{color[2]}m{text}\033[0m' + + +class StreamFormatter(logging.Formatter): + """Custom `Formatter ` for the stream handler.""" + + base_level = None + + def format(self, record): + + level_colors = { + 'debug': [116, 173, 209], + 'info': [49, 54, 149], + 'warning': [253, 174, 97], + 'error': [215, 48, 39], + 'critical': [165, 0, 38], + } + inspect_color = level_colors['debug'] + + rec = copy.copy(record) + levelname = rec.levelname.lower() + if levelname not in level_colors: + return logging.Formatter.format(self, record) + + msg = color_text(f'[{levelname.upper()}]', level_colors[levelname], bold=True) + if self.base_level == logging.DEBUG: + # If including debug messages, include file inspection in *all* log + # messages. + msg += ' - ' + color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', inspect_color) + msg += ' - ' + rec.msg + rec.msg = msg + +# if levelname == "warning" and rec.args and len(rec.args) > 0: +# warning_category_groups = WARNING_RE.match(rec.args[0]) +# if warning_category_groups is not None: +# wcategory, wtext = warning_category_groups.groups() +# wcategory_colour = color_text(wcategory, level_colors['warning']) +# message = f'{color_text(wtext, [256, 256, 256])}' + wcategory_colour +# rec.args = tuple([message] + list(args[1:])) + + return logging.Formatter.format(self, rec) + + +class DebugStreamFormatter(StreamFormatter): + base_level = logging.DEBUG + + +class FileFormatter(logging.Formatter): + """Custom `Formatter ` for the file handler.""" + + base_fmt = "%(levelname)8s | %(asctime)s | %(filename)s:%(funcName)s:%(lineno)s | %(message)s" + ansi_escape = re.compile(r'\x1b[^m]*m') + + def __init__(self, fmt=base_fmt): + logging.Formatter.__init__(self, fmt, datefmt='%Y-%m-%d %H:%M:%S') + + def format(self, record): + # Copy the record so that any modifications we make do not + # affect how the record is displayed in other handlers. + record_cp = copy.copy(record) + + record_cp.msg = self.ansi_escape.sub("", record_cp.msg) + + # TODO: Pulled this from sdsstools, but I'm not sure if it's still + # relevant + args = list(record_cp.args) + + # The format of a warnings redirected with warnings.captureWarnings + # has the format : : message\n . + # We reorganise that into a cleaner message. For some reason in this + # case the message is in record.args instead of in record.msg. 
+ if ( + record_cp.levelno == logging.WARNING + and record_cp.args + and len(record_cp.args) > 0 + ): + match = re.match(r"^(.*?):\s*?(\w*?Warning): (.*)", args[0]) + if match: + message = "{1} - {2} [{0}]".format(*match.groups()) + record_cp.args = tuple([message] + list(args[1:])) + + return logging.Formatter.format(self, record_cp) + + +class PypeItLogger(logging.Logger): + """ + Custom logging system for pypeit. + + This borrows heavily from implementations in astropy and sdsstools. + """ + _excepthook_orig = None + + def init(self, + level: int = logging.INFO, + capture_exceptions: bool = True, + capture_warnings: bool = True, + log_file: Optional[str | Path] = None, + log_file_level: Optional[int] = None, + ): + """ + Initialise the logger. + + Parameters + ---------- + level + The logging level printed to the console + capture_exceptions + Override the exception hook and redirect all exceptions to the + logging system. + capture_warnings + Capture warnings and redirect them to the log. + log_file + Name for a log file. If None, logging is only recorded to the + console. If the file provided already exists, it will be + ovewritten! + log_file_level + The logging level specific to the log file. If None, adopt the + console logging level. + """ + self.warnings_logger = logging.getLogger("py.warnings") + + self.setLevel(logging.DEBUG) + + # Clear handlers before recreating. + for handler in self.handlers.copy(): + if handler in self.warnings_logger.handlers: + # Remove any added to the warnings logger + self.warnings_logger.removeHandler(handler) + self.removeHandler(handler) + + # Reset the exception hook (only if it was reset by this logger) + if self._excepthook_orig is not None and sys.excepthook == self._excepthook: + sys.excepthook = self._excepthook_orig + self._excepthook_orig = None + + # Catches exceptions + if capture_exceptions: + self._excepthook_orig = sys.excepthook + sys.excepthook = self._excepthook + + # Set the stream handler + self.sh = logging.StreamHandler() + formatter = DebugStreamFormatter() if level <= logging.DEBUG else StreamFormatter() + self.sh.setFormatter(formatter) + self.sh.setLevel(level) + self.addHandler(self.sh) + + if capture_warnings: + logging.captureWarnings(True) + + # Only enable the sh handler if none is attached to the warnings + # logger yet. Prevents duplicated prints of the warnings. + for handler in self.warnings_logger.handlers: + if isinstance(handler, logging.StreamHandler): + return + + self.warnings_logger.addHandler(self.sh) + + # Get the file handler + if log_file is None: + self.fh = None + self.log_filename = None + else: + if log_file_level is None: + log_file_level = level + self.log_file = Path(log_file).absolute() + self.fh = logging.FileHandler(str(self.log_file), mode='w') + self.fh.setFormatter(FileFormatter()) + self.fh.setLevel(log_file_level) + self.addHandler(self.fh) + + if self.warnings_logger: + self.warnings_logger.addHandler(self.fh) + + def _excepthook(self, etype, value, traceback): + if traceback is None: + mod = None + else: + tb = traceback + while tb.tb_next is not None: + tb = tb.tb_next + mod = inspect.getmodule(tb) + + # include the error type in the message. 
+ if len(value.args) > 0: + message = f"{etype.__name__}: {str(value)}" + else: + message = str(etype.__name__) + + if mod is not None: + self.error(message, extra={"origin": mod.__name__}) + else: + self.error(message) + self._excepthook_orig(etype, value, traceback) + + +def get_logger( + level: int = logging.INFO, + capture_exceptions: bool = True, + capture_warnings: bool = True, + log_file: Optional[str | Path] = None, + log_file_level: Optional[int] = None, +): + """ + Instantiate a new logger. + + Parameters + ---------- + level + The logging level printed to the console + capture_exceptions + Override the exception hook and redirect all exceptions to the + logging system. + capture_warnings + Capture warnings and redirect them to the log. + log_file + Name for a log file. If None, logging is only recorded to the + console. If the file provided already exists, it will be + ovewritten! + log_file_level + The logging level specific to the log file. If None, adopt the + console logging level. + """ + + orig_logger = logging.getLoggerClass() + logging.setLoggerClass(PypeItLogger) + + try: + log = logging.getLogger("pypeit") + log.init( + level=level, + capture_exceptions=capture_exceptions, + capture_warnings=capture_warnings, + log_file=log_file, + log_file_level=log_file_level + ) + finally: + logging.setLoggerClass(orig_logger) + + return log diff --git a/pypeit/pypmsgs.py b/pypeit/pypmsgs.py index 7feb9f0384..902fcb6b1f 100644 --- a/pypeit/pypmsgs.py +++ b/pypeit/pypmsgs.py @@ -18,7 +18,7 @@ import pypeit from pypeit.core.qa import close_qa -from pypeit import pypeit_user +#from pypeit import pypeit_user #pypeit_logger = None From 736f9ac896bf48d96c80d1d2c7856b976eb85fdd Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 09:37:56 -0700 Subject: [PATCH 02/33] error handling --- pypeit/__init__.py | 3 +- pypeit/cache.py | 30 +++--- pypeit/coadd2d.py | 28 +++--- pypeit/logger.py | 146 +++++++++++++++++++---------- pypeit/pypeit.py | 19 ++-- pypeit/scripts/arxiv_solution.py | 2 +- pypeit/scripts/coadd_1dspec.py | 2 +- pypeit/scripts/coadd_2dspec.py | 2 +- pypeit/scripts/coadd_datacube.py | 2 +- pypeit/scripts/collate_1d.py | 2 +- pypeit/scripts/extract_datacube.py | 2 +- pypeit/scripts/flux_calib.py | 2 +- pypeit/scripts/identify.py | 2 +- pypeit/scripts/run_pypeit.py | 17 ++-- pypeit/scripts/sensfunc.py | 2 +- pypeit/scripts/setup.py | 4 +- pypeit/scripts/show_2dspec.py | 2 +- pypeit/scripts/tellfit.py | 2 +- pypeit/scripts/trace_edges.py | 2 +- pypeit/tests/test_runpypeit.py | 1 + 20 files changed, 158 insertions(+), 114 deletions(-) diff --git a/pypeit/__init__.py b/pypeit/__init__.py index f16e7a2a4d..ee6ad2942a 100644 --- a/pypeit/__init__.py +++ b/pypeit/__init__.py @@ -18,8 +18,9 @@ # Report current coverage __coverage__ = 0.55 +import logging from pypeit import logger -msgs = logger.get_logger() +msgs = logger.get_logger(level=logging.DEBUG) # Import and instantiate the data path parser # NOTE: This *MUST* come after msgs and __version__ are defined above diff --git a/pypeit/cache.py b/pypeit/cache.py index 86756ddbac..34a737da50 100644 --- a/pypeit/cache.py +++ b/pypeit/cache.py @@ -260,26 +260,26 @@ def fetch_remote_file( in [requests.codes.forbidden, requests.codes.not_found] ): err_msg = ( - f"The file {filename}{msgs.newline()}" - f"is not hosted in the cloud. Please download this file from{msgs.newline()}" - f"the PypeIt Google Drive and install it using the script{msgs.newline()}" - f"pypeit_install_telluric --local. 
See instructions at{msgs.newline()}"
+                f"The file {filename}"
+                f"is not hosted in the cloud. Please download this file from"
+                f"the PypeIt Google Drive and install it using the script"
+                f"pypeit_install_telluric --local. See instructions at"
                 "https://pypeit.readthedocs.io/en/latest/installing.html#additional-data"
             )
         elif filetype == "arc_lines/lists":
             err_msg = (
-                f"Cannot find local arc line list {filename}{msgs.newline()}"
-                f"Use the script `pypeit_install_linelist` to install{msgs.newline()}"
-                f"your custom line list into the cache. See instructions at{msgs.newline()}"
+                f"Cannot find local arc line list {filename}"
+                f"Use the script `pypeit_install_linelist` to install"
+                f"your custom line list into the cache. See instructions at"
                 "https://pypeit.readthedocs.io/en/latest/wave_calib.html#line-lists"
             )
         elif filetype == "extinction":
             err_msg = (
-                f"Cannot find local extinction file {filename}{msgs.newline()}"
-                f"Use the script `pypeit_install_extinctfile` to install{msgs.newline()}"
-                f"your custom extinction file into the cache. See instructions at{msgs.newline()}"
+                f"Cannot find local extinction file {filename}"
+                f"Use the script `pypeit_install_extinctfile` to install"
+                f"your custom extinction file into the cache. See instructions at"
                 "https://pypeit.readthedocs.io/en/latest/fluxing.html#extinction-correction"
             )
@@ -288,11 +288,11 @@ def fetch_remote_file(
         else:
             err_msg = (
-                f"Error downloading {filename}: {error}{msgs.newline()}"
-                f"URL attempted: {remote_url}{msgs.newline()}"
-                f"If the error relates to the server not being found,{msgs.newline()}"
-                f"check your internet connection. If the remote server{msgs.newline()}"
-                f"name has changed, please contact the PypeIt development{msgs.newline()}"
+                f"Error downloading {filename}: {error}"
+                f"URL attempted: {remote_url}"
+                f"If the error relates to the server not being found,"
+                f"check your internet connection. If the remote server"
+                f"name has changed, please contact the PypeIt development"
                 "team."
) diff --git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index ca7d2265df..e809b9df45 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -848,13 +848,13 @@ def offsets_report(offsets, pixscale, offsets_method): """ if offsets_method is not None and offsets is not None: - msg_string = msgs.newline() + '---------------------------------------------------------------------------------' - msg_string += msgs.newline() + ' Summary of offsets from {} '.format(offsets_method) - msg_string += msgs.newline() + '---------------------------------------------------------------------------------' - msg_string += msgs.newline() + ' exp# offset (pixels) offset (arcsec)' + msg_string = '---------------------------------------------------------------------------------' + msg_string += ' Summary of offsets from {} '.format(offsets_method) + msg_string += '---------------------------------------------------------------------------------' + msg_string += ' exp# offset (pixels) offset (arcsec)' for iexp, off in enumerate(offsets): - msg_string += msgs.newline() + ' {:2d} {:6.2f} {:6.3f}'.format(iexp, off, off*pixscale) - msg_string += msgs.newline() + '---------------------------------------------------------------------------------' + msg_string += ' {:2d} {:6.2f} {:6.3f}'.format(iexp, off, off*pixscale) + msg_string += '---------------------------------------------------------------------------------' msgs.info(msg_string) def offset_slit_cen(self, slitid, offsets): @@ -1599,15 +1599,15 @@ def snr_report(self, slitid, spat_pixpos, snr_bar): """ # Print out a report on the SNR - msg_string = msgs.newline() + '-------------------------------------' - msg_string += msgs.newline() + ' Summary for highest S/N object' - msg_string += msgs.newline() + ' found on slitid = {:d} '.format(slitid) - msg_string += msgs.newline() + '-------------------------------------' - msg_string += msgs.newline() + ' exp# spat_pixpos S/N' - msg_string += msgs.newline() + '-------------------------------------' + msg_string = '-------------------------------------' + msg_string += ' Summary for highest S/N object' + msg_string += ' found on slitid = {:d} '.format(slitid) + msg_string += '-------------------------------------' + msg_string += ' exp# spat_pixpos S/N' + msg_string += '-------------------------------------' for iexp, (spat,snr) in enumerate(zip(spat_pixpos, snr_bar)): - msg_string += msgs.newline() + ' {:2d} {:7.1f} {:5.2f}'.format(iexp, spat, snr) - msg_string += msgs.newline() + '-------------------------------------' + msg_string += ' {:2d} {:7.1f} {:5.2f}'.format(iexp, spat, snr) + msg_string += '-------------------------------------' msgs.info(msg_string) diff --git a/pypeit/logger.py b/pypeit/logger.py index 5efed80426..93f81b3be7 100644 --- a/pypeit/logger.py +++ b/pypeit/logger.py @@ -4,6 +4,7 @@ Implementation heavily references loggers from astropy and sdsstools. 
""" +import traceback import copy import inspect import logging @@ -33,16 +34,20 @@ def short_warning(message, category, filename, lineno, file=None, line=None): import numpy as np warnings.simplefilter('default', np.exceptions.RankWarning) -WARNING_RE = re.compile(r"^.*?\s*?(\w*?Warning): (.*)") +#WARNING_RE = re.compile(r"^.*?\s*?(\w*?Warning): (.*)") -def color_text(text, color, bold=False): + +def color_text(text, color, bold=False, nchar=None): msg = '\033[1;' if bold else '\033[' - return f'{msg}38;2;{color[0]};{color[1]};{color[2]}m{text}\033[0m' + _text = f'{text}' if nchar is None else f'{text:>{nchar}}' + return f'{msg}38;2;{color[0]};{color[1]};{color[2]}m{_text}\033[0m' class StreamFormatter(logging.Formatter): - """Custom `Formatter ` for the stream handler.""" + """ + Custom `Formatter ` for the stream handler. + """ base_level = None @@ -60,16 +65,21 @@ def format(self, record): rec = copy.copy(record) levelname = rec.levelname.lower() if levelname not in level_colors: + # Unknown level name so default to the standard formatter return logging.Formatter.format(self, record) - msg = color_text(f'[{levelname.upper()}]', level_colors[levelname], bold=True) + # Add the level in colored text + msg = color_text(f'[{levelname.upper()}]', level_colors[levelname], bold=True, nchar=10) + msg += ' : ' if self.base_level == logging.DEBUG: # If including debug messages, include file inspection in *all* log # messages. - msg += ' - ' + color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', inspect_color) - msg += ' - ' + rec.msg - rec.msg = msg + msg += color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', inspect_color) + ' : ' + # Add the message header + rec.msg = msg + rec.msg +# NOTE: This is in the sdsstools looger, but I'm not sure what it does. I have +# commented it out for the moment, but we may want to bring it back. # if levelname == "warning" and rec.args and len(rec.args) > 0: # warning_category_groups = WARNING_RE.match(rec.args[0]) # if warning_category_groups is not None: @@ -78,49 +88,54 @@ def format(self, record): # message = f'{color_text(wtext, [256, 256, 256])}' + wcategory_colour # rec.args = tuple([message] + list(args[1:])) + # Return the base formatting return logging.Formatter.format(self, rec) class DebugStreamFormatter(StreamFormatter): + """ + Set the base logging level to DEBUG + """ base_level = logging.DEBUG class FileFormatter(logging.Formatter): - """Custom `Formatter ` for the file handler.""" + """ + Custom `Formatter ` for the file handler. + """ base_fmt = "%(levelname)8s | %(asctime)s | %(filename)s:%(funcName)s:%(lineno)s | %(message)s" - ansi_escape = re.compile(r'\x1b[^m]*m') +# ansi_escape = re.compile(r'\x1b[^m]*m') def __init__(self, fmt=base_fmt): logging.Formatter.__init__(self, fmt, datefmt='%Y-%m-%d %H:%M:%S') - def format(self, record): - # Copy the record so that any modifications we make do not - # affect how the record is displayed in other handlers. - record_cp = copy.copy(record) - - record_cp.msg = self.ansi_escape.sub("", record_cp.msg) - - # TODO: Pulled this from sdsstools, but I'm not sure if it's still - # relevant - args = list(record_cp.args) - - # The format of a warnings redirected with warnings.captureWarnings - # has the format : : message\n . - # We reorganise that into a cleaner message. For some reason in this - # case the message is in record.args instead of in record.msg. 
- if ( - record_cp.levelno == logging.WARNING - and record_cp.args - and len(record_cp.args) > 0 - ): - match = re.match(r"^(.*?):\s*?(\w*?Warning): (.*)", args[0]) - if match: - message = "{1} - {2} [{0}]".format(*match.groups()) - record_cp.args = tuple([message] + list(args[1:])) - - return logging.Formatter.format(self, record_cp) - +# def format(self, record): +# # Copy the record so that any modifications we make do not +# # affect how the record is displayed in other handlers. +# record_cp = copy.copy(record) +# +# record_cp.msg = self.ansi_escape.sub("", record_cp.msg) +# +# # TODO: Pulled this from sdsstools, but I'm not sure if it's still +# # relevant +# args = list(record_cp.args) +# +# # The format of a warnings redirected with warnings.captureWarnings +# # has the format : : message\n . +# # We reorganise that into a cleaner message. For some reason in this +# # case the message is in record.args instead of in record.msg. +# if ( +# record_cp.levelno == logging.WARNING +# and record_cp.args +# and len(record_cp.args) > 0 +# ): +# match = re.match(r"^(.*?):\s*?(\w*?Warning): (.*)", args[0]) +# if match: +# message = "{1} - {2} [{0}]".format(*match.groups()) +# record_cp.args = tuple([message] + list(args[1:])) +# +# return logging.Formatter.format(self, record_cp) class PypeItLogger(logging.Logger): """ @@ -173,7 +188,7 @@ def init(self, sys.excepthook = self._excepthook_orig self._excepthook_orig = None - # Catches exceptions + # Catch and parse exceptions if capture_exceptions: self._excepthook_orig = sys.excepthook sys.excepthook = self._excepthook @@ -212,27 +227,58 @@ def init(self, if self.warnings_logger: self.warnings_logger.addHandler(self.fh) - def _excepthook(self, etype, value, traceback): - if traceback is None: - mod = None + def _excepthook(self, etype, value, trace): + """ + Override the default exception hook to log an error message. + """ + tb = trace + if tb is None: + exc_info = None else: - tb = traceback - while tb.tb_next is not None: + # If the traceback is available, jump to the calling frame, which + # gets passed to makeRecord + while tb.tb_next: tb = tb.tb_next - mod = inspect.getmodule(tb) + exc_info = (etype, value, tb) - # include the error type in the message. + # Add the error type to the message. if len(value.args) > 0: message = f"{etype.__name__}: {str(value)}" else: message = str(etype.__name__) - if mod is not None: - self.error(message, extra={"origin": mod.__name__}) - else: - self.error(message) - self._excepthook_orig(etype, value, traceback) + # Log the error + self.error(message, exc_info=exc_info) + + # Call the original exception hook + self._excepthook_orig(etype, value, trace) + def makeRecord( + self, name, level, pathname, lineno, msg, args, exc_info, func=None, extra=None, + sinfo=None + ): + """ + Override the default makeRecord function to rework the message for exceptions. + """ + # If this is an error message, the execution information is provided, + # and the error originates from the exception hook, reset the frame + # information (file, function, and line number) to the calling function. + if (level == logging.ERROR + and exc_info is not None + and Path(pathname).name == 'logger.py' + and func is not None + and func == '_excepthook' + ): + calling_frame = traceback.extract_tb(exc_info[2])[-1] + pathname = calling_frame.filename + lineno = calling_frame.lineno + func = calling_frame.name + # This keeps the traceback from being printed twice! 
+ exc_info = None + return logging.Logger.makeRecord( + self, name, level, pathname, lineno, msg, args, exc_info, func=func, extra=extra, + sinfo=sinfo + ) def get_logger( level: int = logging.INFO, diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 64d36eb7a7..a81ae05863 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -91,7 +91,7 @@ def __init__(self, pypeit_file, verbosity=2, overwrite=True, reuse_calibs=False, self.verbosity = verbosity self.pypeit_file = pypeit_file - self.msgs_reset() + msgs.init(level=msgs.level, log_file=self.logname) # Load up PypeIt file self.pypeItFile = inputfiles.PypeItFile.from_file(pypeit_file) @@ -175,7 +175,7 @@ def build_qa(self): """ Generate QA wrappers """ - msgs.qa_path = self.qa_path +# msgs.qa_path = self.qa_path qa.gen_qa_dir(self.qa_path) qa.gen_mf_html(self.pypeit_file, self.qa_path) qa.gen_exp_html() @@ -1255,14 +1255,13 @@ def save_exposure(self, frame:int, all_spec2d:spec2dobj.AllSpec2DObj, update_det=update_det, slitspatnum=self.par['rdx']['slitspatnum']) - def msgs_reset(self): - """ - Reset the msgs object - """ - - # Reset the global logger - msgs.reset(log=self.logname, verbosity=self.verbosity) - msgs.pypeit_file = self.pypeit_file +# def msgs_reset(self): +# """ +# Reset the msgs object +# """ +# # Reset the global logger +# msgs.reset(log=self.logname, verbosity=self.verbosity) +# msgs.pypeit_file = self.pypeit_file def print_end_time(self): """ diff --git a/pypeit/scripts/arxiv_solution.py b/pypeit/scripts/arxiv_solution.py index dfaecdc26a..8370e6d9c5 100644 --- a/pypeit/scripts/arxiv_solution.py +++ b/pypeit/scripts/arxiv_solution.py @@ -38,7 +38,7 @@ def main(args): chk_version = not args.try_old # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('arxiv_solution', args.verbosity) +# msgs.set_logfile_and_verbosity('arxiv_solution', args.verbosity) # Check that a file has been provided if args.file is None: diff --git a/pypeit/scripts/coadd_1dspec.py b/pypeit/scripts/coadd_1dspec.py index 98306e1126..5cce61cb00 100644 --- a/pypeit/scripts/coadd_1dspec.py +++ b/pypeit/scripts/coadd_1dspec.py @@ -151,7 +151,7 @@ def main(args): """ Runs the 1d coadding steps """ # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('coadd_1dspec', args.verbosity) +# msgs.set_logfile_and_verbosity('coadd_1dspec', args.verbosity) # Load the file #config_lines, spec1dfiles, objids = read_coaddfile(args.coadd1d_file) diff --git a/pypeit/scripts/coadd_2dspec.py b/pypeit/scripts/coadd_2dspec.py index 6e8629ac34..8d8bb80cb2 100644 --- a/pypeit/scripts/coadd_2dspec.py +++ b/pypeit/scripts/coadd_2dspec.py @@ -65,7 +65,7 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('coadd_2dspec', args.verbosity) +# msgs.set_logfile_and_verbosity('coadd_2dspec', args.verbosity) # Load the file coadd2dFile = inputfiles.Coadd2DFile.from_file(args.coadd2d_file) diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index d3c9c0aaa7..a07d935386 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -35,7 +35,7 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('coadd_datacube', args.verbosity) +# msgs.set_logfile_and_verbosity('coadd_datacube', args.verbosity) # Check that a file has been provided 
if args.file is None: diff --git a/pypeit/scripts/collate_1d.py b/pypeit/scripts/collate_1d.py index d9d7b71e47..a8d72e2759 100644 --- a/pypeit/scripts/collate_1d.py +++ b/pypeit/scripts/collate_1d.py @@ -729,7 +729,7 @@ def get_parser(cls, width=None): def main(args): # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('collate_1d', args.verbosity) +# msgs.set_logfile_and_verbosity('collate_1d', args.verbosity) start_time = datetime.now() (par, spectrograph, spec1d_files) = build_parameters(args) diff --git a/pypeit/scripts/extract_datacube.py b/pypeit/scripts/extract_datacube.py index f819c4aad6..ce52d04119 100644 --- a/pypeit/scripts/extract_datacube.py +++ b/pypeit/scripts/extract_datacube.py @@ -44,7 +44,7 @@ def main(args): from pypeit.coadd3d import DataCube # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('extract_datacube', args.verbosity) +# msgs.set_logfile_and_verbosity('extract_datacube', args.verbosity) # Check that a file has been provided if args.file is None: diff --git a/pypeit/scripts/flux_calib.py b/pypeit/scripts/flux_calib.py index 8155da6a51..1bd78b65f7 100644 --- a/pypeit/scripts/flux_calib.py +++ b/pypeit/scripts/flux_calib.py @@ -78,7 +78,7 @@ def main(args): chk_version = not args.try_old # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('flux_calib', args.verbosity) +# msgs.set_logfile_and_verbosity('flux_calib', args.verbosity) # Load the file fluxFile = inputfiles.FluxFile.from_file(args.flux_file) diff --git a/pypeit/scripts/identify.py b/pypeit/scripts/identify.py index 9690fc35d1..0260c4b3e8 100644 --- a/pypeit/scripts/identify.py +++ b/pypeit/scripts/identify.py @@ -73,7 +73,7 @@ def main(args): chk_version = not args.try_old # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('identify', args.verbosity) +# msgs.set_logfile_and_verbosity('identify', args.verbosity) # Load the Arc file msarc = ArcImage.from_file(args.arc_file, chk_version=chk_version) diff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py index d5b3302e78..ae2e6b68ea 100644 --- a/pypeit/scripts/run_pypeit.py +++ b/pypeit/scripts/run_pypeit.py @@ -31,21 +31,18 @@ def usage(cls): """ Print pypeit usage description. 
""" + from pypeit import __version__ + + descr = 'PypeIt: The Python Spectroscopic Data Reduction Pipeline\n' + descr += f'Version {__version__}\n\n' import textwrap - import pypeit from pypeit.spectrographs import available_spectrographs - spclist = ', '.join(available_spectrographs) spcl = textwrap.wrap(spclist, width=70) - descs = '## ' - descs += '\x1B[1;37;42m' + 'PypeIt : ' - descs += 'The Python Spectroscopic Data Reduction Pipeline v{0:s}'.format(pypeit.__version__) \ - + '\x1B[' + '0m' + '\n' - descs += '## ' - descs += '\n## Available spectrographs include:' + descr += 'Available spectrographs include:\n' for ispcl in spcl: - descs += '\n## ' + ispcl - return descs + descr += f' {ispcl}\n' + return descr @classmethod def get_parser(cls, width=None): diff --git a/pypeit/scripts/sensfunc.py b/pypeit/scripts/sensfunc.py index 6130da9664..4f983f07ae 100644 --- a/pypeit/scripts/sensfunc.py +++ b/pypeit/scripts/sensfunc.py @@ -100,7 +100,7 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('sensfunc', args.verbosity) +# msgs.set_logfile_and_verbosity('sensfunc', args.verbosity) # Check parameter inputs if args.algorithm is not None and args.sens_file is not None: diff --git a/pypeit/scripts/setup.py b/pypeit/scripts/setup.py index aee936337e..aee0b49c5a 100644 --- a/pypeit/scripts/setup.py +++ b/pypeit/scripts/setup.py @@ -99,8 +99,8 @@ def main(args): # Start the GUI from pypeit.setup_gui.controller import start_gui start_gui(args) - else: - msgs.set_logfile_and_verbosity("setup", args.verbosity) +# else: +# msgs.set_logfile_and_verbosity("setup", args.verbosity) # Initialize PypeItSetup based on the arguments ps = PypeItSetup.from_file_root(args.root, args.spectrograph, extension=args.extension) diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index fd16e874fd..8d7b751407 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -140,7 +140,7 @@ def main(args): return # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('show_2dspec', args.verbosity) +# msgs.set_logfile_and_verbosity('show_2dspec', args.verbosity) # Parse the detector name if args.det is None: diff --git a/pypeit/scripts/tellfit.py b/pypeit/scripts/tellfit.py index acaf87177b..8ec24efbee 100644 --- a/pypeit/scripts/tellfit.py +++ b/pypeit/scripts/tellfit.py @@ -79,7 +79,7 @@ def main(args): from pypeit import inputfiles # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('tellfit', args.verbosity) +# msgs.set_logfile_and_verbosity('tellfit', args.verbosity) # Determine the spectrograph header = fits.getheader(args.spec1dfile) diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index 46c1d394d7..b766df3671 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -74,7 +74,7 @@ def main(args): from IPython import embed # Set the verbosity, and create a logfile if verbosity == 2 - msgs.set_logfile_and_verbosity('trace_edges', args.verbosity) +# msgs.set_logfile_and_verbosity('trace_edges', args.verbosity) if args.show: msgs.warn('"show" option is deprecated. 
Setting debug = 1.') diff --git a/pypeit/tests/test_runpypeit.py b/pypeit/tests/test_runpypeit.py index 65167d2dec..86ed9acdda 100644 --- a/pypeit/tests/test_runpypeit.py +++ b/pypeit/tests/test_runpypeit.py @@ -117,4 +117,5 @@ def test_run_pypeit(): # Clean-up shutil.rmtree(outdir) +test_run_pypeit() From 9dd92d41ac31cdf820eaa15c170339bb82a6ad54 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 13:49:46 -0700 Subject: [PATCH 03/33] find replace --- pypeit/__init__.py | 1 + pypeit/alignframe.py | 8 +- pypeit/archive.py | 4 +- pypeit/cache.py | 16 +- pypeit/calibframe.py | 24 +-- pypeit/calibrations.py | 82 ++++----- pypeit/coadd1d.py | 26 +-- pypeit/coadd2d.py | 92 +++++----- pypeit/coadd3d.py | 62 +++---- pypeit/core/arc.py | 12 +- pypeit/core/coadd.py | 48 ++--- pypeit/core/collate.py | 4 +- pypeit/core/combine.py | 26 +-- pypeit/core/datacube.py | 30 ++-- pypeit/core/extract.py | 22 +-- pypeit/core/findobj_skymask.py | 38 ++-- pypeit/core/fitting.py | 38 ++-- pypeit/core/flat.py | 34 ++-- pypeit/core/flexure.py | 72 ++++---- pypeit/core/flux_calib.py | 54 +++--- pypeit/core/framematch.py | 16 +- pypeit/core/gui/edge_inspector.py | 4 +- pypeit/core/gui/identify.py | 22 +-- pypeit/core/gui/skysub_regions.py | 4 +- pypeit/core/mosaic.py | 22 +-- pypeit/core/parse.py | 10 +- pypeit/core/pixels.py | 12 +- pypeit/core/procimg.py | 74 ++++---- pypeit/core/pydl.py | 34 ++-- pypeit/core/scattlight.py | 6 +- pypeit/core/skysub.py | 50 +++--- pypeit/core/slitdesign_matching.py | 8 +- pypeit/core/telluric.py | 34 ++-- pypeit/core/trace.py | 24 +-- pypeit/core/tracewave.py | 6 +- pypeit/core/transform.py | 8 +- pypeit/core/wave.py | 2 +- pypeit/core/wavecal/autoid.py | 98 +++++------ pypeit/core/wavecal/waveio.py | 6 +- pypeit/core/wavecal/wv_fitting.py | 10 +- pypeit/core/wavecal/wvutils.py | 30 ++-- pypeit/datamodel.py | 18 +- pypeit/display/display.py | 18 +- pypeit/edgetrace.py | 214 +++++++++++------------ pypeit/extraction.py | 24 +-- pypeit/find_objects.py | 18 +- pypeit/flatfield.py | 98 +++++------ pypeit/images/bitmaskarray.py | 6 +- pypeit/images/buildimage.py | 4 +- pypeit/images/combineimage.py | 16 +- pypeit/images/mosaic.py | 10 +- pypeit/images/pypeitimage.py | 38 ++-- pypeit/images/rawimage.py | 90 +++++----- pypeit/inputfiles.py | 42 ++--- pypeit/io.py | 16 +- pypeit/manual_extract.py | 2 +- pypeit/metadata.py | 70 ++++---- pypeit/par/parset.py | 8 +- pypeit/par/pypeitpar.py | 16 +- pypeit/par/util.py | 2 +- pypeit/pypeit.py | 28 +-- pypeit/pypeitdata.py | 8 +- pypeit/pypeitsetup.py | 10 +- pypeit/scattlight.py | 2 +- pypeit/scripts/arxiv_solution.py | 6 +- pypeit/scripts/cache_github_data.py | 2 +- pypeit/scripts/chk_edges.py | 4 +- pypeit/scripts/chk_flexure.py | 4 +- pypeit/scripts/chk_for_calibs.py | 6 +- pypeit/scripts/chk_noise_1dspec.py | 2 +- pypeit/scripts/chk_noise_2dspec.py | 6 +- pypeit/scripts/chk_plugins.py | 2 +- pypeit/scripts/chk_scattlight.py | 4 +- pypeit/scripts/chk_wavecalib.py | 4 +- pypeit/scripts/clean_cache.py | 4 +- pypeit/scripts/coadd_1dspec.py | 4 +- pypeit/scripts/coadd_2dspec.py | 8 +- pypeit/scripts/coadd_datacube.py | 2 +- pypeit/scripts/collate_1d.py | 26 +-- pypeit/scripts/compile_wvarxiv.py | 4 +- pypeit/scripts/extract_datacube.py | 2 +- pypeit/scripts/flux_calib.py | 2 +- pypeit/scripts/flux_setup.py | 2 +- pypeit/scripts/identify.py | 2 +- pypeit/scripts/install_extinctfile.py | 2 +- pypeit/scripts/install_linelist.py | 2 +- pypeit/scripts/install_wvarxiv.py | 2 +- pypeit/scripts/parse_slits.py | 4 +- 
pypeit/scripts/print_bpm.py | 2 +- pypeit/scripts/ql.py | 46 ++--- pypeit/scripts/run_pypeit.py | 2 +- pypeit/scripts/run_to_calibstep.py | 10 +- pypeit/scripts/sensfunc.py | 8 +- pypeit/scripts/setup.py | 2 +- pypeit/scripts/setup_coadd2d.py | 10 +- pypeit/scripts/show_1dspec.py | 10 +- pypeit/scripts/show_2dspec.py | 28 +-- pypeit/scripts/show_pixflat.py | 6 +- pypeit/scripts/skysub_regions.py | 2 +- pypeit/scripts/tellfit.py | 8 +- pypeit/scripts/trace_edges.py | 10 +- pypeit/scripts/view_fits.py | 14 +- pypeit/sensfilearchive.py | 2 +- pypeit/sensfunc.py | 28 +-- pypeit/setup_gui/controller.py | 14 +- pypeit/setup_gui/dialog_helpers.py | 2 +- pypeit/setup_gui/model.py | 6 +- pypeit/setup_gui/view.py | 6 +- pypeit/slittrace.py | 74 ++++---- pypeit/spec2dobj.py | 22 +-- pypeit/specobj.py | 28 +-- pypeit/specobjs.py | 48 ++--- pypeit/spectrographs/aat_uhrf.py | 6 +- pypeit/spectrographs/apf_levy.py | 8 +- pypeit/spectrographs/bok_bc.py | 8 +- pypeit/spectrographs/gemini_flamingos.py | 4 +- pypeit/spectrographs/gemini_gmos.py | 12 +- pypeit/spectrographs/gemini_gnirs.py | 34 ++-- pypeit/spectrographs/gtc_osiris.py | 32 ++-- pypeit/spectrographs/jwst_nirspec.py | 2 +- pypeit/spectrographs/keck_deimos.py | 32 ++-- pypeit/spectrographs/keck_esi.py | 8 +- pypeit/spectrographs/keck_hires.py | 18 +- pypeit/spectrographs/keck_kcwi.py | 32 ++-- pypeit/spectrographs/keck_lris.py | 58 +++--- pypeit/spectrographs/keck_mosfire.py | 12 +- pypeit/spectrographs/keck_nires.py | 2 +- pypeit/spectrographs/keck_nirspec.py | 14 +- pypeit/spectrographs/lbt_luci.py | 12 +- pypeit/spectrographs/lbt_mods.py | 4 +- pypeit/spectrographs/ldt_deveny.py | 12 +- pypeit/spectrographs/magellan_fire.py | 4 +- pypeit/spectrographs/magellan_mage.py | 2 +- pypeit/spectrographs/mdm_modspec.py | 6 +- pypeit/spectrographs/mdm_osmos.py | 6 +- pypeit/spectrographs/mmt_binospec.py | 2 +- pypeit/spectrographs/mmt_bluechannel.py | 6 +- pypeit/spectrographs/mmt_mmirs.py | 4 +- pypeit/spectrographs/not_alfosc.py | 6 +- pypeit/spectrographs/ntt_efosc2.py | 4 +- pypeit/spectrographs/opticalmodel.py | 2 +- pypeit/spectrographs/p200_dbsp.py | 20 +-- pypeit/spectrographs/p200_ngps.py | 6 +- pypeit/spectrographs/p200_tspec.py | 2 +- pypeit/spectrographs/shane_kast.py | 8 +- pypeit/spectrographs/soar_goodman.py | 6 +- pypeit/spectrographs/spectrograph.py | 104 +++++------ pypeit/spectrographs/subaru_focas.py | 10 +- pypeit/spectrographs/tng_dolores.py | 6 +- pypeit/spectrographs/util.py | 6 +- pypeit/spectrographs/vlt_fors.py | 12 +- pypeit/spectrographs/vlt_sinfoni.py | 4 +- pypeit/spectrographs/vlt_xshooter.py | 6 +- pypeit/spectrographs/wht_isis.py | 6 +- pypeit/specutils/pypeit_loaders.py | 14 +- pypeit/tests/test_msgs.py | 2 +- pypeit/tracepca.py | 6 +- pypeit/utils.py | 26 +-- pypeit/wavecalib.py | 36 ++-- pypeit/wavemodel.py | 20 +-- pypeit/wavetilts.py | 24 +-- 161 files changed, 1553 insertions(+), 1548 deletions(-) diff --git a/pypeit/__init__.py b/pypeit/__init__.py index ee6ad2942a..34a03a4b18 100644 --- a/pypeit/__init__.py +++ b/pypeit/__init__.py @@ -16,6 +16,7 @@ __version__ = version # Report current coverage +# TODO: How old is this? Can we update it automatically? __coverage__ = 0.55 import logging diff --git a/pypeit/alignframe.py b/pypeit/alignframe.py index d7b8916226..02ccaac2b8 100644 --- a/pypeit/alignframe.py +++ b/pypeit/alignframe.py @@ -77,7 +77,7 @@ def is_synced(self, slits): """ if not np.array_equal(self.spat_id, slits.spat_id): - msgs.error('Your alignment solutions are out of sync with your slits. 
Remove ' + raise PypeItError('Your alignment solutions are out of sync with your slits. Remove ' 'Calibrations and restart from scratch.') def show(self, slits=None): @@ -205,7 +205,7 @@ def build_traces(self, show_peaks=False, debug=False): nperslit=len(self.alignpar['locations'])) if len(align_traces) != len(self.alignpar['locations']): # Align tracing has failed for this slit - msgs.error("Alignment tracing has failed on slit {0:d}/{1:d}".format(slit_idx+1,self.slits.nslits)) + raise PypeItError("Alignment tracing has failed on slit {0:d}/{1:d}".format(slit_idx+1,self.slits.nslits)) align_prof['{0:d}'.format(slit_idx)] = align_traces.copy() # Steps @@ -242,7 +242,7 @@ def generate_traces(self, align_prof): sls = '{0:d}'.format(slit_idx) for bar in range(nbars): if align_prof[sls][bar].SLITID != slit_idx: - msgs.error("Alignment profiling failed to generate traces") + raise PypeItError("Alignment profiling failed to generate traces") align_traces[:, bar, slit_idx] = align_prof[sls][bar].TRACE_SPAT return align_traces @@ -350,7 +350,7 @@ def __init__(self, traces, locations, tilts): if type(locations) is list: locations = np.array(locations) if locations.size != traces.shape[1]: - msgs.error("The size of locations must be the same as traces.shape[1]") + raise PypeItError("The size of locations must be the same as traces.shape[1]") # Store the relevant input self.traces = traces self.locations = locations diff --git a/pypeit/archive.py b/pypeit/archive.py index 888b7f49bf..de29f6f222 100644 --- a/pypeit/archive.py +++ b/pypeit/archive.py @@ -224,7 +224,7 @@ def _archive_file(self, orig_file, dest_file): return orig_file if not os.path.exists(orig_file): - msgs.error(f'File {orig_file} does not exist') + raise PypeItError(f'File {orig_file} does not exist') full_dest_path = os.path.join(self.archive_root, dest_file) os.makedirs(os.path.dirname(full_dest_path), exist_ok=True) @@ -233,6 +233,6 @@ def _archive_file(self, orig_file, dest_file): try: shutil.copy2(orig_file, full_dest_path) except: - msgs.error(f'Failed to copy {orig_file} to {full_dest_path}') + raise PypeItError(f'Failed to copy {orig_file} to {full_dest_path}') return full_dest_path diff --git a/pypeit/cache.py b/pypeit/cache.py index 34a737da50..d6abde0fb4 100644 --- a/pypeit/cache.py +++ b/pypeit/cache.py @@ -179,7 +179,7 @@ def git_most_recent_tag(): tags = [packaging.version.parse(ref.split('/')[-1]) \ for ref in repo.references if 'refs/tags' in ref] if len(tags) == 0: - msgs.warn('Unable to find any tags in pypeit repository.') + msgs.warning('Unable to find any tags in pypeit repository.') return __version__, None latest_version = str(sorted(tags)[-1]) timestamp = repo.resolve_refish(f'refs/tags/{latest_version}')[0].author.time @@ -239,7 +239,7 @@ def fetch_remote_file( if remote_host == "s3_cloud" and not install_script: # Display a warning that this may take a while, and the user may wish to # download use an install script - msgs.warn(f'Note: If this file takes a while to download, you may wish to used one of ' + msgs.warning(f'Note: If this file takes a while to download, you may wish to used one of ' 'the install scripts (e.g., pypeit_install_telluric) to install the file ' 'independent of this processing script.') @@ -297,10 +297,10 @@ def fetch_remote_file( ) # Raise the appropriate error message - msgs.error(err_msg) + raise PypeItError(err_msg) except TimeoutError as error: - msgs.error(f"Timeout Error encountered: {error}") + raise PypeItError(f"Timeout Error encountered: {error}") # If no error, return the 
pathlib object return pathlib.Path(cache_fn).resolve() @@ -388,7 +388,7 @@ def remove_from_cache(cache_url=None, pattern=None, allow_multiple=False): if cache_url is None: _url = search_cache(pattern, path_only=False) if len(_url) == 0: - msgs.warn(f'Cache does not include a file matching the pattern {pattern}.') + msgs.warning(f'Cache does not include a file matching the pattern {pattern}.') return _url = list(_url.keys()) elif not isinstance(cache_url, list): @@ -397,7 +397,7 @@ def remove_from_cache(cache_url=None, pattern=None, allow_multiple=False): _url = cache_url if len(_url) > 1 and not allow_multiple: - msgs.warn('Function found or was provided with multiple entries to be removed. Either ' + msgs.warning('Function found or was provided with multiple entries to be removed. Either ' 'set allow_multiple=True, or try again with a single url or more specific ' 'pattern. URLs passed/found are:\n' + '\n'.join(_url)) return @@ -452,7 +452,7 @@ def parse_cache_url(url): return 's3_cloud', None, None, str(sub_path.parent), sub_path.name # Unknown host - msgs.warn(f'URL not recognized as a pypeit cache url:\n\t{url}') + msgs.warning(f'URL not recognized as a pypeit cache url:\n\t{url}') return None, None, None, None, None @@ -520,7 +520,7 @@ def _build_remote_url(f_name: str, f_type: str, remote_host: str=None): return reduce(lambda a, b: urljoin(a, b), parts_perm), \ [reduce(lambda a, b: urljoin(a, b), parts_fake)] - msgs.error(f"Remote host type {remote_host} is not supported for package data caching.") + raise PypeItError(f"Remote host type {remote_host} is not supported for package data caching.") def _get_s3_hostname() -> str: diff --git a/pypeit/calibframe.py b/pypeit/calibframe.py index 4fe3ec5087..91c01db676 100644 --- a/pypeit/calibframe.py +++ b/pypeit/calibframe.py @@ -76,12 +76,12 @@ def _validate(self): """ if self.calib_type is None: - msgs.error(f'CODING ERROR: Must define calib_type for {self.__class__.__name__}.') + raise PypeItError(f'CODING ERROR: Must define calib_type for {self.__class__.__name__}.') if self.datamodel is None: - msgs.error(f'CODING ERROR: datamodel cannot be None for {self.__class__.__name__}.') + raise PypeItError(f'CODING ERROR: datamodel cannot be None for {self.__class__.__name__}.') for key in CalibFrame.datamodel.keys(): if key not in self.keys(): - msgs.error(f'CODING ERROR: datamodel for {self.__class__.__name__} must inherit ' + raise PypeItError(f'CODING ERROR: datamodel for {self.__class__.__name__} must inherit ' 'all datamodel components from CalibFrame.datamodel.') def set_paths(self, odir, setup, calib_id, detname): @@ -197,11 +197,11 @@ def calib_keys_from_header(self, hdr): try: self.calib_key, self.calib_dir = CalibFrame.parse_key_dir(hdr) except PypeItError as e: - msgs.warn(f'{e}') + msgs.warning(f'{e}') if 'CALIBID' in hdr: self.calib_id = self.ingest_calib_id(hdr['CALIBID']) else: - msgs.warn('Header does not have CALIBID card; cannot parse calibration IDs.') + msgs.warning('Header does not have CALIBID card; cannot parse calibration IDs.') @staticmethod def parse_key_dir(inp, from_filename=False): @@ -233,16 +233,16 @@ def parse_key_dir(inp, from_filename=False): ext = h.name break if ext is None: - msgs.error(f'None of the headers in {inp} have both CALIBKEY and CALIBDIR ' + raise PypeItError(f'None of the headers in {inp} have both CALIBKEY and CALIBDIR ' 'keywords!') return hdu[ext].header['CALIBKEY'], hdu[ext].header['CALIBDIR'] if isinstance(inp, fits.Header): if 'CALIBKEY' not in inp or 'CALIBDIR' not in inp: - 
msgs.error('Header does not include CALIBKEY and/or CALIBDIR.') + raise PypeItError('Header does not include CALIBKEY and/or CALIBDIR.') return inp['CALIBKEY'], inp['CALIBDIR'] - msgs.error(f'Input object must have type str or astropy.io.fits.Header, not {type(inp)}.') + raise PypeItError(f'Input object must have type str or astropy.io.fits.Header, not {type(inp)}.') @staticmethod def ingest_calib_id(calib_id): @@ -284,7 +284,7 @@ def ingest_calib_id(calib_id): _calib_id = [calib_id] _calib_id = np.unique(np.concatenate([str(c).split(',') for c in _calib_id])) if 'all' in _calib_id and len(_calib_id) != 1: - msgs.warn(f'Calibration groups set to {_calib_id}, resetting to simply "all".') + msgs.warning(f'Calibration groups set to {_calib_id}, resetting to simply "all".') _calib_id = np.array(['all']) for c in _calib_id: if c == 'all': @@ -293,7 +293,7 @@ def ingest_calib_id(calib_id): _c = int(c) except ValueError: # TODO: Not sure this is strictly necessary - msgs.error(f'Invalid calibration group {c}; must be convertible to an integer.') + raise PypeItError(f'Invalid calibration group {c}; must be convertible to an integer.') return _calib_id.tolist() @staticmethod @@ -429,10 +429,10 @@ def construct_file_name(cls, calib_key, calib_dir=None): otherwise the file name """ if None in [cls.calib_type, cls.calib_file_format]: - msgs.error(f'CODING ERROR: {cls.__name__} does not have all ' + raise PypeItError(f'CODING ERROR: {cls.__name__} does not have all ' 'the attributes needed to construct its filename.') if calib_key is None: - msgs.error('CODING ERROR: calib_key cannot be None when constructing the ' + raise PypeItError('CODING ERROR: calib_key cannot be None when constructing the ' f'{cls.__name__} file name.') filename = f'{cls.calib_type}_{calib_key}.{cls.calib_file_format}' return filename if calib_dir is None else Path(calib_dir).absolute() / filename diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py index 6154ac7a15..358038467f 100644 --- a/pypeit/calibrations.py +++ b/pypeit/calibrations.py @@ -145,11 +145,11 @@ def __init__(self, fitstbl, par, spectrograph, caldir, qadir=None, # TODO -- Remove this None option once we have data models for all the Calibrations # outputs and use them to feed Reduce instead of the Calibrations object if not isinstance(fitstbl, PypeItMetaData) and fitstbl is not None: - msgs.error('fitstbl must be an PypeItMetaData object') + raise PypeItError('fitstbl must be an PypeItMetaData object') if not isinstance(par, pypeitpar.CalibrationsPar): - msgs.error('Input parameters must be a CalibrationsPar instance.') + raise PypeItError('Input parameters must be a CalibrationsPar instance.') if not isinstance(spectrograph, Spectrograph): - msgs.error('Must provide Spectrograph instance to Calibrations.') + raise PypeItError('Must provide Spectrograph instance to Calibrations.') # Required inputs self.fitstbl = fitstbl @@ -227,7 +227,7 @@ def check_calibrations(self, file_list, check_lamps=True): # Check that the lamps being combined are all the same if check_lamps: if not lampstat[1:] == lampstat[:-1]: - msgs.warn("The following files contain different lamp status") + msgs.warning("The following files contain different lamp status") # Get the longest strings maxlen = max([len("Filename")] + [len(os.path.split(x)[1]) for x in file_list]) maxlmp = max([len("Lamp status")] + [len(x) for x in lampstat]) @@ -272,7 +272,7 @@ def find_calibrations(self, frametype, frameclass): # NOTE: This will raise an exception if the frametype is not valid! 
framematch.valid_frametype(frametype, raise_error=True) if not issubclass(frameclass, CalibFrame): - msgs.error(f'CODING ERROR: {frameclass} is not a subclass of CalibFrame.') + raise PypeItError(f'CODING ERROR: {frameclass} is not a subclass of CalibFrame.') # Grab rows with relevant frames detname = self.spectrograph.get_det_name(self.det) @@ -358,7 +358,7 @@ def get_arc(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.msarc = None return self.msarc @@ -410,7 +410,7 @@ def get_tiltimg(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.mstilt = None return self.mstilt @@ -456,7 +456,7 @@ def get_align(self, force:str=None): """ # Check for existing data if not self._chk_objs(['msbpm', 'slits']): - msgs.error('Must have the bpm and slits to make the alignments!') + raise PypeItError('Must have the bpm and slits to make the alignments!') # Check internals self._chk_set(['det', 'calib_ID', 'par']) @@ -467,7 +467,7 @@ def get_align(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.alignments = None return self.alignments @@ -530,7 +530,7 @@ def get_bias(self, force:str=None): # If no raw files are available and no processed calibration frame if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing without a bias...') self.msbias = None return self.msbias @@ -578,7 +578,7 @@ def get_dark(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.msdark = None return self.msdark @@ -656,7 +656,7 @@ def get_scattlight(self, force:str=None): """ # Check for existing data if not self._chk_objs(['msbpm', 'slits']): - msgs.warn('Must have the bpm and the slits defined to make a scattered light image! ' + msgs.warning('Must have the bpm and the slits defined to make a scattered light image! 
' 'Skipping and may crash down the line') return self.msscattlight @@ -670,7 +670,7 @@ def get_scattlight(self, force:str=None): scatt_idx = self.fitstbl.find_frames(frame['type'], calib_ID=self.calib_ID, index=True) if len(raw_scattlight_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') return self.msscattlight @@ -712,7 +712,7 @@ def get_scattlight(self, force:str=None): if not success: # Something went awry - msgs.warn('Scattered light modelling failed. Continuing, but likely to fail soon...') + msgs.warning('Scattered light modelling failed. Continuing, but likely to fail soon...') self.success = False return self.msscattlight @@ -756,7 +756,7 @@ def get_flats(self, force:str=None): """ # Check for existing data if not self._chk_objs(['msarc', 'msbpm', 'slits', 'wv_calib']): - msgs.warn('Must have the arc, bpm, slits, and wv_calib defined to make flats! ' + msgs.warning('Must have the arc, bpm, slits, and wv_calib defined to make flats! ' 'Skipping and may crash down the line') # TODO: Why was this an empty object and not None? self.flatimages = None #flatfield.FlatImages() @@ -765,7 +765,7 @@ def get_flats(self, force:str=None): # Slit and tilt traces are required to flat-field the data if not self._chk_objs(['slits', 'wavetilts']): # TODO: Why doesn't this fault? - msgs.warn('Flats were requested, but there are quantities missing necessary to ' + msgs.warning('Flats were requested, but there are quantities missing necessary to ' 'create flats. Proceeding without flat fielding....') # TODO: Why was this an empty object and not None? self.flatimages = None #flatfield.FlatImages() @@ -806,7 +806,7 @@ def get_flats(self, force:str=None): and len(raw_illum_files) == 0 and illum_cal_file is None: # if no calibration frames are found, check if the user has provided a pixel flat file if self.par['flatfield']['pixelflat_file'] is not None: - msgs.warn(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found but a ' + msgs.warning(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found but a ' 'user-defined pixel flat file was provided. Using that file.') self.flatimages = flatfield.FlatImages(PYP_SPEC=self.spectrograph.name, spat_id=self.slits.spat_id) self.flatimages.calib_key = flatfield.FlatImages.construct_calib_key(self.fitstbl['setup'][self.frame], @@ -815,7 +815,7 @@ def get_flats(self, force:str=None): self.det, self.flatimages, calib_dir=self.calib_dir, chk_version=self.chk_version) else: - msgs.warn(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found and ' + msgs.warning(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found and ' 'unable to identify a relevant processed calibration frame. Continuing...') self.flatimages = None return self.flatimages @@ -825,7 +825,7 @@ def get_flats(self, force:str=None): # issued if both are present and not the same. if illum_cal_file is not None and pixel_cal_file is not None \ and pixel_cal_file != illum_cal_file: - msgs.warn('Processed calibration frames were found for both pixel and ' + msgs.warning('Processed calibration frames were found for both pixel and ' 'slit-illumination flats, and the files are not the same. 
Ignoring the ' 'slit-illumination flat.') cal_file = illum_cal_file if pixel_cal_file is None else pixel_cal_file @@ -998,7 +998,7 @@ def get_slits(self, force:str=None): raw_lampoff_files = self.fitstbl.find_frame_files('lampoffflats', calib_ID=self.calib_ID) if len(raw_trace_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.slits = None return self.slits @@ -1069,7 +1069,7 @@ def get_slits(self, force:str=None): qa_path=self.qa_path, auto=True) if not edges.success: # Something went amiss - msgs.warn('Edge tracing failed. Continuing, but likely to fail soon...') + msgs.warning('Edge tracing failed. Continuing, but likely to fail soon...') traceImage = None edges = None self.success = False @@ -1117,7 +1117,7 @@ def get_wv_calib(self, force:str=None): # Check for existing data req_objs = ['msarc', 'msbpm', 'slits'] if not self._chk_objs(req_objs): - msgs.warn('Not enough information to load/generate the wavelength calibration. ' + msgs.warning('Not enough information to load/generate the wavelength calibration. ' 'Skipping and may crash down the line') return None @@ -1130,7 +1130,7 @@ def get_wv_calib(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.wv_calib = None return self.wv_calib @@ -1146,7 +1146,7 @@ def get_wv_calib(self, force:str=None): if self.par['wavelengths']['method'] == 'echelle': msgs.info('Method set to Echelle -- checking wv_calib for 2dfits') if not hasattr(self.wv_calib, 'wv_fit2d'): - msgs.error('There is no 2d fit in this Echelle wavelength ' + raise PypeItError('There is no 2d fit in this Echelle wavelength ' 'calibration! Please generate a new one with a 2d fit.') # Return @@ -1203,7 +1203,7 @@ def get_tilts(self, force:str=None): # Check for existing data # TODO: add mstilt_inmask to this list when it gets implemented. if not self._chk_objs(['mstilt', 'msbpm', 'slits', 'wv_calib']): - msgs.warn('Do not have all the necessary objects for tilts. Skipping and may crash ' + msgs.warning('Do not have all the necessary objects for tilts. Skipping and may crash ' 'down the line.') return None @@ -1216,7 +1216,7 @@ def get_tilts(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warn(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.wavetilts = None return self.wavetilts @@ -1281,12 +1281,12 @@ def process_load_selection(self, frame, cal_file, force): Either the loaded calibration object or None. 
""" if force not in [None, 'remake', 'reload']: - msgs.error(f'`force` keyword must be None, remake, or reload, not {force}') + raise PypeItError(f'`force` keyword must be None, remake, or reload, not {force}') if force == 'remake': return None _cal_file = Path(cal_file).absolute() if force == 'reload' and not _cal_file.exists(): - msgs.warn(f"{_cal_file} does not exist; cannot reload " + msgs.warning(f"{_cal_file} does not exist; cannot reload " f"{frame['class'].__name__} calibration.") self.success = False return None @@ -1324,7 +1324,7 @@ def _chk_set(self, items): """ for item in items: if getattr(self, item) is None: - msgs.error("Use self.set to specify '{:s}' prior to generating XX".format(item)) + raise PypeItError("Use self.set to specify '{:s}' prior to generating XX".format(item)) # This is specific to `self.ms*` attributes def _chk_objs(self, items): @@ -1343,8 +1343,8 @@ def _chk_objs(self, items): if getattr(self, obj) is None: # Strip ms iobj = obj[2:] if obj[0:2] == 'ms' else obj - msgs.warn("You need to generate {:s} prior to this calibration..".format(obj)) - msgs.warn("Use get_{:s}".format(iobj)) + msgs.warning("You need to generate {:s} prior to this calibration..".format(obj)) + msgs.warning("Use get_{:s}".format(iobj)) return False return True @@ -1405,10 +1405,10 @@ def get_association(fitstbl, spectrograph, caldir, setup, calib_ID, det, must_ex science/standard frames if ``include_science`` is True. """ if fitstbl.calib_groups is None: - msgs.error('Calibration groups have not been defined!') + raise PypeItError('Calibration groups have not been defined!') if include_science and proc_only: - msgs.warn('Requested to include the science/standard frames and to only return the ' + msgs.warning('Requested to include the science/standard frames and to only return the ' 'processed calibration frames. Ignoring former request.') # Set the calibrations path @@ -1435,7 +1435,7 @@ def get_association(fitstbl, spectrograph, caldir, setup, calib_ID, det, must_ex asn = {} setups = fitstbl.unique_configurations(copy=True, rm_none=True) if setup not in setups: - msgs.warn(f'Requested setup {setup} is invalid. Choose from {",".join(setups)}.') + msgs.warning(f'Requested setup {setup} is invalid. Choose from {",".join(setups)}.') return asn # Subset to output @@ -1461,7 +1461,7 @@ def get_association(fitstbl, spectrograph, caldir, setup, calib_ID, det, must_ex continue if not (all(fitstbl['calib'][indx] == fitstbl['calib'][indx][0]) or all([fitstbl['calib'][indx][0] in cc.split(',') for cc in fitstbl['calib'][indx]])): - msgs.error(f'CODING ERROR: All {frametype} frames in group {calib_ID} ' + raise PypeItError(f'CODING ERROR: All {frametype} frames in group {calib_ID} ' 'are not all associated with the same subset of calibration ' 'groups; calib for the first file is ' f'{fitstbl["calib"][indx][0]}.') @@ -1537,11 +1537,11 @@ def association_summary(ofile, fitstbl, spectrograph, caldir, subset=None, det=N Overwrite any existing file of the same name. """ if fitstbl.calib_groups is None: - msgs.error('Calibration groups have not been defined!') + raise PypeItError('Calibration groups have not been defined!') _ofile = Path(ofile).absolute() if _ofile.exists() and not overwrite: - msgs.error(f'{_ofile} exists! To overwrite, set overwrite=True.') + raise PypeItError(f'{_ofile} exists! 
To overwrite, set overwrite=True.') _det = 1 if det is None else det detname = spectrograph.get_det_name(_det) @@ -1683,9 +1683,9 @@ def check_for_calibs(par, fitstbl, raise_error=True, cut_cfg=None): 'if this is a standard run!' pass_calib = False if raise_error: - msgs.error(msg) + raise PypeItError(msg) else: - msgs.warn(msg) + msgs.warning(msg) # Explore science frame for key, ftype in zip(['use_biasimage', 'use_darkimage', 'use_pixelflat', @@ -1707,9 +1707,9 @@ 'step. Add them to your PypeIt file!' pass_calib = False if raise_error: - msgs.error(msg) + raise PypeItError(msg) else: - msgs.warn(msg) + msgs.warning(msg) if pass_calib: msgs.info("Congrats!! You passed the calibrations inspection!!") diff --git a/pypeit/coadd1d.py b/pypeit/coadd1d.py index 7084aacbc2..dce4bcf607 100644 --- a/pypeit/coadd1d.py +++ b/pypeit/coadd1d.py @@ -116,7 +116,7 @@ def load(self): """ Load the arrays we need for performing coadds. Dummy method overloaded by children. """ - msgs.error('This method is undefined in the base classes and should only be called by the subclasses') + raise PypeItError('This method is undefined in the base classes and should only be called by the subclasses') def save(self, coaddfile, telluric=None, obj_model=None, overwrite=True): @@ -229,10 +229,10 @@ def load(self): chk_version=self.chk_version) indx = sobjs.name_indices(self.objids[iexp]) if not np.any(indx): - msgs.error( + raise PypeItError( "No matching objects for {:s}. Odds are you input the wrong OBJID".format(self.objids[iexp])) if np.sum(indx) > 1: - msgs.error("Error in spec1d file for exposure {:d}: " + raise PypeItError("Error in spec1d file for exposure {:d}: " "More than one object was identified with the OBJID={:s} in file={:s}".format( iexp, self.objids[iexp], self.spec1dfiles[iexp])) wave_iexp, flux_iexp, ivar_iexp, gpm_iexp, blaze_iexp, _, header = \ @@ -286,8 +286,8 @@ def check_exposures(self): # check if there are exposures that are completely masked out, i.e., gpms = False for all spectral pixels masked_exps = [np.all(np.logical_not(gpm)) for gpm in _gpms] if np.any(masked_exps): - msgs.warn(f'The following exposure(s) is/are completely masked out. It/They will not be coadded.') - [msgs.warn(f"Exposure {i}: {fname.split('/')[-1]} {obj}") + msgs.warning('The following exposure(s) are completely masked out and will not be coadded.') + [msgs.warning(f"Exposure {i}: {fname.split('/')[-1]} {obj}") for i, (fname, obj, masked_exp) in enumerate(zip(_spec1dfiles, _objids, masked_exps)) if masked_exp] # remove masked out exposure _waves = [wave for (wave, masked_exp) in zip(_waves, masked_exps) if not masked_exp] @@ -301,7 +301,7 @@ # check if there is still at least 1 exposure left if len(_fluxes) < 1: - msgs.error('At least 1 unmasked exposures are required for coadding.') + raise PypeItError('At least one unmasked exposure is required for coadding.') # check if there is any bad exposure by comparing the rms_sn with the median rms_sn among all exposures if len(_fluxes) > 2: @@ -318,8 +318,8 @@ f'({_sigrej} sigma above the median S/N in the stack).' if self.par['sigrej_exp'] is not None: warn_msg += ' It/They WILL NOT BE COADDED.'
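A side effect of replacing the old hard-stopping msgs.error() calls with raised exceptions, as done throughout this patch, is that driver code can now recover from a failed coadd instead of the interpreter exiting. A minimal sketch of that pattern, assuming PypeItError is importable from pypeit.pypmsgs and that the coadder object exposes a run() method (both are assumptions, not shown in this hunk):

    from pypeit import msgs
    from pypeit.pypmsgs import PypeItError  # assumed import path

    def try_coadd(coadder):
        # Attempt the coadd; degrade gracefully instead of exiting.
        try:
            coadder.run()
        except PypeItError as err:
            # msgs.error() used to terminate the process here; raising an
            # exception lets the caller decide whether the failure is fatal.
            msgs.warning(f'Coadd failed and will be skipped: {err}')
            return None
        return coadder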
- msgs.warn(warn_msg) - [msgs.warn(f"Exposure {i}: {fname.split('/')[-1]} {obj}") + msgs.warning(warn_msg) + [msgs.warning(f"Exposure {i}: {fname.split('/')[-1]} {obj}") for i, (fname, obj, bad_exp) in enumerate(zip(_spec1dfiles, _objids, bad_exps)) if bad_exp] if self.par['sigrej_exp'] is not None: # remove bad exposure @@ -383,7 +383,7 @@ def __init__(self, spec1dfiles, objids, spectrograph=None, par=None, sensfuncfil chk_version=chk_version) if sensfuncfile is None: - msgs.error('sensfuncfile is a required argument for echelle coadding') + raise PypeItError('sensfuncfile is a required argument for echelle coadding') self.sensfuncfile = self.nexp * [sensfuncfile] if isinstance(sensfuncfile, str) else sensfuncfile nsens = len(self.sensfuncfile) @@ -391,7 +391,7 @@ def __init__(self, spec1dfiles, objids, spectrograph=None, par=None, sensfuncfil self.sensfuncfile = self.nexp * [self.sensfuncfile[0]] nsens = self.nexp if nsens != self.nexp: - msgs.error('Must enter either one sensfunc file for all exposures or one sensfunc file for ' + raise PypeItError('Must enter either one sensfunc file for all exposures or one sensfunc file for ' f'each exposure. Entered {nsens} files for {self.nexp} exposures.') if setup_id is None: @@ -403,7 +403,7 @@ def __init__(self, spec1dfiles, objids, spectrograph=None, par=None, sensfuncfil self.setup_id = self.nexp * [self.setup_id[0]] nsetup = self.nexp if nsetup != self.nexp: - msgs.error('Must enter either a single setup_id for all exposures or one setup_id for ' + raise PypeItError('Must enter either a single setup_id for all exposures or one setup_id for ' f'each exposure. Entered {nsetup} files for {self.nexp} exposures.') @@ -485,7 +485,7 @@ def load_ech_arrays(self, spec1dfiles, objids, sensfuncfiles): sobjs = specobjs.SpecObjs.from_fitsfile(spec1dfiles[iexp], chk_version=self.chk_version) indx = sobjs.name_indices(objids[iexp]) if not np.any(indx): - msgs.error("No matching objects for {:s}. Odds are you input the wrong OBJID".format(objids[iexp])) + raise PypeItError("No matching objects for {:s}. Odds are you input the wrong OBJID".format(objids[iexp])) wave_iexp, flux_iexp, ivar_iexp, gpm_iexp, blaze_iexp, meta_spec, header = \ sobjs[indx].unpack_object(ret_flam=self.par['flux_value'], extract_type=self.par['ex_value']) # This np.atleast2d hack deals with the situation where we are wave_iexp is actually Multislit data, i.e. we are treating @@ -517,7 +517,7 @@ def load_ech_arrays(self, spec1dfiles, objids, sensfuncfiles): waves[...,iexp], fluxes[...,iexp], ivars[..., iexp], gpms[...,iexp], weights_sens[...,iexp] \ = wave_iexp, flux_iexp, ivar_iexp, gpm_iexp, weights_sens_iexp except ValueError: - msgs.error('The shape (Nspec,Norder) of spectra is not consistent between exposures. ' + raise PypeItError('The shape (Nspec,Norder) of spectra is not consistent between exposures. ' 'These spec1ds cannot be coadded at this time.') return waves, fluxes, ivars, gpms, weights_sens, header_out diff --git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index ff60f13f47..517efac203 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -180,7 +180,7 @@ def __init__(self, spec2d, spectrograph, par, det=1, # Check that there are the same number of slits on every exposure nslits_list = [slits.nslits for slits in self.stack_dict['slits_list']] if not len(set(nslits_list)) == 1: - msgs.error('Not all of your exposures have the same number of slits. Check your inputs') + raise PypeItError('Not all of your exposures have the same number of slits. 
Check your inputs') # This is the number of slits of the single (un-coadded) frames self.nslits_single = nslits_list[0] @@ -192,9 +192,9 @@ def __init__(self, spec2d, spectrograph, par, det=1, binspec_list = [slits.binspec for slits in self.stack_dict['slits_list']] binspat_list = [slits.binspat for slits in self.stack_dict['slits_list']] if not len(set(binspec_list)) == 1: - msgs.error('Not all of your exposures have the same spectral binning. Check your inputs') + raise PypeItError('Not all of your exposures have the same spectral binning. Check your inputs') if not len(set(binspat_list)) == 1: - msgs.error('Not all of your exposures have the same spatial binning. Check your inputs') + raise PypeItError('Not all of your exposures have the same spatial binning. Check your inputs') self.binning = np.array([self.stack_dict['slits_list'][0].binspec, self.stack_dict['slits_list'][0].binspat]) @@ -256,7 +256,7 @@ def default_par(spectrograph, inp_cfg=None, det=None, only_slits=None, exclude_s if inp_cfg is not None: cfg = utils.recursive_update(cfg, dict(inp_cfg)) if only_slits is not None and det is not None: - msgs.warn('only_slits and det are mutually exclusive. Ignoring det.') + msgs.warning('only_slits and det are mutually exclusive. Ignoring det.') _det = None else: _det = det @@ -265,7 +265,7 @@ def default_par(spectrograph, inp_cfg=None, det=None, only_slits=None, exclude_s cfg['rdx']['detnum'] = _det if only_slits is not None and exclude_slits is not None: - msgs.warn('only_slits and exclude_slits are mutually exclusive. Ignoring exclude_slits.') + msgs.warning('only_slits and exclude_slits are mutually exclusive. Ignoring exclude_slits.') _exclude_slits = None else: _exclude_slits = exclude_slits @@ -309,13 +309,13 @@ def default_basename(spec2d_files): frsthdr = fits.getheader(spec2d_files[0]) lasthdr = fits.getheader(spec2d_files[-1]) if 'FILENAME' not in frsthdr: - msgs.error(f'Missing FILENAME keyword in {spec2d_files[0]}. Set the basename ' + raise PypeItError(f'Missing FILENAME keyword in {spec2d_files[0]}. Set the basename ' 'using the command-line option.') if 'FILENAME' not in lasthdr: - msgs.error(f'Missing FILENAME keyword in {spec2d_files[-1]}. Set the basename ' + raise PypeItError(f'Missing FILENAME keyword in {spec2d_files[-1]}. Set the basename ' 'using the command-line option.') if 'TARGET' not in frsthdr: - msgs.error(f'Missing TARGET keyword in {spec2d_files[0]}. Set the basename ' + raise PypeItError(f'Missing TARGET keyword in {spec2d_files[0]}. Set the basename ' 'using the command-line option.') return f"{frsthdr['FILENAME'].split('.fits')[0]}-" \ f"{lasthdr['FILENAME'].split('.fits')[0]}-{frsthdr['TARGET']}" @@ -383,7 +383,7 @@ def good_slitindx(self, only_slits=None, exclude_slits=None): """ if exclude_slits is not None and only_slits is not None: - msgs.warn('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. ' + msgs.warning('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. 
' 'Using `only_slits` and ignoring `exclude_slits`') _exclude_slits = None else: @@ -417,7 +417,7 @@ def good_slitindx(self, only_slits=None, exclude_slits=None): for islit in _only_slits: if islit not in slits0.slitord_id[good_slitindx]: # Warnings for the slits that are selected by the user but NOT good slits - msgs.warn('Slit {} cannot be coadd because masked'.format(islit)) + msgs.warning('Slit {} cannot be coadded because it is masked'.format(islit)) else: msgs.info(f'Slit {islit}') indx = np.where(slits0.slitord_id[good_slitindx] == islit)[0] @@ -502,7 +502,7 @@ def optimal_weights(self, uniq_obj_id, order=None, weight_method='auto'): for iexp, sobjs in enumerate(self.stack_dict['specobjs_list']): ithis = sobjs.slitorder_uniq_id_indices(uniq_obj_id[iexp], order=order) if not np.any(ithis): - msgs.error(f'Object {uniq_obj_id[iexp]} provided not valid. Optimal weights cannot be determined.') + raise PypeItError(f'Object {uniq_obj_id[iexp]} provided is not valid. Optimal weights cannot be determined.') order_str = f' on slit/order {order}' if order is not None else '' # check if OPT_COUNTS is available if sobjs[ithis][0].has_opt_ext() and np.any(sobjs[ithis][0].OPT_MASK): @@ -518,10 +518,10 @@ fluxes.append(flux_iexp) ivars.append(ivar_iexp) gpms.append(gpm_iexp) - msgs.warn(f'Optimal extraction not available for object ' + msgs.warning(f'Optimal extraction not available for object ' f'{uniq_obj_id[iexp]} {order_str} in exp {iexp}. Using box extraction.') else: - msgs.error(f'Optimal weights cannot be determined because ' + raise PypeItError(f'Optimal weights cannot be determined because ' f'flux is not available for object = {uniq_obj_id[iexp]} {order_str} in exp {iexp}. ') # TODO For now just use the zero as the reference for the wavelengths? Perhaps we should be rebinning the data though? @@ -561,7 +561,7 @@ def coadd(self, interp_dspat=True): # check if the slit is found in every exposure if not np.all([np.any(thismask) for thismask in thismask_stack]): - msgs.warn(f'Slit/order {_slitord_id[slit_idx]} was not found in every exposures. ' + msgs.warning(f'Slit/order {_slitord_id[slit_idx]} was not found in every exposure. ' f'2D coadd cannot be performed on this slit. Try increasing the parameter spat_toler') continue @@ -584,7 +584,7 @@ def coadd(self, interp_dspat=True): coadd_list.append(coadd_dict) if len(coadd_list) == 0: - msgs.error("All the slits were missing in one or more exposures. 2D coadd cannot be performed") + raise PypeItError("All the slits were missing in one or more exposures. 2D coadd cannot be performed") return coadd_list @@ -597,7 +597,7 @@ def create_pseudo_image(self, coadd_list): # Check that self.nslit is equal to len(coadd_list) if self.nslits_coadded != len(coadd_list): - msgs.error('Wrong number of slits for the 2d coadded frame') + raise PypeItError('Wrong number of slits for the 2d coadded frame') nspec_vec = np.zeros(self.nslits_coadded,dtype=int) nspat_vec = np.zeros(self.nslits_coadded,dtype=int) @@ -1004,7 +1004,7 @@ def load_coadd2d_stacks(self, spec2d, chk_version=False): exptime_coadd = np.percentile(exptime_stack, 50., method='higher') isclose_exptime = np.isclose(exptime_stack, exptime_coadd, atol=1.)
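As a concrete check of the exposure-time rescaling applied just below, here is the same median-exposure-time arithmetic run standalone; the exposure-time values are invented for illustration:

    import numpy as np

    exptime_stack = np.array([600., 600., 450.])
    # method='higher' guarantees the reference exposure time is one of the
    # actual values in the stack rather than an interpolated number.
    exptime_coadd = np.percentile(exptime_stack, 50., method='higher')
    exp_scale = exptime_coadd / exptime_stack
    print(exptime_coadd, exp_scale)  # -> 600.0 and [1., 1., 1.333...]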
if not np.all(isclose_exptime): - msgs.warn('Exposure time is not consistent (within 1 sec) for all frames being coadded! ' + msgs.warning('Exposure time is not consistent (within 1 sec) for all frames being coadded! ' f'Scaling each image by the median exposure time ({exptime_coadd} s) before coadding.') exp_scale = exptime_coadd / exptime_stack for iexp in range(nfiles): @@ -1040,12 +1040,12 @@ def check_input(self, input, type): :obj:`list` or `numpy.ndarray`_: User input values """ if type != 'weights' and type != 'offsets': - msgs.error('Unrecognized type for check_input') + raise PypeItError('Unrecognized type for check_input') if isinstance(input, (list, np.ndarray)): if len(input) != self.nexp: - msgs.error(f'If {type} are input it must be a list/array with same number of elements as exposures') + raise PypeItError(f'If {type} are input it must be a list/array with same number of elements as exposures') return np.atleast_1d(input).tolist() if type == 'weights' else np.atleast_1d(input) - msgs.error(f'Unrecognized format for {type}') + raise PypeItError(f'Unrecognized format for {type}') def compute_offsets(self): """ @@ -1061,14 +1061,14 @@ def compute_offsets(self): msgs.info('Using offsets from header') dithoffs = [self.spectrograph.get_meta_value(f, 'dithoff') for f in self.spec2d] if None in dithoffs: - msgs.error('Dither offsets keyword not found for one or more spec2d files. ' + raise PypeItError('Dither offsets keyword not found for one or more spec2d files. ' 'Choose another option for `offsets`') dithoffs_pix = - np.array(dithoffs) / pixscale self.offsets = dithoffs_pix[0] - dithoffs_pix self.offsets_report(self.offsets, pixscale, 'header keyword') elif self.obj_id_bri is None and self.par['coadd2d']['offsets'] == 'auto': - msgs.error('Offsets cannot be computed because no unique reference object ' + raise PypeItError('Offsets cannot be computed because no unique reference object ' 'with the highest S/N was found. To continue, provide offsets in `Coadd2DPar`') # 2) a list of offsets is provided by the user (no matter if we have a bright object or not) @@ -1083,7 +1083,7 @@ def compute_offsets(self): self.maskdef_offset = np.array([slits.maskdef_offset for slits in self.stack_dict['slits_list']]) # Check if maskdef_offset is actually recoded in the SlitTraceSet if np.any(self.maskdef_offset == None): - msgs.error('maskdef_offsets are not recoded in the SlitTraceSet ' + raise PypeItError('maskdef_offsets are not recorded in the SlitTraceSet ' 'for one or more exposures. They cannot be used.') # the offsets computed during the main reduction (`run_pypeit`) are used msgs.info('Determining offsets using maskdef_offset recoded in SlitTraceSet') @@ -1095,7 +1095,7 @@ def compute_offsets(self): # see child method pass else: - msgs.error('Invalid value for `offsets`') + raise PypeItError('Invalid value for `offsets`') def compute_weights(self): """ @@ -1123,7 +1123,7 @@ def compute_weights(self): # and they might miss the warning. Its debatable though. # warn if the user had put `auto` in the parset - msgs.warn('Weights cannot be computed because no unique reference object ' + msgs.warning('Weights cannot be computed because no unique reference object ' 'with the highest S/N was found.
Using uniform weights instead.') elif self.par['coadd2d']['weights'] == 'uniform': msgs.info('Using uniform weights') @@ -1135,7 +1135,7 @@ def compute_weights(self): # see child method pass else: - msgs.error('Invalid value for `weights`') + raise PypeItError('Invalid value for `weights`') def _get_weights(self, indx=None): """ @@ -1184,10 +1184,10 @@ def unpack_specobj(spec, spatord_id=None): # check if BOX_COUNTS is available elif spec.has_box_ext() and np.any(spec.BOX_MASK): _, flux, ivar, gpm = spec.get_box_ext() - msgs.warn(f'Optimal extraction not available for obj_id {objid} ' + msgs.warning(f'Optimal extraction not available for obj_id {objid} ' f'in slit/order {spatord_id}. Using box extraction.') else: - msgs.warn(f'Optimal and Boxcar extraction not available for obj_id {objid} in slit/order {spatord_id}.') + msgs.warning(f'Optimal and Boxcar extraction not available for obj_id {objid} in slit/order {spatord_id}.') _, flux, ivar, gpm = None, None, None, None return flux, ivar, gpm @@ -1205,14 +1205,14 @@ def get_brightest_obj(self, specobjs_list, spat_ids): ------- """ - msgs.error('The get_brightest_obj() method should be overloaded by the child class.') + raise PypeItError('The get_brightest_obj() method should be overloaded by the child class.') def handle_reference_obj(self): """ Dummy method to handle the reference object. Overloaded by child methods. """ - msgs.error('The handle_reference_obj() method should be overloaded by the child class.') + raise PypeItError('The handle_reference_obj() method should be overloaded by the child class.') def reference_trace_stack(self, slitid, offsets=None, uniq_obj_id=None): @@ -1236,7 +1236,7 @@ def reference_trace_stack(self, slitid, offsets=None, uniq_obj_id=None): List of reference traces for the slit/order specified by slitid. """ - msgs.error('The reference_trace_stack() method should be overloaded by the child class.') + raise PypeItError('The reference_trace_stack() method should be overloaded by the child class.') def get_maskdef_dict(self, slit_idx, ref_trace_stack): @@ -1269,7 +1269,7 @@ def wave_method(self): str: The wavelength method to be used in the coadd2d. """ - msgs.error('The wave_method() method should be overloaded by the child class.') + raise PypeItError('The wave_method() method should be overloaded by the child class.') # Multislit can coadd with: # 1) input offsets or if offsets is None, it will find the brightest trace and compute them @@ -1323,9 +1323,9 @@ def handle_reference_obj(self): # be optionally used for offsets and weights. 
if self.par['coadd2d']['user_obj_ids'] is not None: if self.par['coadd2d']['weights'] != 'auto': - msgs.error('Parameter `user_obj_ids` can only be used if weights are set to `auto`.') + raise PypeItError('Parameter `user_obj_ids` can only be used if weights are set to `auto`.') if len(self.par['coadd2d']['user_obj_ids']) != self.nexp: - msgs.error('Parameter `user_obj_ids` must have the same number of elements as exposures.') + raise PypeItError('Parameter `user_obj_ids` must have the same number of elements as exposures.') user_obj_exist = np.zeros(self.nexp, dtype=bool) # get the flux, ivar, gpm, and spatial pixel position of the user object fluxes, ivars, gpms, spatids, spat_pixpos = [], [], [], [], [] @@ -1344,10 +1344,10 @@ def handle_reference_obj(self): user_obj_exist[i] = True # check if the user object exists in all the exposures if not np.all(user_obj_exist): - msgs.error('Not all of the spat_pixpos_ids provided through `user_obj_ids` exist in all of the exposures.') + raise PypeItError('Not all of the spat_pixpos_ids provided through `user_obj_ids` exist in all of the exposures.') # Check that all spatids are within the spat_toler of each other if not np.all(np.abs(spatids - np.mean(spatids[0])) <= self.par['coadd2d']['spat_toler']): - msgs.error('Not all spatial IDs are within spat_toler of each other') + raise PypeItError('Not all spatial IDs are within spat_toler of each other') self.spatid_bri = int(np.rint(np.mean(spatids))) self.spat_pixpos_bri = np.array(spat_pixpos) self.snr_bar_bri, _ = coadd.calc_snr(fluxes, ivars, gpms) @@ -1426,7 +1426,7 @@ def compute_offsets(self): find_min_max=self.par['reduce']['findobj']['find_min_max'], show_trace=self.debug_offsets, show_peaks=self.debug_offsets) if len(sobjs_exp) == 0: - msgs.error(f'No objects found in the rebinned image for exposure {iexp} ' + raise PypeItError(f'No objects found in the rebinned image for exposure {iexp} ' f'(used to compute the offsets). ' f'Check `FindObjPar` parameters and try to adjust `snr_thresh`') if self.par['coadd2d']['user_obj_ids'] is not None: @@ -1448,7 +1448,7 @@ def compute_offsets(self): traces_rect[:, iexp] = sobjs_exp[np.argmin(dspat_exp_orig)].TRACE_SPAT user_obj_dspats.append(dspat_ex_orig_min) else: - msgs.error(f'Could not identify an object in the rebinned image corresponding ' + raise PypeItError(f'Could not identify an object in the rebinned image corresponding ' f'to the trace for the user object {self.par["coadd2d"]["user_obj_ids"][iexp]} ' f'in exposure {iexp+1} within the specified spatial ' f'tolerance ={self.par["coadd2d"]["spat_toler"]}') @@ -1572,7 +1572,7 @@ def get_brightest_obj(self, specobjs_list, slit_spat_ids): # Find the highest snr object among all the slits if np.all(bpm): - msgs.warn('You do not appear to have a unique reference object that was traced as the highest S/N ' + msgs.warning('You do not appear to have a unique reference object that was traced as the highest S/N ' 'ratio on the same slit of every exposure. 
Try increasing the parameter `spat_toler`') return None, None, None, None else: @@ -1770,7 +1770,7 @@ def handle_reference_obj(self): # If a user-input object to compute offsets and weights is provided, check if it exists and get the needed info if len(self.stack_dict['specobjs_list']) > 0 and self.par['coadd2d']['user_obj_ids'] is not None: if len(self.par['coadd2d']['user_obj_ids']) != self.nexp: - msgs.error(f'Parameter `user_obj_ids` {self.par["coadd2d"]["user_obj_ids"]} must have the same number ' + raise PypeItError(f'Parameter `user_obj_ids` {self.par["coadd2d"]["user_obj_ids"]} must have the same number ' f'of elements as exposures {self.nexp}.') else: # does it exists? @@ -1781,14 +1781,14 @@ def handle_reference_obj(self): # check if the object exists in this exposure ind = sobjs.slitorder_uniq_id_indices(self.par['coadd2d']['user_obj_ids'][iexp], order=ord) if (len(ind) == 0) or (not np.any(ind)): - msgs.error(f'Object with user_obj_id {self.par["coadd2d"]["user_obj_ids"][iexp]} does not exist in exposure {iexp+1} for order {ord}.') + raise PypeItError(f'Object with user_obj_id {self.par["coadd2d"]["user_obj_ids"][iexp]} does not exist in exposure {iexp+1} for order {ord}.') flux, ivar, mask = self.unpack_specobj(sobjs[ind][0]) if flux is not None and ivar is not None and mask is not None: user_obj_exist[iexp, iord] = True if not np.all(user_obj_exist): - msgs.error('Object provided through `user_obj_ids` does not exist in all the exposures.') + raise PypeItError('Object provided through `user_obj_ids` does not exist in all the exposures.') # get the needed info about the user object self.obj_id_bri = np.array(self.par['coadd2d']['user_obj_ids']) @@ -1871,7 +1871,7 @@ def _get_weights(self, indx=None): # then the weights are computed per order, i.e., every order has a # different set of weights in each exposure (len(self.use_weights[indx]) = nexp) if self.par['coadd2d']['weights'] == 'auto' and indx is None: - msgs.error('The index of the slit/order must be provided when using auto weights for Echelle data.') + raise PypeItError('The index of the slit/order must be provided when using auto weights for Echelle data.') return self.use_weights[indx] if self.par['coadd2d']['weights'] == 'auto' else super()._get_weights() @@ -1929,7 +1929,7 @@ def get_brightest_obj(self, specobjs_list, orders): fracpos_id[iexp] = uni_fracpos_id[snr_bar_vec.argmax()] snr_bar[iexp] = snr_bar_vec[snr_bar_vec.argmax()] if 0 in snr_bar: - msgs.warn('You do not appear to have a unique reference object that was traced as the highest S/N ' + msgs.warning('You do not appear to have a unique reference object that was traced as the highest S/N ' 'ratio for every exposure') return None, None return fracpos_id, snr_bar @@ -1997,9 +1997,9 @@ def reference_trace_stack(self, slitid, offsets=None, uniq_obj_id=None): # check inputs if offsets is not None and uniq_obj_id is not None: - msgs.error('You can only input offsets or an uniq_obj_id, but not both') + raise PypeItError('You can only input offsets or an uniq_obj_id, but not both') if offsets is None and uniq_obj_id is None: - msgs.error('You must input either offsets or a uniq_obj_id to determine the stack of ' + raise PypeItError('You must input either offsets or a uniq_obj_id to determine the stack of ' 'reference traces') # if offset is provided, we stack about the center of the slit diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index f9248549e4..853b2a1a1b 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -444,22 +444,22 @@ def __init__(self, 
spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor self.correct_dar = self.cubepar['correct_dar'] # Do some quick checks on the input options if skysub_frame is not None and len(skysub_frame) != self.numfiles: - msgs.error("The skysub_frame list should be identical length to the spec2dfiles list") + raise PypeItError("The skysub_frame list should be identical length to the spec2dfiles list") if sensfile is not None and len(sensfile) != self.numfiles: - msgs.error("The sensfile list should be identical length to the spec2dfiles list") + raise PypeItError("The sensfile list should be identical length to the spec2dfiles list") if scale_corr is not None and len(scale_corr) != self.numfiles: - msgs.error("The scale_corr list should be identical length to the spec2dfiles list") + raise PypeItError("The scale_corr list should be identical length to the spec2dfiles list") if grating_corr is not None and len(grating_corr) != self.numfiles: - msgs.error("The grating_corr list should be identical length to the spec2dfiles list") + raise PypeItError("The grating_corr list should be identical length to the spec2dfiles list") if ra_offsets is not None and len(ra_offsets) != self.numfiles: - msgs.error("The ra_offsets list should be identical length to the spec2dfiles list") + raise PypeItError("The ra_offsets list should be identical length to the spec2dfiles list") if dec_offsets is not None and len(dec_offsets) != self.numfiles: - msgs.error("The dec_offsets list should be identical length to the spec2dfiles list") + raise PypeItError("The dec_offsets list should be identical length to the spec2dfiles list") # Make sure both ra_offsets and dec_offsets are either both None or both lists if ra_offsets is None and dec_offsets is not None: - msgs.error("If you provide dec_offsets, you must also provide ra_offsets") + raise PypeItError("If you provide dec_offsets, you must also provide ra_offsets") if ra_offsets is not None and dec_offsets is None: - msgs.error("If you provide ra_offsets, you must also provide dec_offsets") + raise PypeItError("If you provide ra_offsets, you must also provide dec_offsets") # Set the frame specific options self.sensfile = None if sensfile is None: @@ -478,12 +478,12 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. 
if self.numfiles == 1 and self.cubepar["reference_image"] is None: if self.align: - msgs.warn("Parameter 'align' should be False when there is only one frame and no reference image") + msgs.warning("Parameter 'align' should be False when there is only one frame and no reference image") msgs.info("Setting 'align' to False") self.align = False if self.ra_offsets is not None: if not self.align: - msgs.warn("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.") + msgs.warning("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.") msgs.info("Setting 'align' to True") self.align = True # If no ra_offsets or dec_offsets have been provided, initialise the lists @@ -538,7 +538,7 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.") self.skip_subpix_weights = True else: - msgs.error(f"The following datacube method is not allowed: {self.method}") + raise PypeItError(f"The following datacube method is not allowed: {self.method}") # Get the detector number and string representation if det is None: @@ -557,7 +557,7 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor # If a reference image has been set, check that it exists if self.cubepar['reference_image'] is not None: if not os.path.exists(self.cubepar['reference_image']): - msgs.error("Reference image does not exist:" + msgs.newline() + self.cubepar['reference_image']) + raise PypeItError("Reference image does not exist:" + msgs.newline() + self.cubepar['reference_image']) # Load the default scaleimg frame for the scale correction self.scalecorr_default = "none" @@ -579,26 +579,26 @@ def check_outputs(self): outfile = datacube.get_output_filename("", self.cubepar['output_filename'], self.combine) out_whitelight = datacube.get_output_whitelight_filename(outfile) if os.path.exists(outfile) and not self.overwrite: - msgs.error("Output filename already exists:"+msgs.newline()+outfile) + raise PypeItError("Output filename already exists:"+msgs.newline()+outfile) if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: - msgs.error("Output filename already exists:"+msgs.newline()+out_whitelight) + raise PypeItError("Output filename already exists:"+msgs.newline()+out_whitelight) else: # Finally, if there's just one file, check if the output filename is given if self.numfiles == 1 and self.cubepar['output_filename'] != "": outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) out_whitelight = datacube.get_output_whitelight_filename(outfile) if os.path.exists(outfile) and not self.overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + outfile) + raise PypeItError("Output filename already exists:" + msgs.newline() + outfile) if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) + raise PypeItError("Output filename already exists:" + msgs.newline() + out_whitelight) else: for ff in range(self.numfiles): outfile = datacube.get_output_filename(self.spec2d[ff], self.cubepar['output_filename'], self.combine, ff+1) out_whitelight = datacube.get_output_whitelight_filename(outfile) if os.path.exists(outfile) and not self.overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + outfile) + raise PypeItError("Output filename already exists:" + msgs.newline() + outfile) 
if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: - msgs.error("Output filename already exists:" + msgs.newline() + out_whitelight) + raise PypeItError("Output filename already exists:" + msgs.newline() + out_whitelight) def set_blaze_spline(self, wave_spl, spec_spl): """ @@ -635,8 +635,8 @@ def set_default_scalecorr(self): self.detname, chk_version=self.chk_version) except Exception as e: - msgs.warn(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') - msgs.warn("Could not load scaleimg from spec2d file:" + msgs.newline() + + msgs.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') + msgs.warning("Could not load scaleimg from spec2d file:" + msgs.newline() + self.cubepar['scale_corr'] + msgs.newline() + "scale correction will not be performed unless you have specified the correct" + msgs.newline() + "scale_corr file in the spec2d block") @@ -697,8 +697,8 @@ def get_current_scalecorr(self, spec2DObj, scalecorr=None): spec2DObj_scl = spec2dobj.Spec2DObj.from_file(scalecorr, self.detname, chk_version=self.chk_version) except Exception as e: - msgs.warn(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') - msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + scalecorr) + msgs.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') + raise PypeItError("Could not load skysub image from spec2d file:" + msgs.newline() + scalecorr) else: relScaleImg = spec2DObj_scl.scaleimg this_scalecorr = scalecorr @@ -731,7 +731,7 @@ def set_default_skysub(self): chk_version=self.chk_version) skysub_exptime = self.spec.get_meta_value([spec2DObj.head0], 'exptime') except: - msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame']) + raise PypeItError("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame']) else: self.skysub_default = self.cubepar['skysub_frame'] self.skyImgDef = spec2DObj.sciimg / skysub_exptime # Sky counts/second @@ -802,7 +802,7 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): chk_version=self.chk_version) skysub_exptime = self.spec.get_meta_value([spec2DObj_sky.head0], 'exptime') except: - msgs.error("Could not load skysub image from spec2d file:" + msgs.newline() + opts_skysub) + raise PypeItError("Could not load skysub image from spec2d file:" + msgs.newline() + opts_skysub) skyImg = spec2DObj_sky.sciimg * exptime / skysub_exptime # Sky counts skyScl = spec2DObj_sky.scaleimg this_skysub = opts_skysub # User specified spec2d for sky subtraction @@ -833,7 +833,7 @@ def add_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): """ # Check if the Flat file exists if not os.path.exists(flatfile): - msgs.warn("Grating correction requested, but the following file does not exist:" + msgs.newline() + flatfile) + msgs.warning("Grating correction requested, but the following file does not exist:" + msgs.newline() + flatfile) return if flatfile not in self.flat_splines.keys(): msgs.info("Calculating relative sensitivity for grating correction") @@ -867,7 +867,7 @@ def run(self): details of this procedure, see the child routines. 
""" msgs.bug("This routine should be overridden by child classes.") - msgs.error("Cannot proceed without coding the run() routine.") + raise PypeItError("Cannot proceed without coding the run() routine.") class SlicerIFUCoAdd3D(CoAdd3D): @@ -945,7 +945,7 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): alignments = alignframe.Alignments.from_file(alignfile, chk_version=self.chk_version) else: - msgs.warn(f'Processed alignment frame not recorded or not found!') + msgs.warning(f'Processed alignment frame not recorded or not found!') msgs.info("Using slit edges for astrometric transform") else: msgs.info("Using slit edges for astrometric transform") @@ -1052,7 +1052,7 @@ def load(self): wnonzero = (waveimg != 0.0) if not np.any(wnonzero): - msgs.error("The wavelength image contains only zeros - You need to check the data reduction.") + raise PypeItError("The wavelength image contains only zeros - You need to check the data reduction.") wave0 = waveimg[wnonzero].min() # Calculate the delta wave in every pixel on the slit waveimp = np.roll(waveimg, 1, axis=0) @@ -1090,10 +1090,10 @@ def load(self): # If the spatial scale has been set by the user, check that it doesn't exceed the pixel or slicer scales if self._dspat is not None: if pxscl > self._dspat: - msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( + msgs.warning("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( 3600.0 * self._dspat, 3600.0 * pxscl)) if slscl > self._dspat: - msgs.warn("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( + msgs.warning("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( 3600.0 * self._dspat, 3600.0 * slscl)) # Construct a good pixel mask @@ -1297,7 +1297,7 @@ def run_align(self): reference=self.cubepar['reference_image'], collapse=True, equinox=2000.0, specname=self.specname) if voxedge[2].size != 2: - msgs.error("Spectral range for WCS is incorrect for white light image") + raise PypeItError("Spectral range for WCS is incorrect for white light image") wl_imgs = datacube.generate_image_subpixel(image_wcs, voxedge, self.all_sci, self.all_ivar, self.all_wave, slitid_img_gpm, self.all_wghts, self.all_wcs, diff --git a/pypeit/core/arc.py b/pypeit/core/arc.py index 810f207645..eea697e546 100644 --- a/pypeit/core/arc.py +++ b/pypeit/core/arc.py @@ -343,7 +343,7 @@ def resize_mask2arc(shape_arc, slitmask_orig): (nspec_orig,nspat_orig) = slitmask_orig.shape if nspec_orig != nspec: if ((nspec_orig > nspec) & (nspec_orig % nspec != 0)) | ((nspec > nspec_orig) & (nspec % nspec_orig != 0)): - msgs.error('Problem with images sizes. arcimg size and calibration size need to be integer multiples of each other') + raise PypeItError('Problem with images sizes. arcimg size and calibration size need to be integer multiples of each other') else: msgs.info('Calibration images have different binning than the arcimg. 
slitmask = utils.rebin_slice(slitmask_orig, (nspec, nspat)) @@ -803,7 +803,7 @@ def iter_continuum(spec, gpm=None, fwhm=4.0, sigthresh = 2.0, sigrej=3.0, niter_ #frac_mask = np.sum(np.invert(cont_mask))/float(nspec) nmask = np.sum(np.invert(peak_mask[gpm])) if nmask > max_nmask: - msgs.warn('Too many pixels {:d} masked in spectrum continuum definiton: frac_mask = {:5.3f} > {:5.3f} which is ' + msgs.warning('Too many pixels {:d} masked in spectrum continuum definition: frac_mask = {:5.3f} > {:5.3f} which is ' 'max allowed. Only masking the {:d} largest values....'.format(nmask, nmask/nspec_available, max_mask_frac, max_nmask)) # Old #cont_mask = np.ones_like(cont_mask) & gpm @@ -818,7 +818,7 @@ ngood = np.sum(cont_mask) if ngood == 0: - msgs.warn("All pixels rejected for continuum. Returning a 0 array") + msgs.warning("All pixels rejected for continuum. Returning a 0 array") return np.zeros_like(spec), cont_mask samp_width = np.ceil(ngood/cont_samp).astype(int) @@ -1002,7 +1002,7 @@ def detect_lines(censpec, sigdetect=5.0, fwhm=4.0, fit_frac_fwhm=1.25, input_thr sigma_lower=3.0, sigma_upper=3.0, cenfunc= np.nanmedian, stdfunc = np.nanstd) if stddev == 0.0: - msgs.warn('stddev = 0.0, so resetting to 0.1') + msgs.warning('stddev = 0.0, so resetting to 0.1') stddev = 0.1 thresh = med + sigdetect * stddev else: @@ -1013,7 +1013,7 @@ if input_thresh == 'None': thresh = None else: - msgs.error('Unrecognized value for thresh') + raise PypeItError('Unrecognized value for thresh') stddev = 1.0 # Find the peak locations @@ -1048,7 +1048,7 @@ # requested, then grab and return only these lines if nfind is not None: if nfind > len(nsig): - msgs.warn('Requested {0} peaks but only found {1}. '.format(nfind, len(tampl)) + + msgs.warning('Requested {0} peaks but only found {1}. '.format(nfind, len(tampl)) + ' Returning all the peaks found.') else: ikeep = (nsig.argsort()[::-1])[0:nfind] diff --git a/pypeit/core/coadd.py b/pypeit/core/coadd.py index 6816259fcb..0f728da635 100644 --- a/pypeit/core/coadd.py +++ b/pypeit/core/coadd.py @@ -116,12 +116,12 @@ def renormalize_errors(chi, mask, clip=6.0, max_corr=5.0, title = '', debug=Fals chi2_sigrej = np.percentile(chi2[maskchi], 100.0*gauss_prob) sigma_corr = np.sqrt(chi2_sigrej) if sigma_corr < 1.0: - msgs.warn("Error renormalization found correction factor sigma_corr = {:f}".format(sigma_corr) + + msgs.warning("Error renormalization found correction factor sigma_corr = {:f}".format(sigma_corr) + " < 1." + msgs.newline() + " Errors are overestimated so not applying correction") sigma_corr = 1.0 if sigma_corr > max_corr: - msgs.warn(("Error renormalization found sigma_corr/sigma = {:f} > {:f}." + msgs.newline() + + msgs.warning(("Error renormalization found sigma_corr/sigma = {:f} > {:f}." + msgs.newline() + "Errors are severely underestimated." + msgs.newline() + "Setting correction to sigma_corr = {:4.2f}").format(sigma_corr, max_corr, max_corr)) sigma_corr = max_corr @@ -130,7 +130,7 @@ renormalize_errors_qa(chi, maskchi, sigma_corr, title=title) else: - msgs.warn('No good pixels in error_renormalize. There are probably issues with your data') + msgs.warning('No good pixels in error_renormalize.
There are probably issues with your data') sigma_corr = 1.0 return sigma_corr, maskchi @@ -168,7 +168,7 @@ def poly_model_eval(theta, func, model, wave, wave_min, wave_max): # Clip to avoid overflow. ymult = np.exp(np.clip(ymult, None, 0.8 * np.log(sys.float_info.max))) case _: - msgs.error('Unrecognized value of model requested') + raise PypeItError('Unrecognized value of model requested') return ymult @@ -418,7 +418,7 @@ def solve_poly_ratio(wave, flux, ivar, flux_ref, ivar_ref, norder, mask=None, ma """ if norder < 1: - msgs.error( + raise PypeItError( 'You cannot solve for the polynomial ratio for norder < 1. For rescaling by a ' 'constant use robust_median_ratio.' ) @@ -452,7 +452,7 @@ def solve_poly_ratio(wave, flux, ivar, flux_ref, ivar_ref, norder, mask=None, ma case 'exp': guess = np.append(np.log(ratio), np.zeros(norder)) case _: - msgs.error('Unrecognized model type') + raise PypeItError('Unrecognized model type') arg_dict = dict(flux=flux, ivar=ivar, mask=mask, flux_med=flux_med, ivar_med=ivar_med, flux_ref_med=flux_ref_med, ivar_ref_med=ivar_ref_med, ivar_ref=ivar_ref, @@ -533,10 +533,10 @@ def interp_oned(wave_new, wave_old, flux_old, ivar_old, gpm_old, log10_blaze_fun """ # Check input if wave_new.ndim != 1 or wave_old.ndim != 1: - msgs.error('All input vectors must be 1D.') + raise PypeItError('All input vectors must be 1D.') if flux_old.shape != wave_old.shape or ivar_old.shape != wave_old.shape \ or gpm_old.shape != wave_old.shape: - msgs.error('All vectors to interpolate must have the same size.') + raise PypeItError('All vectors to interpolate must have the same size.') # Do not interpolate if the wavelength is exactly same with wave_new if np.array_equal(wave_new, wave_old) and not sensfunc: @@ -643,11 +643,11 @@ def interp_spec(wave_new, waves, fluxes, ivars, gpms, log10_blaze_function=None, """ # Check input if wave_new.ndim > 2: - msgs.error('Invalid shape for wave_new; must be 1D or 2D') + raise PypeItError('Invalid shape for wave_new; must be 1D or 2D') if wave_new.ndim == 2 and fluxes.ndim != 1: - msgs.error('If new wavelength grid is 2D, all other input arrays must be 1D.') + raise PypeItError('If new wavelength grid is 2D, all other input arrays must be 1D.') if fluxes.shape != waves.shape or ivars.shape != waves.shape or gpms.shape != waves.shape: - msgs.error('Input spectral arrays must all have the same shape.') + raise PypeItError('Input spectral arrays must all have the same shape.') # First case: interpolate either an (nspec, nexp) array of spectra onto a # single wavelength grid @@ -769,7 +769,7 @@ def calc_snr(fluxes, ivars, gpms): sn_sigclip = stats.sigma_clip(sn_val_ma, sigma=3, maxiters=5) sn2_iexp = sn_sigclip.mean()**2 # S/N^2 value for each spectrum if sn2_iexp is np.ma.masked: - msgs.error(f'No unmasked value in iexp={iexp+1}/{nexp}. Check inputs.') + raise PypeItError(f'No unmasked value in iexp={iexp+1}/{nexp}. Check inputs.') else: sn2.append(sn2_iexp) rms_sn.append(np.sqrt(sn2_iexp)) # Root Mean S/N**2 value for all spectra @@ -845,16 +845,16 @@ def sn_weights(fluxes, ivars, gpms, sn_smooth_npix=None, weight_method='auto', v `numpy.ndarray`_ with the same shape as those in waves. 
""" if weight_method not in ['auto', 'constant', 'uniform', 'wave_dependent', 'relative', 'ivar']: - msgs.error('Unrecognized option for weight_method=%s').format(weight_method) + raise PypeItError('Unrecognized option for weight_method=%s').format(weight_method) nexp = len(fluxes) # Check that all the input lists have the same length if len(ivars) != nexp or len(gpms) != nexp: - msgs.error("Input lists of spectra must have the same length") + raise PypeItError("Input lists of spectra must have the same length") # Check that sn_smooth_npix if weight_method = constant or uniform if sn_smooth_npix is None and weight_method not in ['constant', 'uniform']: - msgs.error("sn_smooth_npix cannot be None unless the weight_method='constant' or weight_method='uniform'") + raise PypeItError("sn_smooth_npix cannot be None unless the weight_method='constant' or weight_method='uniform'") rms_sn, sn_val = calc_snr(fluxes, ivars, gpms) sn2 = np.square(rms_sn) @@ -1017,7 +1017,7 @@ def robust_median_ratio( flux_dat_median = np.median(flux[new_mask]) if (flux_ref_median < 0.0) or (flux_dat_median < 0.0): - msgs.warn('Negative median flux found. Not rescaling') + msgs.warning('Negative median flux found. Not rescaling') ratio = 1.0 else: if verbose: @@ -1025,12 +1025,12 @@ def robust_median_ratio( ratio = np.fmax(np.fmin(flux_ref_median/flux_dat_median, max_factor), 1.0/max_factor) else: if (np.sum(calc_mask) <= min_good*nspec): - msgs.warn( + msgs.warning( f'Found only {np.sum(calc_mask)} good pixels for computing median flux ratio.' + msgs.newline() + 'No median rescaling applied' ) if (snr_resc_med <= snr_do_not_rescale): - msgs.warn( + msgs.warning( f'Median flux ratio of pixels in reference spectrum {snr_resc_med} <= ' f'snr_do_not_rescale = {snr_do_not_rescale}.' + msgs.newline() + 'No median rescaling applied' @@ -1162,7 +1162,7 @@ def scale_spec(wave, flux, ivar, sn, wave_ref, flux_ref, ivar_ref, mask=None, ma elif method_used == 'hand': # Input? if hand_scale is None: - msgs.error("Need to provide hand_scale parameter, single value") + raise PypeItError("Need to provide hand_scale parameter, single value") flux_scale = flux * hand_scale ivar_scale = ivar * 1.0 / hand_scale ** 2 scale = np.full(flux.size, hand_scale) @@ -1171,7 +1171,7 @@ def scale_spec(wave, flux, ivar, sn, wave_ref, flux_ref, ivar_ref, mask=None, ma ivar_scale = ivar.copy() scale = np.ones_like(flux) else: - msgs.error("Scale method not recognized! Check documentation for available options") + raise PypeItError("Scale method not recognized! 
Check documentation for available options") # Finish if show: scale_spec_qa(wave, flux*mask, ivar*mask, wave_ref, flux_ref*mask_ref, ivar_ref*mask_ref, scale, method_used, mask = mask, mask_ref=mask_ref, @@ -1872,7 +1872,7 @@ def spec_reject_comb(wave_grid, wave_grid_mid, waves_list, fluxes_list, ivars_li iter += 1 if (iter == maxiter_reject) & (maxiter_reject != 0): - msgs.warn('Maximum number of iterations maxiter={:}'.format(maxiter_reject) + ' reached in spec_reject_comb') + msgs.warning('Maximum number of iterations maxiter={:}'.format(maxiter_reject) + ' reached in spec_reject_comb') out_gpms = np.copy(this_gpms) out_gpms_list = utils.array_to_explist(out_gpms, nspec_list=nspec_list) @@ -2741,7 +2741,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se # if the wavelength grid is non-monotonic, resample onto a loglam grid wave_grid_diff_ord = np.diff(wave_grid_ord) if np.any(wave_grid_diff_ord < 0): - msgs.warn(f'This order ({iord}) has a non-monotonic wavelength solution. Resampling now: ') + msgs.warning(f'This order ({iord}) has a non-monotonic wavelength solution. Resampling now: ') wave_grid_ord = np.linspace(np.min(wave_grid_ord), np.max(wave_grid_ord), len(wave_grid_ord)) wave_grid_diff_ord = np.diff(wave_grid_ord) @@ -2879,14 +2879,14 @@ def get_wave_ind(wave_grid, wave_min, wave_max): diff[diff > 0] = np.inf if not np.any(diff < 0): ind_lower = 0 - msgs.warn('Your wave grid does not extend blue enough. Taking bluest point') + msgs.warning('Your wave grid does not extend blue enough. Taking bluest point') else: ind_lower = np.argmin(np.abs(diff)) diff = wave_max - wave_grid diff[diff > 0] = np.inf if not np.any(diff < 0): ind_upper = wave_grid.size-1 - msgs.warn('Your wave grid does not extend red enough. Taking reddest point') + msgs.warning('Your wave grid does not extend red enough. Taking reddest point') else: ind_upper = np.argmin(np.abs(diff)) diff --git a/pypeit/core/collate.py b/pypeit/core/collate.py index 4974990f85..1ddc6be057 100644 --- a/pypeit/core/collate.py +++ b/pypeit/core/collate.py @@ -63,7 +63,7 @@ def __init__(self, spec1d_obj, spec1d_header, spec1d_file, spectrograph, match_t try: self.coord = SkyCoord(spec1d_obj.RA, spec1d_obj.DEC, unit='deg') except Exception as e: - msgs.error(f"Cannot do ra/dec matching on {spec1d_obj.NAME}, could not read RA/DEC.") + raise PypeItError(f"Cannot do ra/dec matching on {spec1d_obj.NAME}, could not read RA/DEC.") else: self.coord = spec1d_obj['SPAT_PIXPOS'] @@ -164,7 +164,7 @@ def combine(self, other_source_object): if other_source_object._spectrograph.name != self._spectrograph.name or \ other_source_object.match_type != self.match_type: - msgs.error(f"Can't append incompatible source objects. {self.spectrograph.name}/{self.match_type} does not match {other_source_object.spectrograph.name}/{other_source_object.match_type}") + raise PypeItError(f"Can't append incompatible source objects. 
{self.spectrograph.name}/{self.match_type} does not match {other_source_object.spectrograph.name}/{other_source_object.match_type}") self.spec_obj_list += other_source_object.spec_obj_list self.spec1d_file_list += other_source_object.spec1d_file_list diff --git a/pypeit/core/combine.py b/pypeit/core/combine.py index 80a6d1537f..22db2479e1 100644 --- a/pypeit/core/combine.py +++ b/pypeit/core/combine.py @@ -108,7 +108,7 @@ def weighted_combine(weights, sci_list, var_list, inmask_stack, if nimgs == 1: # If only one image is passed in, simply return the input lists of images, but reshaped # to be (nspec, nspat) - msgs.warn('Cannot combine a single image. Returning input images') + msgs.warning('Cannot combine a single image. Returning input images') sci_list_out = [] for sci_stack in sci_list: sci_list_out.append(sci_stack.reshape(img_shape)) @@ -121,7 +121,7 @@ def weighted_combine(weights, sci_list, var_list, inmask_stack, if sigma_clip and nimgs >= 3: if sigma_clip_stack is None: - msgs.error('You must specify sigma_clip_stack; sigma-clipping is based on this array ' + raise PypeItError('You must specify sigma_clip_stack; sigma-clipping is based on this array ' 'and propagated to the arrays to be stacked.') if sigrej is None: # NOTE: If these are changed, make sure to update the doc-string! @@ -144,7 +144,7 @@ def weighted_combine(weights, sci_list, var_list, inmask_stack, mask_stack = np.logical_not(data_clipped.mask) # mask_stack = True are good values else: if sigma_clip and nimgs < 3: - msgs.warn('Sigma clipping requested, but you cannot sigma clip with less than 3 ' + msgs.warning('Sigma clipping requested, but you cannot sigma clip with less than 3 ' 'images. Proceeding without sigma clipping') mask_stack = inmask_stack # mask_stack = True are good values @@ -195,22 +195,22 @@ def img_list_error_check(sci_list, var_list): for img in sci_list: shape_sci_list.append(img.shape) if img.ndim < 2: - msgs.error('Dimensionality of an image in sci_list is < 2') + raise PypeItError('Dimensionality of an image in sci_list is < 2') shape_var_list = [] for img in var_list: shape_var_list.append(img.shape) if img.ndim < 2: - msgs.error('Dimensionality of an image in var_list is < 2') + raise PypeItError('Dimensionality of an image in var_list is < 2') for isci in shape_sci_list: if isci != shape_sci_list[0]: - msgs.error('An image in sci_list have different dimensions') + raise PypeItError('An image in sci_list has different dimensions') for ivar in shape_var_list: if ivar != shape_var_list[0]: - msgs.error('An image in var_list have different dimensions') + raise PypeItError('An image in var_list has different dimensions') if isci != ivar: - msgs.error('An image in sci_list had different dimensions than an image in var_list') + raise PypeItError('An image in sci_list has different dimensions than an image in var_list') shape = shape_sci_list[0] @@ -249,22 +249,22 @@ def broadcast_weights(weights, shape): elif len(shape) == 3: weights_stack = np.einsum('i,ijk->ijk', weights, np.ones(shape)) else: - msgs.error('Image shape is not supported') + raise PypeItError('Image shape is not supported') elif weights.ndim == 2: # Wavelength dependent weights per image if len(shape) == 2: if weights.shape != shape: - msgs.error('The shape of weights does not match the shape of the image stack') + raise PypeItError('The shape of weights does not match the shape of the image stack') weights_stack = weights elif len(shape) == 3: weights_stack = np.einsum('ij,k->ijk', weights, np.ones(shape[2])) elif weights.ndim
== 3: # Full image stack of weights if weights.shape != shape: - msgs.error('The shape of weights does not match the shape of the image stack') + raise PypeItError('The shape of weights does not match the shape of the image stack') weights_stack = weights else: - msgs.error('Unrecognized dimensionality for weights') + raise PypeItError('Unrecognized dimensionality for weights') return weights_stack @@ -315,7 +315,7 @@ def broadcast_lists_of_weights(weights, shapes): elif weight.ndim == 2: weights_list.append(weight) else: - msgs.error('Weights must be a float or a 1D or 2D ndarray') + raise PypeItError('Weights must be a float or a 1D or 2D ndarray') return weights_list diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index cfd5c050e7..c6958f8110 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -526,16 +526,16 @@ def get_whitelight_pixels(all_wave, all_slitid, min_wl, max_wl): if all([isinstance(l, list) for l in list_inputs]): numframes = len(all_wave) if not all([len(l) == numframes for l in list_inputs]): - msgs.error("All input lists must have the same length") + raise PypeItError("All input lists must have the same length") # Store in the following variables _all_wave, _all_slitid = all_wave, all_slitid elif all([not isinstance(l, list) for l in list_inputs]): _all_wave, _all_slitid = [all_wave], [all_slitid] numframes = 1 else: - msgs.error("The input lists must either all be lists (of the same length) or all be numpy arrays") + raise PypeItError("The input lists must either all be lists (of the same length) or all be numpy arrays") if max_wl < min_wl: - msgs.error("The maximum wavelength must be greater than the minimum wavelength") + raise PypeItError("The maximum wavelength must be greater than the minimum wavelength") # Initialise the output out_slitid = [np.zeros(_all_slitid[0].shape, dtype=int) for _ in range(numframes)] # Loop over all frames and find the pixels that are within the wavelength range @@ -545,7 +545,7 @@ def get_whitelight_pixels(all_wave, all_slitid, min_wl, max_wl): ww = np.where((_all_wave[ff] > min_wl) & (_all_wave[ff] < max_wl)) out_slitid[ff][ww] = _all_slitid[ff][ww] else: - msgs.warn("Datacubes do not completely overlap in wavelength.") + msgs.warning("Datacubes do not completely overlap in wavelength.") out_slitid = _all_slitid min_wl, max_wl = None, None for ff in range(numframes): @@ -588,13 +588,13 @@ def get_whitelight_range(wavemin, wavemax, wl_range): wlrng = [wavemin, wavemax] if wl_range[0] is not None: if wl_range[0] < wavemin: - msgs.warn("The user-specified minimum wavelength ({0:.2f}) to use for the white light".format(wl_range[0]) + + msgs.warning("The user-specified minimum wavelength ({0:.2f}) to use for the white light".format(wl_range[0]) + msgs.newline() + "images is lower than the recommended value ({0:.2f}),".format(wavemin) + msgs.newline() + "which ensures that all spaxels cover the same wavelength range.") wlrng[0] = wl_range[0] if wl_range[1] is not None: if wl_range[1] > wavemax: - msgs.warn("The user-specified maximum wavelength ({0:.2f}) to use for the white light".format(wl_range[1]) + + msgs.warning("The user-specified maximum wavelength ({0:.2f}) to use for the white light".format(wl_range[1]) + msgs.newline() + "images is greater than the recommended value ({0:.2f}),".format(wavemax) + msgs.newline() + "which ensures that all spaxels cover the same wavelength range.") wlrng[1] = wl_range[1] @@ -631,10 +631,10 @@ def make_whitelight_fromcube(cube, bpmcube, wave=None, wavemin=None, 
wavemax=Non if wavemin is not None or wavemax is not None: # Make some checks on the input if wave is None: - msgs.error("wave variable must be supplied to create white light image with wavelength cuts") + raise PypeItError("wave variable must be supplied to create white light image with wavelength cuts") else: if wave.size != cube.shape[2]: - msgs.error("wave variable should have the same length as the third axis of cube.") + raise PypeItError("wave variable should have the same length as the third axis of cube.") # assign wavemin & wavemax if one is not provided if wavemin is None: wavemin = np.min(wave) @@ -748,19 +748,19 @@ def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None): # Make sure all frames have consistent pixel scales ratio = (spatscale[:, 0] - spatscale[0, 0]) / spatscale[0, 0] if np.any(np.abs(ratio) > 1E-4): - msgs.warn("The pixel scales of all input frames are not the same!") + msgs.warning("The pixel scales of all input frames are not the same!") spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,0]*3600.0]) msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") # Make sure all frames have consistent slicer scales ratio = (spatscale[:, 1] - spatscale[0, 1]) / spatscale[0, 1] if np.any(np.abs(ratio) > 1E-4): - msgs.warn("The slicer scales of all input frames are not the same!") + msgs.warning("The slicer scales of all input frames are not the same!") spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,1]*3600.0]) msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr + "arcseconds") # Make sure all frames have consistent wavelength sampling ratio = (specscale - specscale[0]) / specscale[0] if np.any(np.abs(ratio) > 1E-2): - msgs.warn("The wavelength samplings of the input frames are not the same!") + msgs.warning("The wavelength samplings of the input frames are not the same!") specstr = ", ".join(["{0:.6f}".format(ss) for ss in specscale]) msgs.info("Wavelength samplings of all input frames:" + msgs.newline() + specstr + "Angstrom") @@ -793,7 +793,7 @@ def check_inputs(list_inputs): # Several frames are being combined. Check the lists have the same length numframes = len(list_inputs[0]) if not all([len(l) == numframes for l in list_inputs]): - msgs.error("All input lists must have the same length") + raise PypeItError("All input lists must have the same length") # The inputs are good, return as is return tuple(list_inputs) elif all([not isinstance(l, list) for l in list_inputs]): @@ -803,7 +803,7 @@ def check_inputs(list_inputs): ret_list += ([l],) return ret_list else: - msgs.error("The input arguments should all be of type 'list', or all not be of type 'list':") + raise PypeItError("The input arguments should all be of type 'list', or all not be of type 'list':") def wcs_bounds(raImg, decImg, waveImg, slitid_img_gpm, ra_offsets=None, dec_offsets=None, @@ -1302,7 +1302,7 @@ def compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, # If there's only one frame, use uniform weighting if numframes == 1: - msgs.warn("Only one frame provided. Using uniform weighting.") + msgs.warning("Only one frame provided. 
Using uniform weighting.") return np.ones_like(sciImg) # Check the WCS bounds @@ -1590,7 +1590,7 @@ def generate_cube_subpixel(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_im """ # Check the inputs if whitelight_range is not None and outfile is None: - msgs.error("Must provide an outfile name if whitelight_range is set") + raise PypeItError("Must provide an outfile name if whitelight_range is set") # Subpixellate flxcube, varcube, bpmcube = subpixellate(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_img_gpm, wghtImg, diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 5e0295e0c3..3f1f526a74 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -132,7 +132,7 @@ def extract_optimal(imgminsky, ivar, mask, waveimg, skyimg, thismask, oprof, # Exit gracefully if we have no positive object profiles, since that means something was wrong with object fitting if not np.any(oprof > 0.0): - msgs.warn('Object profile is zero everywhere. This aperture is junk.') + msgs.warning('Object profile is zero everywhere. This aperture is junk.') return mincol = np.min(ispat) @@ -505,18 +505,18 @@ def extract_hist_spectrum(waveimg, frame, gpm=None, bins=1000): """ # Check the inputs if waveimg.shape != frame.shape: - msgs.error("Wavelength image is not the same shape as the input frame") + raise PypeItError("Wavelength image is not the same shape as the input frame") # Check the GPM _gpm = gpm if gpm is not None else waveimg > 0 if waveimg.shape != _gpm.shape: - msgs.error("Wavelength image is not the same shape as the GPM") + raise PypeItError("Wavelength image is not the same shape as the GPM") # Set the bins if isinstance(bins, int): _bins = np.linspace(np.min(waveimg[_gpm]), np.max(waveimg[_gpm]), bins) elif isinstance(bins, np.ndarray): _bins = bins else: - msgs.error("Argument 'bins' should be an integer or a numpy array") + raise PypeItError("Argument 'bins' should be an integer or a numpy array") # Construct a histogram and the normalisation hist, edge = np.histogram(waveimg[gpm], bins=_bins, weights=frame[gpm]) @@ -759,7 +759,7 @@ def return_gaussian(sigma_x, norm_obj, fwhm, med_sn2, obj_string, inf = np.isfinite(profile_model) == False ninf = np.sum(inf) if ninf != 0: - msgs.warn("Nan pixel values in object profile... setting them to zero") + msgs.warning("NaN pixel values in object profile... setting them to zero") profile_model[inf] = 0.0 if show_profile: qa_fit_profile(sigma_x, norm_obj, profile_model, title = title_string, l_limit = l_limit, r_limit = r_limit, @@ -892,7 +892,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, eligible_pixels = np.sum((wave >= wave_min) & (wave <= wave_max)) good_pix_frac = 0.05 if (np.sum(indsp) < good_pix_frac*eligible_pixels) or (eligible_pixels == 0): - msgs.warn('There are no pixels eligible to be fit for the object profile.' + msgs.newline() + + msgs.warning('There are no pixels eligible to be fit for the object profile.' + msgs.newline() + 'There is likely an issue in local_skysub_extract. 
Returning a Gassuain with fwhm={:5.3f}'.format(thisfwhm)) profile_model = return_gaussian(sigma_x, None, thisfwhm, 0.0, obj_string, False) return profile_model, trace_in, fwhmfit, 0.0 @@ -907,7 +907,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, try: cont_flux, _ = c_answer.value(wave[indsp]) except: - msgs.warn('Problem estimating S/N ratio of spectrum' + msgs.newline() + + msgs.warning('Problem estimating S/N ratio of spectrum' + msgs.newline() + 'There is likely an issue in local_skysub_extract. Returning a Gassuain with fwhm={:5.3f}'.format(thisfwhm)) profile_model = return_gaussian(sigma_x, None, thisfwhm, 0.0, obj_string, False) return profile_model, trace_in, fwhmfit, 0.0 @@ -955,7 +955,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, bounds_error=False,fill_value = 'extrapolate') sn2_img[totmask] = sn2_interp(waveimg[totmask]) else: - msgs.warn('All pixels are masked') + msgs.warning('All pixels are masked') msgs.info('sqrt(med(S/N)^2) = ' + "{:5.2f}".format(np.sqrt(med_sn2))) @@ -1310,13 +1310,13 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, nxinf = np.sum(np.isfinite(xnew) == False) if (nxinf != 0): - msgs.warn("Nan pixel values in trace correction") - msgs.warn("Returning original trace....") + msgs.warning("NaN pixel values in trace correction") + msgs.warning("Returning original trace....") xnew = trace_in inf = np.isfinite(profile_model) == False ninf = np.sum(inf) if (ninf != 0): - msgs.warn("Nan pixel values in object profile... setting them to zero") + msgs.warning("NaN pixel values in object profile... setting them to zero") profile_model[inf] = 0.0 # Normalize profile norm = np.outer(np.sum(profile_model, 1), np.ones(nspat)) diff --git a/pypeit/core/findobj_skymask.py b/pypeit/core/findobj_skymask.py index b5ec28ed2e..08ee18a1c1 100644 --- a/pypeit/core/findobj_skymask.py +++ b/pypeit/core/findobj_skymask.py @@ -123,7 +123,7 @@ def create_skymask(sobjs, thismask, slit_left, slit_righ, box_rad_pix=None, trim # TODO: There is this hard-coded check here, and then there is a similar # check in skysub.global_skysub. Do we need both? if np.sum(skymask_fwhm)/np.sum(thismask) < 0.10: - msgs.warn('More than 90% of usable area on this slit would be masked and not used by ' + msgs.warning('More than 90% of usable area on this slit would be masked and not used by ' 'global sky subtraction. Something is probably wrong with object finding for ' 'this slit. Not masking object for global sky subtraction.') skymask_fwhm = np.copy(thismask) @@ -405,7 +405,7 @@ def ech_fof_sobjs(sobjs:specobjs.SpecObjs, elif nfound==1: obj_id_init = np.ones(1,dtype='int') else: - msgs.error('No objects found in ech_fof_sobjs. Should not have called this routine') + raise PypeItError('No objects found in ech_fof_sobjs. 
Should not have called this routine') uni_obj_id_init, uni_ind_init = np.unique(obj_id_init, return_index=True) @@ -417,7 +417,7 @@ def ech_fof_sobjs(sobjs:specobjs.SpecObjs, for iord in range(norders): on_order = (obj_id_init == uni_obj_id_init[iobj]) & (sobjs.ECH_ORDER == order_vec[iord]) if (np.sum(on_order) > 1): - msgs.warn('Found multiple objects in a FOF group on order iord={:d}'.format(order_vec[iord]) + msgs.newline() + + msgs.warning('Found multiple objects in a FOF group on order iord={:d}'.format(order_vec[iord]) + msgs.newline() + 'Spawning new objects to maintain a single object per order.') off_order = (obj_id_init == uni_obj_id_init[iobj]) & (sobjs.ECH_ORDER != order_vec[iord]) ind = np.where(on_order)[0] @@ -514,7 +514,7 @@ def ech_fill_in_orders(sobjs:specobjs.SpecObjs, # Check standard star if std_trace is not None and len(std_trace) != norders: - msgs.warn('Standard star trace does not match the number of orders in the echelle data.' + msgs.warning('Standard star trace does not match the number of orders in the echelle data.' ' Will use the slit edges to trace the object in the missing orders.') # For traces @@ -657,7 +657,7 @@ def ech_fill_in_orders(sobjs:specobjs.SpecObjs, # Object is already on this order so no need to do anything pass elif num_on_order > 1: - msgs.error('Problem in echelle object finding. The same objid={:d} appears {:d} times on echelle orderindx ={:d}' + raise PypeItError('Problem in echelle object finding. The same objid={:d} appears {:d} times on echelle orderindx ={:d}' ' even after duplicate obj_ids the orders were removed. ' 'Report this bug to PypeIt developers'.format(uni_obj_id[iobj],num_on_order, iord)) # Return @@ -832,7 +832,7 @@ def ech_cutobj_on_snr( nobj_trim = np.sum(keep_obj) if nobj_trim == 0: - msgs.warn('No objects found') + msgs.warning('No objects found') sobjs_final = specobjs.SpecObjs() return sobjs_final @@ -929,7 +929,7 @@ def ech_pca_traces( # Checks if norders != spec_min_max.shape[1]: - msgs.error("Number of good orders does not match the number of orders in spec_min_max") + raise PypeItError("Number of good orders does not match the number of orders in spec_min_max") # Loop over the objects one by one and adjust/predict the traces pca_fits = np.zeros((nspec, norders, nobj_trim)) @@ -1029,7 +1029,7 @@ def ech_pca_traces( # Vette for sobj in sobjs_final: if not sobj.ready_for_extraction(): - msgs.error("Bad SpecObj. Can't proceed") + raise PypeItError("Bad SpecObj. 
Can't proceed") return sobjs_final @@ -1259,14 +1259,14 @@ def ech_objfind(image, ivar, slitmask, slit_left, slit_righ, slit_spat_id, order # TODO JFH Relaxing this strict requirement on the slitmask image for the time being #gdslit_spat = np.unique(slitmask[slitmask >= 0]).astype(int) # Unique sorts #if gdslit_spat.size != norders: - #msgs.error('Number of slitidsin slitmask and the number of left/right slits must be the same.') + #raise PypeItError('Number of slitidsin slitmask and the number of left/right slits must be the same.') if slit_righ.shape[1] != norders: - msgs.error('Number of left and right slits must be the same.') + raise PypeItError('Number of left and right slits must be the same.') if order_vec.size != norders: - msgs.error('Number of orders in order_vec and left/right slits must be the same.') + raise PypeItError('Number of orders in order_vec and left/right slits must be the same.') if spec_min_max.shape[1] != norders: - msgs.error('Number of orders in spec_min_max and left/right slits must be the same.') + raise PypeItError('Number of orders in spec_min_max and left/right slits must be the same.') if specobj_dict is None: specobj_dict = {'SLITID': 999, @@ -1817,12 +1817,12 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, npeak_not_near_edge = np.sum(np.logical_not(near_edge_bpm)) if np.any(near_edge_bpm): - msgs.warn('Discarding {:d}'.format(np.sum(near_edge_bpm)) + + msgs.warning('Discarding {:d}'.format(np.sum(near_edge_bpm)) + ' at spatial pixels spat = {:}'.format(x_peaks_all[near_edge_bpm]) + ' which land within trim_edg = (left, right) = {:}'.format(trim_edg) + ' pixels from the slit boundary for this nsamp = {:5.2f}'.format(nsamp) + ' wide slit') - msgs.warn('You must decrease from the current value of trim_edg in order to keep them') - msgs.warn('Such edge objects are often spurious') + msgs.warning('You must decrease from the current value of trim_edg in order to keep them') + msgs.warning('Such edge objects are often spurious') # If the user requested the nperslit most significant peaks have been requested, then only return these @@ -1838,7 +1838,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, nperslit_bpm = np.zeros(npeaks_all, dtype=bool) if np.any(nperslit_bpm): - msgs.warn('Discarding {:d}'.format(np.sum(nperslit_bpm)) + + msgs.warning('Discarding {:d}'.format(np.sum(nperslit_bpm)) + ' at spatial pixels spat = {:} and SNR = {:}'.format( x_peaks_all[nperslit_bpm], snr_peaks_all[nperslit_bpm]) + ' which are below SNR_thresh={:5.3f} set because the maximum number of objects '.format(snr_thresh_perslit) + @@ -1967,7 +1967,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, elif std_trace is not None: # If no objects found, use the standard? trace_model = std_trace else: # If no objects or standard use the slit boundary - msgs.warn("No source to use as a trace. Using the slit boundary") + msgs.warning("No source to use as a trace. 
Using the slit boundary") trace_model = slit_left # Loop over hand_extract apertures and create and assign specobj @@ -2031,7 +2031,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, close = np.abs(sobjs[reg_ind].SPAT_PIXPOS - spat_pixpos[ihand]) <= 0.6*spec_fwhm[ihand] if np.any(close): # Print out a warning - msgs.warn('Deleting object(s) {}'.format(sobjs[reg_ind[close]].NAME) + + msgs.warning('Deleting object(s) {}'.format(sobjs[reg_ind[close]].NAME) + ' because it collides with a user specified hand_extract aperture') keep[reg_ind[close]] = False @@ -2071,7 +2071,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, # Vet if not sobj.ready_for_extraction(): # embed(header=utils.embed_header()) - msgs.error("Bad SpecObj. Can't proceed") + raise PypeItError("Bad SpecObj. Can't proceed") # Return return sobjs diff --git a/pypeit/core/fitting.py b/pypeit/core/fitting.py index eddc49ec07..e2b0901695 100644 --- a/pypeit/core/fitting.py +++ b/pypeit/core/fitting.py @@ -98,7 +98,7 @@ def to_hdu(self, **kwargs): See that func for Args and Returns """ if 'force_to_bintbl' in kwargs and not kwargs['force_to_bintbl']: - msgs.warn('PypeItFits objects must always be forced to a BinaryTableHDU for writing.') + msgs.warning('PypeItFits objects must always be forced to a BinaryTableHDU for writing.') kwargs['force_to_bintbl'] = True return super(PypeItFit, self).to_hdu(**kwargs) @@ -140,7 +140,7 @@ def fit(self): self.fitc = np.zeros(self.order[0] + 1, self.order[1] + 1).astype(float) else: self.fitc = np.zeros(self.order[0] + 1).astype(float) - msgs.warn('Input gpm is masked everywhere. Fit is probably probelmatic') + msgs.warning('Input gpm is masked everywhere. Fit is probably probelmatic') self.success = 0 return self.success @@ -186,7 +186,7 @@ def fit(self): xv, y_out, self.order[0], w=np.sqrt(w_out) if w_out is not None else None) # numpy convention else: - msgs.error("Fitting function '{0:s}' is not implemented yet" + msgs.newline() + + raise PypeItError("Fitting function '{0:s}' is not implemented yet" + msgs.newline() + "Please choose from 'polynomial', 'legendre', 'chebyshev','polynomial2d', 'legendre2d'") self.success = 1 @@ -286,7 +286,7 @@ def evaluate_fit(fitc, func, x, x2=None, minx=None, return (np.polynomial.legendre.legval2d(xv, x2v, fitc) if func[:-2] == "legendre" else np.polynomial.chebyshev.chebval2d(xv, x2v, fitc)) else: - msgs.error("Function {0:s} has not yet been implemented for 2d fits".format(func)) + raise PypeItError("Function {0:s} has not yet been implemented for 2d fits".format(func)) # TODO: Why is this return here? The code will never reach this point # because of the if/elif/else above. What should the behavior be, raise # an exception or return None? 
@@ -298,7 +298,7 @@ def evaluate_fit(fitc, func, x, x2=None, minx=None, return (np.polynomial.legendre.legval(xv, fitc) if func == "legendre" else np.polynomial.chebyshev.chebval(xv, fitc)) else: - msgs.error("Fitting function '{0:s}' is not implemented yet" + msgs.newline() + + raise PypeItError("Fitting function '{0:s}' is not implemented yet" + msgs.newline() + "Please choose from 'polynomial', 'legendre', 'chebyshev', 'polynomial2d', 'legendre2d', 'chebyshev2d'") @@ -427,9 +427,9 @@ def robust_fit(xarray, yarray, order, x2=None, function='polynomial', #pypeitFit = None while (not qdone) and (iIter < maxiter): if np.sum(this_gpm) <= np.sum(order) + 1: - msgs.warn("More parameters than data points - fit might be undesirable") + msgs.warning("More parameters than data points - fit might be undesirable") if not np.any(this_gpm): - msgs.warn("All points were masked. Returning current fit and masking all points. Fit is likely undesirable") + msgs.warning("All points were masked. Returning current fit and masking all points. Fit is likely undesirable") pypeitFit = PypeItFit(xval=xarray.astype(float), yval=yarray.astype(float), func=function, order=np.atleast_1d(order), x2=x2.astype(float) if x2 is not None else x2, @@ -448,7 +448,7 @@ def robust_fit(xarray, yarray, order, x2=None, function='polynomial', # Update the iteration iIter += 1 if (iIter == maxiter) & (maxiter != 0) & verbose: - msgs.warn(f'Maximum number of iterations maxiter={maxiter} reached in robust_polyfit_djs') + msgs.warning(f'Maximum number of iterations maxiter={maxiter} reached in robust_polyfit_djs') # Do the final fit pypeitFit = PypeItFit(xval=xarray.astype(float), yval=yarray.astype(float), @@ -601,7 +601,7 @@ def robust_optimize(ydata, fitfunc, arg_dict, maxiter=10, inmask=None, invvar=No elif (len(ret_tuple) == 3): result, ymodel, invvar_use = ret_tuple else: - msgs.error('Invalid return value from fitfunc') + raise PypeItError('Invalid return value from fitfunc') # Update the init_from_last = result thismask_iter = thismask.copy() @@ -618,10 +618,10 @@ def robust_optimize(ydata, fitfunc, arg_dict, maxiter=10, inmask=None, invvar=No iIter += 1 if (iIter == maxiter) & (maxiter != 0): - msgs.warn('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_optimize') + msgs.warning('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_optimize') outmask = np.copy(thismask) if np.sum(outmask) == 0: - msgs.warn('All points were rejected!!! The fits will be zero everywhere.') + msgs.warning('All points were rejected!!! 
The fits will be zero everywhere.') # Perform a final fit using the final outmask if new pixels were rejected on the last iteration if qdone is False: @@ -820,7 +820,7 @@ def polyfit2d_general(x, y, z, deg, w=None, function='polynomial', vander = np.polynomial.legendre.legvander2d(xv, yv, deg) if function == 'legendre' \ else np.polynomial.chebyshev.chebvander2d(xv, yv, deg) else: - msgs.error("Not ready for this type of {:s}".format(function)) + raise PypeItError("Not ready for this type of {:s}".format(function)) # Weights if w is not None: w = np.asarray(w) + 0.0 @@ -1114,7 +1114,7 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo # Checks nx = xdata.size if ydata.size != nx: - msgs.error('Dimensions of xdata and ydata do not agree.') + raise PypeItError('Dimensions of xdata and ydata do not agree.') # TODO: invvar and profile_basis should be optional @@ -1134,7 +1134,7 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo npoly = int(profile_basis.size / nx) if profile_basis.size != nx * npoly: - msgs.error('Profile basis is not a multiple of the number of data points.') + raise PypeItError('Profile basis is not a multiple of the number of data points.') # Init yfit = np.zeros(ydata.shape) @@ -1159,14 +1159,14 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo maskwork = outmask & ingpm & (invvar > 0) if not maskwork.any(): - msgs.error('No valid data points in bspline_profile!.') + raise PypeItError('No valid data points in bspline_profile!') # Init bspline class sset = bspline.bspline(xdata[maskwork], nord=nord, npoly=npoly, bkpt=bkpt, fullbkpt=fullbkpt, funcname='Bspline longslit special', **kwargs_bspline) if maskwork.sum() < sset.nord: if not quiet: - msgs.warn('Number of good data points fewer than nord.') + msgs.warning('Number of good data points fewer than nord.') # TODO: Why isn't maskwork returned? return sset, outmask, yfit, reduced_chi, 4 @@ -1200,14 +1200,14 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo if error != 0: bf1, laction, uaction = sset.action(xdata) if np.any(bf1 == -2) or bf1.size != nx * nord: - msgs.error("BSPLINE_ACTION failed!") + raise PypeItError("BSPLINE_ACTION failed!") action = np.copy(action_multiple) for ipoly in range(npoly): action[:, np.arange(nord) * npoly + ipoly] *= bf1 del bf1 # Clear the memory if np.any(np.logical_not(np.isfinite(action))): - msgs.error('Infinities in action matrix. B-spline fit faults.') + raise PypeItError('Infinities in action matrix. B-spline fit faults.') error, yfit = sset.workit(xdata, ydata, invvar * maskwork, action, laction, uaction) @@ -1215,7 +1215,7 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo if error == -2: if not quiet: - msgs.warn('All break points lost!! Bspline fit failed.') + msgs.warning('All break points lost!! 
Bspline fit failed.') exit_status = 3 return sset, np.zeros(xdata.shape, dtype=bool), np.zeros(xdata.shape), reduced_chi, \ exit_status diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index bac66b4c9a..21ca1cae37 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -186,7 +186,7 @@ def construct_illum_profile(norm_spec, spat_coo, slitwidth, spat_gpm=None, spat_ """ if illum_rej is None and illum_iter > 0: - msgs.warn('Cannot use iterative rejection to construct the illumination function if the ' + msgs.warning('Cannot use iterative rejection to construct the illumination function if the ' 'rejection is not provided. Continuing without iteration.') _spat_gpm = np.ones(norm_spec.shape, dtype=bool) if spat_gpm is None else np.copy(spat_gpm) @@ -348,9 +348,9 @@ def smooth_scale(arr, wave_ref=None, polydeg=None, sn_smooth_npix=None): """ # Do some checks on the input if polydeg is not None and wave_ref is None: - msgs.error("Must provide a wavelength array if polydeg is not None") + raise PypeItError("Must provide a wavelength array if polydeg is not None") if polydeg is None and sn_smooth_npix is None: - msgs.error("Must provide either polydeg or sn_smooth_npix") + raise PypeItError("Must provide either polydeg or sn_smooth_npix") # Smooth the relative sensitivity array if polydeg is not None: gd = (arr != 0) @@ -514,7 +514,7 @@ def tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=0.1, deb # Check input nspec = len(left) if len(right) != nspec: - msgs.error('Input left and right traces must have the same length!') + raise PypeItError('Input left and right traces must have the same length!') # Median slit width slitwidth = np.median(right - left) @@ -534,7 +534,7 @@ def tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=0.1, deb # Check if the shift is within the allowed range if np.abs(left_shift) > maxfrac: - msgs.warn('Left slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( + msgs.warning('Left slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( 100*left_shift, 100*maxfrac) + msgs.newline() + 'The left edge will not be tweaked.') left_shift = 0.0 @@ -542,7 +542,7 @@ def tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=0.1, deb msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100 * left_shift) + ' ({0:.2f} pixels)'.format(left_shift * slitwidth)) if np.abs(right_shift) > maxfrac: - msgs.warn('Right slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( + msgs.warning('Right slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( 100*right_shift, 100*maxfrac) + msgs.newline() + 'The right edge will not be tweaked.') right_shift = 0.0 @@ -632,7 +632,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma # Check input nspec = len(left) if len(right) != nspec: - msgs.error('Input left and right traces must have the same length!') + raise PypeItError('Input left and right traces must have the same length!') # Median slit width slitwidth = np.median(right - left) @@ -648,7 +648,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma # TODO: Set a parameter for this ileft = (spat_coo > 0.1) & (spat_coo < 0.4) if not np.any(ileft): - msgs.error('No coordinates toward the left of the slit center. Slit boundaries are ' + raise PypeItError('No coordinates toward the left of the slit center. 
Slit boundaries are ' 'likely in error, and you probably have a bad (very short) slit. Slit center ' 'at center row is {0:.1f}.'.format((left[nspec//2] + right[nspec//2])/2)) left_thresh = thresh * np.amax(norm_flat[ileft]) @@ -672,10 +672,10 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma norm_flat[np.invert(masked_flat.mask)], marker='.', s=10, color='k', lw=0) plt.show() - msgs.error('Tweak left edge has failed! Bad continuous region.') + raise PypeItError('Tweak left edge has failed! Bad continuous region.') i = contiguous_region.stop-1 if i >= 0 and norm_flat[i-1] > norm_flat[i]: - msgs.warn('When adjusting left edge, found noisy illumination profile structure.') + msgs.warning('When adjusting left edge, found noisy illumination profile structure.') if debug: plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.', s=10, color='C3', lw=0) @@ -684,7 +684,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma plt.scatter(spat_coo[i], norm_flat[i], marker='o', facecolor='none', s=50, color='C1') plt.show() if norm_flat[i+1] < left_thresh: - msgs.warn('Left slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( + msgs.warning('Left slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( 100*maxfrac)) left_shift = maxfrac else: @@ -701,7 +701,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma # TODO: Set a parameter for this iright = (spat_coo > 0.6) & (spat_coo < 0.9) if not np.any(iright): - msgs.error('No coordinates toward the right of the slit center. Slit boundaries are ' + raise PypeItError('No coordinates toward the right of the slit center. Slit boundaries are ' 'likely in error, and you probably have a bad (very short) slit. Slit center ' 'at center row is {0:.1f}.'.format((left[nspec//2] + right[nspec//2])/2)) right_thresh = thresh * np.amax(norm_flat[iright]) @@ -726,10 +726,10 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma norm_flat[np.invert(masked_flat.mask)], marker='.', s=10, color='k', lw=0) plt.show() - msgs.error('Tweak right edge has failed! Bad continuous region.') + raise PypeItError('Tweak right edge has failed! Bad continuous region.') i = contiguous_region.start if i < norm_flat.size-1 and norm_flat[i+1] > norm_flat[i]: - msgs.warn('When adjusting right edge, found noisy illumination profile structure.') + msgs.warning('When adjusting right edge, found noisy illumination profile structure.') if debug: plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.', s=10, color='C3', lw=0) @@ -738,7 +738,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma plt.scatter(spat_coo[i], norm_flat[i], marker='o', facecolor='none', s=50, color='C1') plt.show() if norm_flat[i-1] < right_thresh: - msgs.warn('Right slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( + msgs.warning('Right slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( 100*maxfrac)) right_shift = maxfrac else: @@ -777,9 +777,9 @@ def flatfield(sciframe, flatframe, varframe=None): science frame. 
""" if flatframe.shape != sciframe.shape: - msgs.error('Shape of flat frame does not match science frame.') + raise PypeItError('Shape of flat frame does not match science frame.') if varframe is not None and varframe.shape != sciframe.shape: - msgs.error('Shape of variance frame does not match science frame.') + raise PypeItError('Shape of variance frame does not match science frame.') # New image retframe = np.zeros_like(sciframe) diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index 48e6253372..01f385e99b 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -111,7 +111,7 @@ def spat_flexure_shift(sciimg, slits, bpm=None, maxlag=20, sigdetect=10., debug= _, _, pix_max, _, _, _, _, _ = arc.detect_lines(xcorr_max, cont_subtract=False, input_thresh=0., nfind=1, debug=debug) # No peak? -- e.g. data fills the entire detector if (len(pix_max) == 0) or pix_max[0] == -999.0: - msgs.warn('No peak found in the x-correlation between the traced slits and the science/calib image.' + msgs.warning('No peak found in the x-correlation between the traced slits and the science/calib image.' ' Assuming there is NO SPATIAL FLEXURE.'+msgs.newline() + 'If a flexure is expected, ' 'consider either changing the maximum lag for the cross-correlation, ' 'or the "spat_flexure_sigdetect" parameter, or use the manual flexure correction.') @@ -178,7 +178,7 @@ def spat_flexure_qa(img, slits, shift, gpm=None, vrange=None, outfile=None): # check that vrange is a tuple if vrange is not None and not isinstance(vrange, tuple): - msgs.warn('vrange must be a tuple with the min and max values for the imshow plot. Ignoring vrange.') + msgs.warning('vrange must be a tuple with the min and max values for the imshow plot. Ignoring vrange.') vrange = None # TODO: should we use initial or tweaked slits in this plot? @@ -347,9 +347,9 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N # Check input mode if sky_file is None and arx_skyspec is None: - msgs.error("sky_file or arx_skyspec must be provided") + raise PypeItError("sky_file or arx_skyspec must be provided") elif sky_file is not None and arx_skyspec is not None: - msgs.warn("sky_file and arx_skyspec both provided. Using arx_skyspec.") + msgs.warning("sky_file and arx_skyspec both provided. Using arx_skyspec.") sky_file = None # Arxiv sky spectrum @@ -362,7 +362,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N msgs.info("Computing the spectral FWHM for the provided arxiv sky spectrum") arx_fwhm_pix = autoid.measure_fwhm(arx_skyspec.flux.value, sigdetect=4., fwhm=4.) if arx_fwhm_pix is None: - msgs.error('Failed to measure the spectral FWHM of the archived sky spectrum. ' + raise PypeItError('Failed to measure the spectral FWHM of the archived sky spectrum. ' 'Not enough sky lines detected. 
Provide a value using arx_fwhm_pix') # initialize smooth_fwhm_pix @@ -374,7 +374,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N if smooth_fwhm_pix is None: # smooth_fwhm_pix is None if spec_fwhm_pix<0, i.e., the wavelength calibration is bad - msgs.warn('No flexure correction could be computed for this slit/object') + msgs.warning('No flexure correction could be computed for this slit/object') return None if smooth_fwhm_pix > 0: @@ -392,7 +392,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N # Rebin both spectra onto overlapped wavelength range if len(keep_idx) <= 50: - msgs.warn("Not enough overlap between sky spectra") + msgs.warning("Not enough overlap between sky spectra") return None # rebin onto object ALWAYS @@ -422,14 +422,14 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N norm = np.sum(obj_skyspec_flux)/obj_skyspec.npix norm2 = np.sum(arx_skyspec.flux.value)/arx_skyspec.npix if norm <= 0: - msgs.warn("Bad normalization of object in flexure algorithm") - msgs.warn("Will try the median") + msgs.warning("Bad normalization of object in flexure algorithm") + msgs.warning("Will try the median") norm = np.median(obj_skyspec_flux) if norm <= 0: - msgs.warn("Improper sky spectrum for flexure. Is it too faint??") + msgs.warning("Improper sky spectrum for flexure. Is it too faint??") return None if norm2 <= 0: - msgs.warn('Bad normalization of archive in flexure. You are probably using wavelengths ' + msgs.warning('Bad normalization of archive in flexure. You are probably using wavelengths ' 'well beyond the archive.') return None obj_skyspec_flux = obj_skyspec_flux / norm @@ -475,11 +475,11 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N # We use the int of abs(shift) to avoid to trigger the error/warning for differences <1pixel # TODO :: I'm not convinced that we need int here... if int(abs(shift)) > mxshft: - msgs.warn(f"Computed shift {shift:.1f} pix is " + msgs.warning(f"Computed shift {shift:.1f} pix is " f"larger than specified maximum {mxshft} pix.") if excess_shft == "crash": - msgs.error(f"Flexure compensation failed for one of your{msgs.newline()}" + raise PypeItError(f"Flexure compensation failed for one of your{msgs.newline()}" f"objects. Either adjust the \"spec_maxshift\"{msgs.newline()}" f"FlexurePar Keyword, or see the flexure documentation{msgs.newline()}" f"for information on how to bypass this error using the{msgs.newline()}" @@ -487,21 +487,21 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N "https://pypeit.readthedocs.io/en/release/flexure.html") elif excess_shft == "set_to_zero": - msgs.warn("Flexure compensation failed for one of your objects.") - msgs.warn("Setting the flexure correction shift to 0 pixels.") + msgs.warning("Flexure compensation failed for one of your objects.") + msgs.warning("Setting the flexure correction shift to 0 pixels.") # Return the usual dictionary, but with a shift == 0 shift = 0.0 elif excess_shft == "continue": - msgs.warn("Applying flexure shift larger than specified max!") + msgs.warning("Applying flexure shift larger than specified max!") elif excess_shft == "use_median": - msgs.warn("Will try to use a flexure shift from other slit/object. " + msgs.warning("Will try to use a flexure shift from other slit/object. 
" "If not available, flexure correction will not be applied.") return None else: - msgs.error(f"FlexurePar Keyword excessive_shift = \"{excess_shft}\" " + raise PypeItError(f"FlexurePar Keyword excessive_shift = \"{excess_shft}\" " "not recognized.") msgs.info(f"Flexure correction of {shift:.3f} pixels") @@ -509,7 +509,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N fit = fitting.PypeItFit(xval=subpix_grid, yval=0.0*subpix_grid, func='polynomial', order=np.atleast_1d(2)) fit.fit() - msgs.warn('Flexure compensation failed for one of your objects') + msgs.warning('Flexure compensation failed for one of your objects') return None return dict(polyfit=fit, shift=shift, subpix=subpix_grid, @@ -559,7 +559,7 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= spec_fwhm_pix = autoid.measure_fwhm(obj_skyspec.flux.value, sigdetect=4., fwhm=4.) msgs.info('Measuring spectral FWHM using the boxcar extracted sky spectrum.') if spec_fwhm_pix is None: - msgs.warn('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' + msgs.warning('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' 'Not enough sky lines detected.') return None # object sky spectral dispersion (Angstrom/pixel) @@ -575,7 +575,7 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= msgs.info(f"Resolution (FWHM) of Archive={arx_fwhm:.2f} Ang and Observation={spec_fwhm:.2f} Ang") if spec_fwhm <= 0: - msgs.warn('Negative spectral FWHM, likely due to a bad wavelength calibration.') + msgs.warning('Negative spectral FWHM, likely due to a bad wavelength calibration.') return None # Determine fwhm of the smoothing gaussian @@ -588,9 +588,9 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= smooth_fwhm = np.sqrt(obj_med_fwhm2-arx_med_fwhm2) # Ang smooth_fwhm_pix = smooth_fwhm / arx_disp else: - msgs.warn("Prefer archival sky spectrum to have higher resolution") + msgs.warning("Prefer archival sky spectrum to have higher resolution") smooth_fwhm_pix = 0. - msgs.warn("New Sky has higher resolution than Archive. Not smoothing") + msgs.warning("New Sky has higher resolution than Archive. 
Not smoothing") return smooth_fwhm_pix @@ -686,7 +686,7 @@ def spec_flex_shift_global(slit_specs, islit, sky_file, empty_flex_dict, else: # No success, come back to it later return_later_slits.append(islit) - msgs.warn("Flexure shift calculation failed for this slit.") + msgs.warning("Flexure shift calculation failed for this slit.") msgs.info("Will come back to this slit to attempt " "to use saved estimates from other slits") @@ -790,13 +790,13 @@ def spec_flex_shift_local(slits, slitord, specobjs, islit, sky_file, empty_flex_ else: # No success, come back to it later return_later_sobjs.append(ss) - msgs.warn("Flexure shift calculation failed for this spectrum.") + msgs.warning("Flexure shift calculation failed for this spectrum.") msgs.info("Will come back to this spectrum to attempt " "to use saved estimates from other slits/objects") # Check if we need to go back if (len(return_later_sobjs) > 0) and (len(flex_dict['shift']) > 0): - msgs.warn(f'Flexure shift calculation failed for {len(return_later_sobjs)} ' + msgs.warning(f'Flexure shift calculation failed for {len(return_later_sobjs)} ' f'object(s) in slit {slits.spat_id[islit]}') # get the median shift among all objects in this slit idx_med_shift = np.where(flex_dict['shift'] == np.percentile(flex_dict['shift'], 50, @@ -929,13 +929,13 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco # Check if we need to go back to some failed slits if len(return_later_slits) > 0: - msgs.warn(f'Flexure shift calculation failed for {len(return_later_slits)} slits') + msgs.warning(f'Flexure shift calculation failed for {len(return_later_slits)} slits') # take the median value to deal with the cases when there are more than one shift per slit (e.g., local flexure) saved_shifts = np.array([np.percentile(flex['shift'], 50, method='nearest') if len(flex['shift']) > 0 else None for flex in flex_list]) if np.all(saved_shifts == None): # If all the elements in saved_shifts are None means that there are no saved shifts available - msgs.warn(f'No previously saved flexure shift estimates available. ' + msgs.warning(f'No previously saved flexure shift estimates available. ' f'Flexure corrections cannot be performed.') for islit in range(slits.nslits): # we append an empty dictionary @@ -1083,14 +1083,14 @@ def get_archive_spectrum(sky_file, obj_skyspec=None, spec_fwhm_pix=None): # get arxiv sky spectrum resolution (FWHM in pixels) arx_fwhm_pix = autoid.measure_fwhm(sky_spectrum.flux.value, sigdetect=4., fwhm=4.) if arx_fwhm_pix is None: - msgs.error('Failed to measure the spectral FWHM of the archived sky spectrum. ' + raise PypeItError('Failed to measure the spectral FWHM of the archived sky spectrum. ' 'Not enough sky lines detected.') elif obj_skyspec is not None: if spec_fwhm_pix is None: # measure spec_fwhm_pix spec_fwhm_pix = autoid.measure_fwhm(obj_skyspec.flux.value, sigdetect=4., fwhm=4.) if spec_fwhm_pix is None: - msgs.warn('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' + msgs.warning('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' 'Choose one of the provided sky files.') # get the spectral resolution of obj_skyspec # obj_skyspec spectral dispersion (Angstrom/pixel) @@ -1109,7 +1109,7 @@ def get_archive_spectrum(sky_file, obj_skyspec=None, spec_fwhm_pix=None): sky_spectrum = xspectrum1d.XSpectrum1D.from_tuple((wave_sky, flux_sky)) arx_fwhm_pix = spec_fwhm_pix else: - msgs.error('Archived sky spectrum cannot be loaded. 
') + raise PypeItError('Archived sky spectrum cannot be loaded. ') return sky_spectrum, arx_fwhm_pix @@ -1289,7 +1289,7 @@ def spec_flexure_qa(slitords, bpm, basename, flex_list, dwv = 20.*units.AA gdsky = np.where((sky_lines > min_wave) & (sky_lines < max_wave))[0] if len(gdsky) == 0: - msgs.warn("No sky lines for Flexure QA") + msgs.warning("No sky lines for Flexure QA") continue if len(gdsky) > 6: idx = np.array([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1]) @@ -1379,10 +1379,10 @@ def calculate_image_phase(imref, imshift, gpm_ref=None, gpm_shift=None, maskval= try: from skimage.registration import optical_flow_tvl1, phase_cross_correlation except ImportError: - msgs.warn("scikit-image is not installed. Adopting a basic image cross-correlation") + msgs.warning("scikit-image is not installed. Adopting a basic image cross-correlation") return calculate_image_offset(imref, imshift) if imref.shape != imshift.shape: - msgs.warn("Input images shapes are not equal. Adopting a basic image cross-correlation") + msgs.warning("Input images shapes are not equal. Adopting a basic image cross-correlation") return calculate_image_offset(imref, imshift) # Set the masks if gpm_ref is None: @@ -1525,7 +1525,7 @@ def sky_em_residuals(wave:np.ndarray, flux:np.ndarray, p, pcov = fitting.fit_gauss(wave[mw], flux[mw], w_out=1./np.sqrt(ivar[mw]), guesses=p0, nparam=4) except RuntimeError as e: - msgs.warn('First attempt at Gaussian fit failed, ending with RuntimeError. Original ' + msgs.warning('First attempt at Gaussian fit failed, ending with RuntimeError. Original ' f'exception: {e.args[0]} Assuming this is because it hit the maximum ' 'number of function evaluations. Trying again with a maximum of 10000.') # Try again with larger limit on the number of function evaluations @@ -1831,7 +1831,7 @@ def update_fit(self): all_sky[mm] = m all_ivar[mm] = 1e6 if (np.sum(mm) > 10): - msgs.warn('Removing more than 10 pixels of data') + msgs.warning('Removing more than 10 pixels of data') _,diff,diff_err,_,_ = sky_em_residuals(all_wave, all_sky, all_ivar, self.sky_table['Wave']) diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index 9827263dc7..19c994efc5 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -170,7 +170,7 @@ def find_standard_file(ra, dec, toler=20.*units.arcmin, check=False, to_pkg=None stds_path = dataPaths.standards / sset # This creates a new PypeItDataPath object star_file = stds_path.get_file_path(f"{sset}_info.txt") if not star_file.is_file(): - msgs.warn(f"File does not exist!: {star_file}") + msgs.warning(f"File does not exist!: {star_file}") continue star_tbl = table.Table.read(star_file, comment='#', format='ascii') @@ -221,7 +221,7 @@ def find_standard_file(ra, dec, toler=20.*units.arcmin, check=False, to_pkg=None elif sset == 'esofil': # NOTE: `cal_file` is a pathlib.Path object if not cal_file.name.startswith('f'): - msgs.error('The ESO reference standard filename must start with the string ' + raise PypeItError('The ESO reference standard filename must start with the string ' '`f`; make sure it is the case. 
Also make sure that the flux ' 'units in the file are in 10**(-16) erg/s/cm2/AA.') # TODO let's add the star_mag here and get a uniform set of tags in the std_dict @@ -262,7 +262,7 @@ def find_standard_file(ra, dec, toler=20.*units.arcmin, check=False, to_pkg=None std_dict['wave'] = waves * units.AA std_dict['flux'] = flam * units.erg / units.s / units.cm ** 2 / units.AA else: - msgs.error(f'Do not know how to parse {sset} file.') + raise PypeItError(f'Do not know how to parse {sset} file.') msgs.info("Fluxes are flambda, normalized to 1e-17") return std_dict @@ -283,7 +283,7 @@ def find_standard_file(ra, dec, toler=20.*units.arcmin, check=False, to_pkg=None if check: return False - msgs.error(f"No standard star was found within a tolerance of {toler}{msgs.newline()}" + raise PypeItError(f"No standard star was found within a tolerance of {toler}{msgs.newline()}" f"Closest standard was {closest['name']} at separation {closest['sep'].to('arcmin')}") return None @@ -461,7 +461,7 @@ def get_standard_spectrum(star_type=None, star_mag=None, ra=None, dec=None): std_dict['std_ra'] = ra std_dict['std_dec'] = dec else: - msgs.error('Insufficient information provided for fluxing. ' + raise PypeItError('Insufficient information provided for fluxing. ' 'Either the coordinates of the standard or a stellar type and magnitude are needed.') return std_dict @@ -507,15 +507,15 @@ def load_extinction_data(longitude, latitude, extinctfilepar, msgs.info(f"Using {extinct_file} for extinction corrections.") else: # Crash with a helpful error message - msgs.warn(f"No observatory extinction file was found within {toler}{msgs.newline()}" + msgs.warning(f"No observatory extinction file was found within {toler}{msgs.newline()}" f"of observation at lon = {longitude:.1f} lat = {latitude:.1f} You may{msgs.newline()}" f"select an included extinction file (e.g., KPNO) for use by{msgs.newline()}" f"adding the following to the Sensitivity Input File{msgs.newline()}" "(for pypeit_sensfunc):") msgs.pypeitpar(['sensfunc', 'UVIS', 'extinct_file = kpnoextinct.dat']) - msgs.warn("or the following to the Flux File (for pypeit_flux_calib):") + msgs.warning("or the following to the Flux File (for pypeit_flux_calib):") msgs.pypeitpar(['fluxcalib', 'extinct_file = kpnoextinct.dat']) - msgs.error(f"See instructions at{msgs.newline()}" + raise PypeItError(f"See instructions at{msgs.newline()}" f"https://pypeit.readthedocs.io/en/latest/fluxing.html#extinction-correction{msgs.newline()}" f"for using extinction files and how to install a user-supplied{msgs.newline()}" "file, if desired.") @@ -559,7 +559,7 @@ def extinction_correction(wave, airmass, extinct): """ # Checks if airmass < 1.: - msgs.error("Bad airmass value in extinction_correction") + raise PypeItError("Bad airmass value in extinction_correction") # Interpolate f_mag_ext = interpolate.interp1d(extinct['wave'], extinct['mag_ext'], bounds_error=False, fill_value=0.) @@ -569,13 +569,13 @@ def extinction_correction(wave, airmass, extinct): gdv = np.where(mag_ext > 0.)[0] if len(gdv) == 0: - msgs.warn("No valid extinction data available at this wavelength range. Extinction correction not applied") + msgs.warning("No valid extinction data available at this wavelength range. 
Extinction correction not applied") elif gdv[0] != 0: # Low wavelengths mag_ext[0:gdv[0]] = mag_ext[gdv[0]] - msgs.warn("Extrapolating at low wavelengths using last valid value") + msgs.warning("Extrapolating at low wavelengths using last valid value") elif gdv[-1] != (mag_ext.size - 1): # High wavelengths mag_ext[gdv[-1] + 1:] = mag_ext[gdv[-1]] - msgs.warn("Extrapolating at high wavelengths using last valid value") + msgs.warning("Extrapolating at high wavelengths using last valid value") else: msgs.info("Extinction data covered the whole spectra. Applying correction...") # Evaluate @@ -805,10 +805,10 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta _delta_wave = delta_wave elif isinstance(delta_wave, np.ndarray): if wave.size != delta_wave.size: - msgs.error('The wavelength vector and delta_wave vector must be the same size') + raise PypeItError('The wavelength vector and delta_wave vector must be the same size') _delta_wave = delta_wave else: - msgs.warn('Invalid type for delta_wave - using a default value') + msgs.warning('Invalid type for delta_wave - using a default value') _delta_wave = wvutils.get_delta_wave(wave, wave_mask) else: # If delta_wave is not passed in, then we will use the native wavelength sampling of the spectrum @@ -824,12 +824,12 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta if extrap_sens: zeropoint_obs[wave_mask] \ = interpolate.interp1d(wave_zp, zeropoint, bounds_error=False)(wave[wave_mask]) - msgs.warn("Your data extends beyond the bounds of your sensfunc. You should be " + msgs.warning("Your data extends beyond the bounds of your sensfunc. You should be " "adjusting the par['sensfunc']['extrap_blu'] and/or " "par['sensfunc']['extrap_red'] to extrapolate further and recreate your " "sensfunc. But we are extrapolating per your direction. Good luck!") else: - msgs.error("Your data extends beyond the bounds of your sensfunc. " + msgs.newline() + + raise PypeItError("Your data extends beyond the bounds of your sensfunc. " + msgs.newline() + "Adjust the par['sensfunc']['extrap_blu'] and/or " "par['sensfunc']['extrap_red'] to extrapolate further and recreate " "your sensfunc.") @@ -842,16 +842,16 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta # Did the user request a telluric correction? if tellmodel is not None: # This assumes there is a separate telluric key in this dict. - #msgs.warn("Telluric corrections via this method are deprecated") + #msgs.warning("Telluric corrections via this method are deprecated") msgs.info('Applying telluric correction') sensfunc_obs = sensfunc_obs * (tellmodel > 1e-10) / (tellmodel + (tellmodel < 1e-10)) if extinct_correct: if longitude is None or latitude is None: - msgs.error('You must specify longitude and latitude if we are extinction correcting') + raise PypeItError('You must specify longitude and latitude if we are extinction correcting') # Apply Extinction if optical bands msgs.info("Applying extinction correction") - msgs.warn("Extinction correction applied only if the spectra covers <10000Ang.") + msgs.warning("Extinction correction applied only if the spectrum covers <10000Ang.") extinct = load_extinction_data(longitude, latitude, extinctfilepar) ext_corr = extinction_correction(wave * units.AA, airmass, extinct) senstot = sensfunc_obs * ext_corr @@ -977,7 +977,7 @@ def fit_zeropoint(wave, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, # Do we need to extrapolate? TODO Replace with a model or a grey body? 
## TODO This is an ugly hack. Why are we only triggering this if the extrapolated star is negative. if np.min(flux_true) <= 0.: - msgs.warn('Your spectrum extends beyond calibrated standard star, extrapolating the spectra with polynomial.') + msgs.warning('Your spectrum extends beyond the calibrated standard star, extrapolating the spectrum with a polynomial.') pypeitFit = fitting.robust_fit(std_dict['wave'].value, std_dict['flux'].value,8,function='polynomial', maxiter=50, lower=3.0, upper=3.0, maxrej=3, grow=0, sticky=True, use_mad=True) @@ -1514,7 +1514,7 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N Good pixel mask for fitted sensitivity function with same shape as wave (nspec,) """ if np.any(np.logical_not(np.isfinite(Nlam_ivar))): - msgs.warn("NaN are present in the inverse variance") + msgs.warning("NaNs are present in the inverse variance") ivar_bpm = np.logical_not(np.isfinite(Nlam_ivar) & (Nlam_ivar > 0)) # check masks @@ -1566,7 +1566,7 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N zeropoint_clean[ivar_bpm] = zeropoint_poly[ivar_bpm] else: ## if half more than half of your spectrum is masked (or polycorrect=False) then do not correct it with polyfit - msgs.warn('No polynomial corrections performed on Hydrogen Recombination line regions') + msgs.warning('No polynomial corrections performed on Hydrogen Recombination line regions') # ToDo # Compute an effective resolution for the standard. This could be improved @@ -1578,8 +1578,8 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N std_pix = np.median(np.abs(wave[zeropoint_data_gpm] - np.roll(wave[zeropoint_data_gpm], 1))) std_res = np.median(wave[zeropoint_data_gpm]/resolution) # median resolution in units of Angstrom. 
if (nresln * std_res) < std_pix: - msgs.warn("Bspline breakpoints spacing shoud be larger than 1pixel") - msgs.warn("Changing input nresln to fix this") + msgs.warning("Bspline breakpoint spacing should be larger than 1 pixel") + msgs.warning("Changing input nresln to fix this") nresln = std_res / std_pix # Output some helpful information for double-checking input params are correct @@ -1640,7 +1640,7 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N else: ## if half more than half of your spectrum is masked (or polycorrect=False) then do not correct it with polyfit zeropoint_bspl_clean = zeropoint_bspl.copy() - msgs.warn('No polynomial corrections performed on Hydrogen Recombination line regions') + msgs.warning('No polynomial corrections performed on Hydrogen Recombination line regions') # Calculate zeropoint zeropoint_fit = zeropoint_poly if polyfunc else zeropoint_bspl_clean @@ -1677,7 +1677,7 @@ def load_filter_file(filter): # Check if filter not in allowed_options: - msgs.error("PypeIt is not ready for filter = {}".format(filter)) + raise PypeItError("PypeIt is not ready for filter = {}".format(filter)) trans_file = dataPaths.filters.get_file_path('filtercurves.fits') trans = io.fits_open(trans_file) @@ -1752,7 +1752,7 @@ def scale_in_filter(wave, flux, gpm, scale_dict): scale = np.power(10.0,(Dm/2.5)) msgs.info("Scaling spectrum by {}".format(scale)) else: - msgs.error("Bad magnitude type") + raise PypeItError("Bad magnitude type") return scale diff --git a/pypeit/core/framematch.py b/pypeit/core/framematch.py index 94200c405f..0d4a8bcc13 100644 --- a/pypeit/core/framematch.py +++ b/pypeit/core/framematch.py @@ -96,9 +96,9 @@ def valid_frametype(frametype, quiet=False, raise_error=False): if not good_frametype: _f = None if not quiet and not raise_error: - _f = msgs.warn + _f = msgs.warning elif raise_error: - _f = msgs.error + raise PypeItError(f'{frametype} is not a valid PypeIt frame type.') if _f is not None: _f(f'{frametype} is not a valid PypeIt frame type.') return good_frametype @@ -174,9 +174,9 @@ def check_frame_exptime(exptime, exprng): # # Check here that there are more than 1 files and that the # # number of files is even # if len(files) == 1: -# msgs.warn('Cannot perform ABBA reduction on targets with 1 file') +# msgs.warning('Cannot perform ABBA reduction on targets with 1 file') # elif len(files) % 2 != 0: -# msgs.warn('Expected an even number of files associated with target ' + key) +# msgs.warning('Expected an even number of files associated with target ' + key) # # # TODO: Check for increasing time? 
Files are read in numerical # # sequential order -- should be in order of increasing time @@ -204,18 +204,18 @@ def check_frame_exptime(exptime, exprng): # BB_sep = ABBA_coords[1].separation(ABBA_coords[2]).arcsec # if AA_sep > max_nod_sep or BB_sep > max_nod_sep: # if AA_sep > max_nod_sep: -# msgs.warn('Separation between 1st and 4th frame in presumed ABBA sequence ' +# msgs.warning('Separation between 1st and 4th frame in presumed ABBA sequence ' # 'have a large separation ({0}).'.format(AA_sep)) # if BB_sep > max_nod_sep: -# msgs.warn('Separation between 2nd and 3rd frame in presumed ABBA sequence ' +# msgs.warning('Separation between 2nd and 3rd frame in presumed ABBA sequence ' # 'have a large separation ({0}).'.format(BB_sep)) -# msgs.warn('Check ABBA identification for target {0} group {1}:'.format( +# msgs.warning('Check ABBA identification for target {0} group {1}:'.format( # target, group) + msgs.newline() + 'A:' + file_groups[group][0] # + msgs.newline() + 'B:' + file_groups[group][1] # + msgs.newline() + 'B:' + file_groups[group][2] # + msgs.newline() + 'A:' + file_groups[group][3]) # else: -# msgs.error('BUG: This should never be reached.') +# raise PypeItError('BUG: This should never be reached.') # # # Flip group from ABBA to BABA, or AB to BA # AB_idx_flip = np.copy(value_groups[group]) diff --git a/pypeit/core/gui/edge_inspector.py b/pypeit/core/gui/edge_inspector.py index d0df02b1aa..2f1b2b9f0f 100644 --- a/pypeit/core/gui/edge_inspector.py +++ b/pypeit/core/gui/edge_inspector.py @@ -67,7 +67,7 @@ def __init__(self, edges): if self.edges.par['left_right_pca'] \ else self.edges.pca.reference_row else: - msgs.warn('Edges object does not include a PCA decomposition of the traces.') + msgs.warning('Edges object does not include a PCA decomposition of the traces.') self.reference_row = self.edges.nspec // 2 # NOTE: line properties match what is used for the Pointer self.ref_row_line = image_ax.axhline(self.reference_row, color='C1', lw=0.5) @@ -170,7 +170,7 @@ def update_traces(self, *args): if np.any(_remove) or np.any(_add): success = self.edges.sync() if not success: - msgs.warn('Unable to synchronize left-right traces!') + msgs.warning('Unable to synchronize left-right traces!') # Remove the trace lines from the plot # TODO: There may be an easier way to do this, but I couldn't find it. diff --git a/pypeit/core/gui/identify.py b/pypeit/core/gui/identify.py index b59153384a..a048866dec 100644 --- a/pypeit/core/gui/identify.py +++ b/pypeit/core/gui/identify.py @@ -258,14 +258,14 @@ def initialise(cls, arccen, lamps, slits, slit=0, par=None, wv_calib_all=None, if wv_calib_all is not None: wv_calib = wv_calib_all.wv_fits[slit] if wv_calib.spat_id != slits.spat_id[slit]: - msgs.warn("Wavelength calibration slits did not match!") + msgs.warning("Wavelength calibration slits did not match!") msgs.info("Best-fitting wavelength solution will not be loaded.") wv_calib = None msgs.info(f"Loading lamps from wavelength solution: {wv_calib_all.lamps}") lamps = wv_calib_all.lamps.split(",") # Must specify `wv_calib = None` otherwise else: - msgs.warn("No wavelength calibration supplied!") + msgs.warning("No wavelength calibration supplied!") msgs.info("No wavelength solution will be loaded.") wv_calib = None @@ -726,12 +726,12 @@ def make_order_vec(self, order_str): """ mtch = re.search(r"(\d+):(\d+)", order_str) if mtch is None: - msgs.warn(f"Input string {order_str} is not in the correct format, e.g. 
(45:122)") + msgs.warning(f"Input string {order_str} is not in the correct format, e.g. (45:122)") return None start_order = int(mtch.groups()[0]) end_order = int(mtch.groups()[1]) if start_order > end_order: - msgs.warn(f"The start order {start_order} must be less than the end order {end_order}") + msgs.warning(f"The start order {start_order} must be less than the end order {end_order}") return None order_vec = np.arange(start_order, end_order+1) return order_vec @@ -792,7 +792,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, self.save_IDs() # Solution if 'rms' not in final_fit.keys(): - msgs.warn("No wavelength solution available") + msgs.warning("No wavelength solution available") return elif final_fit['rms'] < rmstol or multi: ans = '' @@ -815,7 +815,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, #better try again... Return to the start of the loop continue if len(order_vec) != len(wvcalib.wv_fits): - msgs.warn(f'The number of orders in this list, {order_vec} '+msgs.newline()+ + msgs.warning(f'The number of orders in this list, {order_vec} '+msgs.newline()+ f'does not match the number of traces: {len(wvcalib.wv_fits)}' + msgs.newline() + 'Please try again...') continue @@ -838,11 +838,11 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, if multi: # check that specdata is defined if specdata_multi is None: - msgs.warn('Skipping arxiv save because arc line spectra are not defined by pypeit/scripts/identify.py') + msgs.warning('Skipping arxiv save because arc line spectra are not defined by pypeit/scripts/identify.py') # check that the number of spectra in specdata is the same as the number of wvcalib solutions elif specdata_multi is not None and np.shape(specdata_multi)[0] != len(wvcalib.wv_fits): - msgs.warn('Skipping arxiv save because there are not enough orders for full template') - msgs.warn('To generate a valid arxiv to save, please rerun with the "--slits all" option.') + msgs.warning('Skipping arxiv save because there are not enough orders for full template') + msgs.warning('To generate a valid arxiv to save, please rerun with the "--slits all" option.') else: norder = np.shape(specdata_multi)[0] wavelengths = np.copy(specdata_multi) @@ -850,7 +850,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, if fits_dicts is not None: fitdict = fits_dicts[iord] else: - msgs.warn('skipping saving fits because fits_dicts is not defined by pypeit/scripts/identify.py') + msgs.warning('skipping saving fits because fits_dicts is not defined by pypeit/scripts/identify.py') fitdict = None if fitdict is not None and fitdict['full_fit'] is not None: wavelengths[iord,:] = fitdict['full_fit'].eval(np.arange(specdata_multi[iord,:].size) / @@ -894,7 +894,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, if not force_save: while ow_wvcalib != 'y' and ow_wvcalib != 'n': print('') - msgs.warn('Do you want to overwrite existing Calibrations/WaveCalib*.fits file? ' + msgs.newline() + + msgs.warning('Do you want to overwrite existing Calibrations/WaveCalib*.fits file? ' + msgs.newline() + 'NOTE: To use this WaveCalib file the user will need to delete the other files in Calibrations/ ' + msgs.newline() + ' and re-run run_pypeit. 
') print('') diff --git a/pypeit/core/gui/skysub_regions.py b/pypeit/core/gui/skysub_regions.py index b8e268197c..67b58dfd24 100644 --- a/pypeit/core/gui/skysub_regions.py +++ b/pypeit/core/gui/skysub_regions.py @@ -586,7 +586,7 @@ def get_result(self): # Generate the mask inmask = skysub.generate_mask(self.pypeline, self._skyreg, self.slits, self.slits_left, self.slits_right) if np.all(np.logical_not(inmask)): - msgs.warn("Sky regions are empty - A sky regions calibration frame will not be generated") + msgs.warning("Sky regions are empty - A sky regions calibration frame will not be generated") return None # Build the Sky Regions calibration frame @@ -603,7 +603,7 @@ def get_outname(self): outfil = self._outname if os.path.exists(self._outname) and not self._overwrite: outfil = 'temp.fits' - msgs.warn(f"A SkyRegions file already exists and you have not forced an overwrite:\n{self._outname}") + msgs.warning(f"A SkyRegions file already exists and you have not forced an overwrite:\n{self._outname}") msgs.info(f"Adopting the following output filename: {outfil}") return outfil diff --git a/pypeit/core/mosaic.py b/pypeit/core/mosaic.py index dd18c627d1..7c09378c16 100644 --- a/pypeit/core/mosaic.py +++ b/pypeit/core/mosaic.py @@ -77,11 +77,11 @@ def build_image_mosaic_transform(shape, shift, rotation=0., binning=(1.,1.)): :func:`pypeit.core.transform.affine_transform_series`. """ if len(shape) != 2: - msgs.error('Shape must be a two-tuple.') + raise PypeItError('Shape must be a two-tuple.') if len(shift) != 2: - msgs.error('Shift must be a two-tuple.') + raise PypeItError('Shift must be a two-tuple.') if len(binning) != 2: - msgs.error('Binning must be a two-tuple.') + raise PypeItError('Binning must be a two-tuple.') tform = [] if np.absolute(rotation) > 0: @@ -150,7 +150,7 @@ def prepare_mosaic(shape, tforms, buffer=0, inplace=False): # Set the mosaic image shape if buffer < 0: - msgs.error('Mosaic image buffer must be >= 0.') + raise PypeItError('Mosaic image buffer must be >= 0.') mosaic_shape = tuple(*np.ceil(np.diff(box, axis=0) + 2*buffer).astype(int)) # Adjust the image transformations to be within the limits of the mosaic @@ -257,22 +257,22 @@ def build_image_mosaic(imgs, tforms, ivar=None, bpm=None, mosaic_shape=None, cva # Check the input nimg = len(imgs) if len(tforms) != nimg: - msgs.error('Number of image transformations does not match number of images to mosaic.') + raise PypeItError('Number of image transformations does not match number of images to mosaic.') if ivar is not None and len(ivar) != nimg: - msgs.error('If providing any, must provide inverse-variance for each image in the mosaic.') + raise PypeItError('If providing any, must provide inverse-variance for each image in the mosaic.') if bpm is not None and len(bpm) != nimg: - msgs.error('If providing any, must provide bad-pixel masks for each image in the mosaic.') + raise PypeItError('If providing any, must provide bad-pixel masks for each image in the mosaic.') if overlap not in ['combine', 'error']: - msgs.error(f'Unknown value for overlap ({overlap}), must be "combine" or "error".') + raise PypeItError(f'Unknown value for overlap ({overlap}), must be "combine" or "error".') if any([not np.issubdtype(img.dtype, np.floating) for img in imgs]): - msgs.warn('Images must be floating type, and will be recast before transforming.') + msgs.warning('Images must be floating type, and will be recast before transforming.') # Get the output shape, if necessary if mosaic_shape is None: shape = imgs[0].shape if not 
np.all([img.shape == shape for img in imgs]): - msgs.error('If output mosaic shape is not provided, all input images must have the ' + raise PypeItError('If output mosaic shape is not provided, all input images must have the ' 'same shape!') mosaic_shape, _tforms = prepare_mosaic(shape, tforms) else: @@ -324,7 +324,7 @@ def build_image_mosaic(imgs, tforms, ivar=None, bpm=None, mosaic_shape=None, cva has_overlap = np.any(mosaic_npix > 1) if has_overlap and overlap == 'error': # Input images should not be allowed to overlap - msgs.error('Mosaic has pixels with contributions by more than one input image!') + raise PypeItError('Mosaic has pixels with contributions by more than one input image!') filled = mosaic_npix > 0 mosaic_data[np.logical_not(filled)] = cval diff --git a/pypeit/core/parse.py b/pypeit/core/parse.py index 1b5f5ec9f4..5dfeb615df 100644 --- a/pypeit/core/parse.py +++ b/pypeit/core/parse.py @@ -155,7 +155,7 @@ def parse_binning(binning:str): elif 'x' in binning: binspectral, binspatial = [int(item) for item in binning.split('x')] # LRIS elif binning == 'None': - msgs.warn("Assuming unbinned, i.e. 1x1") + msgs.warning("Assuming unbinned, i.e. 1x1") binspectral, binspatial = 1,1 else: binspectral, binspatial = [int(item) for item in binning.strip().split(' ')] # Gemini @@ -164,7 +164,7 @@ def parse_binning(binning:str): elif isinstance(binning, np.ndarray): binspectral, binspatial = binning else: - msgs.error("Unable to parse input binning: {}".format(binning)) + raise PypeItError("Unable to parse input binning: {}".format(binning)) # Return return binspectral, binspatial @@ -410,7 +410,7 @@ def parse_image_location(inp, spec): """ if ';' in inp: - msgs.error(f'Image location string provided ({inp}) includes a semi-colon!') + raise PypeItError(f'Image location string provided ({inp}) includes a semi-colon!') # Split the components of the string _inp = inp.split(':') @@ -424,14 +424,14 @@ def parse_image_location(inp, spec): det = tuple(-d for d in det) if len(det) > 1 and det not in spec.allowed_mosaics: - msgs.error(f'{det} is not a valid mosaic for {spec.name}.') + raise PypeItError(f'{det} is not a valid mosaic for {spec.name}.') elif len(det) > 1 and det in spec.allowed_mosaics: # we use detname, which is a string (e.g., 'DET01', 'MSC01') detname = spec.get_det_name(det) elif len(det) == 1: detname = spec.get_det_name(det[0]) else: - msgs.error(f'Unable to parse detector identifier in: {inp}') + raise PypeItError(f'Unable to parse detector identifier in: {inp}') return (neg, detname) + tuple(float(p) for p in _inp[1:]) diff --git a/pypeit/core/pixels.py b/pypeit/core/pixels.py index 65b51db151..38ecdfe133 100644 --- a/pypeit/core/pixels.py +++ b/pypeit/core/pixels.py @@ -30,7 +30,7 @@ def phys_to_pix(array, pixlocn, axis): The pixel locations of the input array (as seen on a computer screen) """ if len(array.shape) > 2: - msgs.error('Input array must have two dimensions or less!') + raise PypeItError('Input array must have two dimensions or less!') _array = np.atleast_2d(array) doravel = len(array.shape) != 2 @@ -102,11 +102,11 @@ def ximg_and_edgemask(lord_in, rord_in, slitpix, trim_edg=(3,3), xshift=0.): badp = xsize <= 0. if np.any(badp): meds = np.median(xsize) - msgs.warn('Something goofy in slit # {:d}'.format(islit)) - msgs.warn('Probably a bad slit (e.g. 
a star box)') - msgs.warn('It is best to expunge this slit') - msgs.warn('Proceed at your own risk, with a slit width of {}'.format(meds)) - msgs.warn('Or set meds to your liking') + msgs.warning('Something goofy in slit # {:d}'.format(islit)) + msgs.warning('Probably a bad slit (e.g. a star box)') + msgs.warning('It is best to expunge this slit') + msgs.warning('Proceed at your own risk, with a slit width of {}'.format(meds)) + msgs.warning('Or set meds to your liking') #rord[:, islit] = lord[:, islit] + meds # Loop down the slit diff --git a/pypeit/core/procimg.py b/pypeit/core/procimg.py index 795a198446..cd58e35c18 100644 --- a/pypeit/core/procimg.py +++ b/pypeit/core/procimg.py @@ -158,13 +158,13 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m # Check input if saturation is not None and isinstance(saturation, np.ndarray) \ and saturation.shape != sciframe.shape: - msgs.error('Detector pixel saturation array has incorrect shape.') + raise PypeItError('Detector pixel saturation array has incorrect shape.') if isinstance(nonlinear, np.ndarray) and nonlinear.shape != sciframe.shape: - msgs.error('Detector nonlinear pixel scale array has incorrect shape.') + raise PypeItError('Detector nonlinear pixel scale array has incorrect shape.') if bpm is not None and bpm.shape != sciframe.shape: - msgs.error('Bad-pixel mask must match shape of science frame.') + raise PypeItError('Bad-pixel mask must match shape of science frame.') if varframe is not None and varframe.shape != sciframe.shape: - msgs.error('Variance frame must match shape of science frame.') + raise PypeItError('Variance frame must match shape of science frame.') msgs.info("Detecting cosmic rays with the L.A.Cosmic algorithm") @@ -397,9 +397,9 @@ def cr_screen(a, mask_value=0.0, spatial_axis=1): """ # Check input if len(a.shape) != 2: - msgs.error('Input array must be two-dimensional.') + raise PypeItError('Input array must be two-dimensional.') if spatial_axis not in [0,1]: - msgs.error('Spatial axis must be 0 or 1.') + raise PypeItError('Spatial axis must be 0 or 1.') # Mask the pixels equal to mask value: should use np.isclose() _a = np.ma.MaskedArray(a, mask=(a==mask_value)) @@ -458,7 +458,7 @@ def gain_frame(amp_img, gain): `numpy.ndarray`_: Image with the gain for each pixel. """ # TODO: Remove this or actually do it. - # msgs.warn("Should probably be measuring the gain across the amplifier boundary") + # msgs.warning("Should probably be measuring the gain across the amplifier boundary") # Build and return the gain image gain_img = np.zeros_like(amp_img, dtype=float) for i,_gain in enumerate(gain): @@ -519,15 +519,15 @@ def rn2_frame(datasec_img, ronoise, units='e-', gain=None, digitization=False): """ # Check units if units not in ['e-', 'ADU']: - msgs.error(f"Unknown units: {units}. Must be 'e-' or 'ADU'.") + raise PypeItError(f"Unknown units: {units}. Must be 'e-' or 'ADU'.") if gain is None and (digitization or units == 'ADU'): - msgs.error('If including digitization error or return units in ADU, must provide gain.') + raise PypeItError('If including digitization error or return units in ADU, must provide gain.') # Determine the number of amplifiers from the datasec image _datasec_img = datasec_img.astype(int) numamplifiers = np.amax(_datasec_img) if numamplifiers == 0: - msgs.error('Amplifier identification image (datasec_img) does not have any values larger ' + raise PypeItError('Amplifier identification image (datasec_img) does not have any values larger ' 'than 0! 
The image should indicate the 1-indexed integer of the amplifier ' 'used to read each pixel.') @@ -535,7 +535,7 @@ def rn2_frame(datasec_img, ronoise, units='e-', gain=None, digitization=False): _ronoise = np.atleast_1d(ronoise) if isinstance(ronoise, (list, np.ndarray)) \ else np.array([ronoise]) if len(_ronoise) != numamplifiers: - msgs.error('Must provide a read-noise for each amplifier.') + raise PypeItError('Must provide a read-noise for each amplifier.') # Get the amplifier indices indx = np.logical_not(_datasec_img == 0) @@ -552,7 +552,7 @@ def rn2_frame(datasec_img, ronoise, units='e-', gain=None, digitization=False): # Check the number of gain values _gain = np.atleast_1d(gain) if isinstance(gain, (list, np.ndarray)) else np.array([gain]) if len(_gain) != numamplifiers: - msgs.error('Must provide a gain for each amplifier.') + raise PypeItError('Must provide a gain for each amplifier.') if digitization: # Add in the digitization error @@ -635,15 +635,15 @@ def subtract_overscan(rawframe, datasec_img, oscansec_img, method='savgol', para """ # Check input if method.lower() not in ['polynomial', 'chebyshev', 'savgol', 'median', 'odd_even']: - msgs.error(f'Unrecognized overscan subtraction method: {method}') + raise PypeItError(f'Unrecognized overscan subtraction method: {method}') if rawframe.ndim != 2: - msgs.error('Input raw frame must be 2D.') + raise PypeItError('Input raw frame must be 2D.') if datasec_img.shape != rawframe.shape: - msgs.error('Datasec image must have the same shape as the raw frame.') + raise PypeItError('Datasec image must have the same shape as the raw frame.') if oscansec_img.shape != rawframe.shape: - msgs.error('Overscan section image must have the same shape as the raw frame.') + raise PypeItError('Overscan section image must have the same shape as the raw frame.') if var is not None and var.shape != rawframe.shape: - msgs.error('Variance image must have the same shape as the raw frame.') + raise PypeItError('Variance image must have the same shape as the raw frame.') # Copy the data so that the subtraction is not done in place no_overscan = rawframe.copy() @@ -656,18 +656,18 @@ def subtract_overscan(rawframe, datasec_img, oscansec_img, method='savgol', para for amp in amps: # Pull out the overscan data if np.sum(oscansec_img == amp) == 0: - msgs.error(f'No overscan region for amplifier {amp+1}!') + raise PypeItError(f'No overscan region for amplifier {amp+1}!') overscan, os_slice = rect_slice_with_mask(rawframe, oscansec_img, amp) if var is not None: osvar = var[os_slice] # Pull out the real data if np.sum(datasec_img == amp) == 0: - msgs.error(f'No data region for amplifier {amp+1}!') + raise PypeItError(f'No data region for amplifier {amp+1}!') data, data_slice = rect_slice_with_mask(rawframe, datasec_img, amp) # Shape along at least one axis must match if not np.any([dd == do for dd, do in zip(data.shape, overscan.shape)]): - msgs.error('Overscan sections do not match amplifier sections for ' + raise PypeItError('Overscan sections do not match amplifier sections for ' 'amplifier {0}'.format(amp)) compress_axis = 1 if data.shape[0] == overscan.shape[0] else 0 @@ -882,10 +882,10 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 bounds=([0, -np.inf],[np.inf, np.inf]) ) except ValueError: - msgs.warn("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + msgs.warning("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue 
except RuntimeError: - msgs.warn("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + msgs.warning("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue amps_fit[ii] = popt[0] # Construct a model of the amplitudes as a fucntion of spectral pixel @@ -912,10 +912,10 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 bounds=([-np.inf], [np.inf]) ) except ValueError: - msgs.warn("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + msgs.warning("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue except RuntimeError: - msgs.warn("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + msgs.warning("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue # Calculate the model pattern, given the amplitude, frequency and phase information model_pattern[ii, :] = cosfunc_full(xdata_all, amp_mod[ii], frq_mod[ii], popt[0]) @@ -951,7 +951,7 @@ def pattern_frequency(frame, axis=1): if axis == 0: arr = frame.T elif axis != 1: - msgs.error("frame must be a 2D image, and axis must be 0 or 1") + raise PypeItError("frame must be a 2D image, and axis must be 0 or 1") # Calculate the output image dimensions of the model signal # Subtract the DC offset @@ -1005,11 +1005,11 @@ def replace_columns(img, bad_cols, replace_with='mean', copy=False): """ # Check if img.ndim != 2: - msgs.error('Images must be 2D!') + raise PypeItError('Images must be 2D!') if bad_cols.size != img.shape[1]: - msgs.error('Bad column array has incorrect length!') + raise PypeItError('Bad column array has incorrect length!') if np.all(bad_cols): - msgs.error('All columns are bad!') + raise PypeItError('All columns are bad!') _img = img.copy() if copy else img @@ -1044,7 +1044,7 @@ def replace_columns(img, bad_cols, replace_with='mean', copy=False): for l,r in zip(ledges, redges): replace_column_linear(_img, l, r) else: - msgs.error('Unknown replace_columns method. Must be mean or linear.') + raise PypeItError('Unknown replace_columns method. Must be mean or linear.') return _img @@ -1133,7 +1133,7 @@ def trim_frame(frame, mask): """ # TODO: Should check for this failure mode earlier if np.any(mask[np.logical_not(np.all(mask,axis=1)),:][:,np.logical_not(np.all(mask,axis=0))]): - msgs.error('Data section is oddly shaped. Trimming does not exclude all ' + raise PypeItError('Data section is oddly shaped. 
Trimming does not exclude all ' 'pixels outside the data sections.') return frame[np.logical_not(np.all(mask,axis=1)),:][:,np.logical_not(np.all(mask,axis=0))] @@ -1245,13 +1245,13 @@ def base_variance(rn_var, darkcurr=None, exptime=None, proc_var=None, count_scal # Check input if count_scale is not None and isinstance(count_scale, np.ndarray) \ and count_scale.shape != rn_var.shape: - msgs.error('Count scale and readnoise variance have different shape.') + raise PypeItError('Count scale and readnoise variance have different shape.') if proc_var is not None and isinstance(proc_var, np.ndarray) \ and proc_var.shape != rn_var.shape: - msgs.error('Processing variance and readnoise variance have different shape.') + raise PypeItError('Processing variance and readnoise variance have different shape.') if darkcurr is not None and isinstance(darkcurr, np.ndarray) \ and darkcurr.shape != rn_var.shape: - msgs.error('Dark image and readnoise variance have different shape.') + raise PypeItError('Dark image and readnoise variance have different shape.') # Build the variance # - First term is the read-noise @@ -1374,12 +1374,12 @@ def variance_model(base, counts=None, count_scale=None, noise_floor=None): """ # Check input if noise_floor is not None and noise_floor > 0. and counts is None: - msgs.error('To impose a noise floor, must provide counts.') + raise PypeItError('To impose a noise floor, must provide counts.') if counts is not None and counts.shape != base.shape: - msgs.error('Counts image and base-level variance have different shape.') + raise PypeItError('Counts image and base-level variance have different shape.') if count_scale is not None and isinstance(count_scale, np.ndarray) \ and count_scale.shape != base.shape: - msgs.error('Count scale and base-level variance have different shape.') + raise PypeItError('Count scale and base-level variance have different shape.') # Clip the counts _counts = None if counts is None else np.clip(counts, 0, None) @@ -1434,7 +1434,7 @@ def nonlinear_counts(counts, ampimage, nonlinearity_coeffs): msgs.info('Applying a non-linearity correction to the counts.') # Check the input if counts.shape != ampimage.shape: - msgs.error('Counts and amplifier image have different shapes.') + raise PypeItError('Counts and amplifier image have different shapes.') _nonlinearity_coeffs = np.asarray(nonlinearity_coeffs) # Setup the output array corr_counts = counts.copy() diff --git a/pypeit/core/pydl.py b/pypeit/core/pydl.py index 97c093ccea..078f215b4c 100644 --- a/pypeit/core/pydl.py +++ b/pypeit/core/pydl.py @@ -374,7 +374,7 @@ def __init__(self, *args, **kwargs): #invvar = np.ones(xpos.shape, dtype=xpos.dtype) if 'func' in kwargs: if kwargs['func'] not in allowed_functions: - msgs.error('Unrecognized function.') + raise PypeItError('Unrecognized function.') self.func = kwargs['func'] else: self.func = 'legendre' @@ -454,7 +454,7 @@ def __init__(self, *args, **kwargs): self.outmask[iTrace, :] = pypeitFit.gpm else: - msgs.error('Wrong number of arguments to TraceSet!') + raise PypeItError('Wrong number of arguments to TraceSet!') def xy(self, xpos=None, ignore_jump=False): """Convert from a trace set to an array of x,y positions. @@ -685,7 +685,7 @@ def djs_reject(data, model, outmask=None, inmask=None, # ToDO It would be nice to come up with a way to use MAD but also use the errors in the rejection, i.e. compute the rejection threhsold using the mad. if upper is None and lower is None and maxdev is None: - msgs.warn('upper, lower, and maxdev are all set to None. 
No rejection performed since no rejection criteria were specified.')
+        msgs.warning('upper, lower, and maxdev are all set to None. No rejection performed since no rejection criteria were specified.')
 
     if (use_mad and (invvar is not None)):
         raise ValueError('use_mad can only be set to True innvar = None. This code only computes a mad'
@@ -696,7 +696,7 @@ def djs_reject(data, model, outmask=None, inmask=None,
     # ToDo JFH: I think it would actually make more sense for outmask be a required input parameter (named lastmask or something like that).
     if outmask is None:
         outmask = np.ones(data.shape, dtype='bool')
-        msgs.warn('outmask was not specified as an input parameter. Cannot asess convergence of rejection -- qdone is automatically True')
+        msgs.warning('outmask was not specified as an input parameter. Cannot assess convergence of rejection -- qdone is automatically True')
     else:
         if data.shape != outmask.shape:
             raise ValueError('Dimensions of data and outmask do not agree.')
@@ -1073,7 +1073,7 @@ def __init__(self, ra, dec, minSize):
         else:
             cosDecMin = np.cos(np.deg2rad(self.decBounds[0]))
         if cosDecMin <= 0.0:
-            msgs.error("cosDecMin={0:f} not positive in setchunks().".format(cosDecMin))
+            raise PypeItError("cosDecMin={0:f} not positive in setchunks().".format(cosDecMin))
         self.raRange, self.raOffset = self.rarange(ra, minSize/cosDecMin)
         self.raMin, self.raMax = self.getraminmax(ra, self.raOffset)
         #
@@ -1095,7 +1095,7 @@ def __init__(self, ra, dec, minSize):
             else:
                 cosDecMin = np.cos(np.deg2rad(self.decBounds[i+1]))
             if cosDecMin <= 0.0:
-                msgs.error("cosDecMin={0:f} not positive in setchunks().".format(cosDecMin))
+                raise PypeItError("cosDecMin={0:f} not positive in setchunks().".format(cosDecMin))
             #
             # Get raBounds array for this declination array, leave an extra
             # cell on each end
@@ -1173,7 +1173,7 @@ def assign(self, ra, dec, marginSize):
            to it.
        """
        if marginSize >= self.minSize:
-           msgs.error("marginSize>=minSize ({0:f}={1:f}) in chunks.assign().".format(marginSize, self.minSize))
+           raise PypeItError("marginSize>=minSize ({0:f}={1:f}) in chunks.assign().".format(marginSize, self.minSize))
        chunkDone = [[False for j in range(self.nRa[i])] for i in range(self.nDec)]
        for i in range(ra.size):
            currRa = np.fmod(ra[i] + self.raOffset, 360.0)
@@ -1225,7 +1225,7 @@ def getbounds(self, ra, dec, marginSize):
                                (self.decBounds[self.nDec]-self.decBounds[0])))
        decChunkMax = decChunkMin
        if decChunkMin < 0 or decChunkMin > self.nDec - 1:
-           msgs.error("decChunkMin out of range in chunks.getbounds().")
+           raise PypeItError("decChunkMin out of range in chunks.getbounds().")
        #
        # Set minimum and maximum bounds of dec
        #
@@ -1245,7 +1245,7 @@ def getbounds(self, ra, dec, marginSize):
                                      (self.raBounds[i][self.nRa[i]] - self.raBounds[i][0])))
            raChunkMax[i-decChunkMin] = raChunkMin[i-decChunkMin]
            if raChunkMin[i-decChunkMin] < 0 or raChunkMin[i-decChunkMin] > self.nRa[i]-1:
-               msgs.error("raChunkMin out of range in chunks.getbounds().")
+               raise PypeItError("raChunkMin out of range in chunks.getbounds().")
            #
            # Set minimum and maximum bounds of ra
            #
@@ -1288,7 +1288,7 @@ def get(self, ra, dec):
                               float(self.nRa[decChunk]) /
                               (self.raBounds[decChunk][self.nRa[decChunk]] -
                                self.raBounds[decChunk][0])))
            if raChunk < 0 or raChunk > self.nRa[decChunk]-1:
-               msgs.error("raChunk out of range in chunks.get()")
+               raise PypeItError("raChunk out of range in chunks.get()")
        else:
            raChunk = -1
        return (raChunk, decChunk)
@@ -1350,7 +1350,7 @@ def friendsoffriends(self, ra, dec, linkSep):
                    else:
                        mapGroups[i] = mapGroups[mapGroups[i]]
                else:
-                   msgs.error("MapGroups[{0:d}]={1:d} in chunks.friendsoffriends().".format(i, mapGroups[i]))
+                   raise PypeItError("MapGroups[{0:d}]={1:d} in chunks.friendsoffriends().".format(i, mapGroups[i]))
        for i in range(nPoints):
            inGroup[i] = mapGroups[inGroup[i]]
        firstGroup = np.zeros(nPoints, dtype='i4') - 1
@@ -1417,9 +1417,9 @@ def __init__(self, coordinates, distance, separation='euclid'):
            elif separation == 'sphereradec':
                self.separation = self.sphereradec
            else:
-               msgs.error("Unknown separation function: {0}.".format(separation))
+               raise PypeItError("Unknown separation function: {0}.".format(separation))
        else:
-           msgs.error("Improper type for separation!")
+           raise PypeItError("Improper type for separation!")
        #
        # Save information about the coordinates.
        #
@@ -1524,7 +1524,7 @@ def spheregroup(ra, dec, linklength, chunksize=None):
    Raises
    ------
-   msgs.error
+   PypeItError
        If the array of coordinates only contains one point.
Notes @@ -1535,14 +1535,14 @@ def spheregroup(ra, dec, linklength, chunksize=None): """ npoints = ra.size if npoints == 1: - msgs.error("Cannot group only one point!") + raise PypeItError("Cannot group only one point!") # # Define the chunksize # if chunksize is not None: if chunksize < 4.0*linklength: chunksize = 4.0*linklength - msgs.warn("chunksize changed to {0:.2f}.".format(chunksize)) + msgs.warning("chunksize changed to {0:.2f}.".format(chunksize)) else: chunksize = max(4.0*linklength, 0.1) # @@ -1626,7 +1626,7 @@ def spherematch(ra1, dec1, ra2, dec2, matchlength, chunksize=None, # Check input size # if ra1.size == 1: - msgs.error("Change the order of the sets of coordinates!") + raise PypeItError("Change the order of the sets of coordinates!") # # Initialize chunks # diff --git a/pypeit/core/scattlight.py b/pypeit/core/scattlight.py index 394af7166d..b794cc3851 100644 --- a/pypeit/core/scattlight.py +++ b/pypeit/core/scattlight.py @@ -230,7 +230,7 @@ def scattered_light(frame, bpm, offslitmask, x0, bounds, detpad=300, debug=False msgs.info("Generating best-fitting scattered light model") scatt_img = scattered_light_model(res_lsq.x, _frame_pad)[detpad:-detpad, detpad:-detpad] else: - msgs.warn("Scattered light model fitting failed") + msgs.warning("Scattered light model fitting failed") scatt_img = np.zeros_like(frame) if debug: @@ -282,7 +282,7 @@ def mask_slit_regions(offslitmask, centrace, mask_regions=None): """ # Check if there are regions to be masked if mask_regions is None: - msgs.warn("There are no inter-slit regions specified that need to be masked") + msgs.warning("There are no inter-slit regions specified that need to be masked") return offslitmask elif isinstance(mask_regions, int): # Convert this to a list @@ -342,7 +342,7 @@ def fine_correction(frame, bpm, offslitmask, method='median', polyord=2, debug=F A 2D image (nspec, nspat) of the fine correction to the scattered light determined from the input frame. """ if method not in ['median', 'poly']: - msgs.error("Unrecognized method to determine the fine correction to the scattered light: {:s}".format(method)) + raise PypeItError("Unrecognized method to determine the fine correction to the scattered light: {:s}".format(method)) msgs.info("Performing a fine correction to the scattered light using the {:s} method".format(method)) nspec, nspat = frame.shape if method == 'median': diff --git a/pypeit/core/skysub.py b/pypeit/core/skysub.py index a6704253e7..4183fc415c 100644 --- a/pypeit/core/skysub.py +++ b/pypeit/core/skysub.py @@ -133,14 +133,14 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non inmask = (ivar > 0.0) & thismask & np.isfinite(image) & np.isfinite(ivar) elif inmask.dtype != bool: # Check that it's of type bool - msgs.error("Type of inmask should be bool and is of type: {:}".format(inmask.dtype)) + raise PypeItError("Type of inmask should be bool and is of type: {:}".format(inmask.dtype)) # Sky pixels for fitting gpm = thismask & (ivar > 0.0) & inmask & np.logical_not(edgmask) \ & np.isfinite(image) & np.isfinite(ivar) bad_pixel_frac = np.sum(thismask & np.logical_not(gpm))/np.sum(thismask) if bad_pixel_frac > max_mask_frac: - msgs.warn(f'This slit/order has {100.0*bad_pixel_frac:.3f}% of the pixels masked, which ' + msgs.warning(f'This slit/order has {100.0*bad_pixel_frac:.3f}% of the pixels masked, which ' f'exceeds the threshold of {100.0*max_mask_frac:.3f}%.' + msgs.newline() + 'There is likely a problem with this slit. 
Giving up on ' 'global sky-subtraction.') @@ -170,7 +170,7 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non kwargs_bspline={'bkspace':bsp}, kwargs_reject={'groupbadpix': True, 'maxrej': 10}) if exit_status != 0: - msgs.warn('Global sky-subtraction did not exit cleanly for initial positive sky fit.' + msgs.warning('Global sky-subtraction did not exit cleanly for initial positive sky fit.' + msgs.newline() + 'Initial masking based on positive sky fit will be skipped') else: res = (sky[pos_sky] - np.exp(lsky_fit)) * np.sqrt(sky_ivar[pos_sky]) @@ -198,7 +198,7 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non # better understand what this functionality is doing, but it makes the rejection much more quickly approach a small # chi^2 if exit_status == 1: - msgs.warn('Maximum iterations reached in bspline_profile global sky-subtraction for npoly={:d}.'.format(npoly_fit) + + msgs.warning('Maximum iterations reached in bspline_profile global sky-subtraction for npoly={:d}.'.format(npoly_fit) + msgs.newline() + 'Redoing sky-subtraction without polynomial degrees of freedom') poly_basis = np.ones_like(sky) @@ -339,7 +339,7 @@ def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, relative=relative, kwargs_reject={'groupbadpix': True, 'maxrej': 5}) else: - msgs.warn('All pixels are masked in skyoptimal. Not performing local sky subtraction.') + msgs.warning('All pixels are masked in skyoptimal. Not performing local sky subtraction.') return np.zeros_like(piximg), np.zeros_like(piximg), gpm chi2 = (data[good] - yfit1) ** 2 * ivar[good] @@ -358,7 +358,7 @@ def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, relative=relative, kwargs_reject={'groupbadpix': True, 'maxrej': 1}) else: - msgs.warn('All pixels are masked in skyoptimal after first round of rejection. Not performing local sky subtraction.') + msgs.warning('All pixels are masked in skyoptimal after first round of rejection. Not performing local sky subtraction.') return np.zeros_like(piximg), np.zeros_like(piximg), gpm ncoeff = npoly + nobj @@ -448,7 +448,7 @@ def optimal_bkpts(bkpts_optimal, bsp_min, piximg, sampmask, samp_frac=0.80, samplmax = np.ma.max(piximg_temp,fill_value=-np.inf,axis=1) samplmax = samplmax[np.invert(samplmax.mask)].data if samplmax.size != samplmin.size: - msgs.error('This should not happen') + raise PypeItError('This should not happen') nbkpt = samplmax.size # Determine the sampling. dsamp represents the gap in spectral pixel (wavelength) coverage between # subsequent spectral direction pixels in the piximg, i.e. it is the difference between the minimum @@ -739,9 +739,9 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, """ # Check input if model_noise and base_var is None: - msgs.error('Must provide base_var to iteratively update and improve the noise model.') + raise PypeItError('Must provide base_var to iteratively update and improve the noise model.') if base_var is not None and base_var.shape != sciimg.shape: - msgs.error('Base variance array does not match science image array shape.') + raise PypeItError('Base variance array does not match science image array shape.') # TODO Force traces near edges to always be extracted with a Gaussian profile. 
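# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the patch): the hunks in
# this file all apply one migration pattern -- fatal precondition failures
# switch from msgs.error(), which logged and terminated the process, to a
# raised PypeItError that callers can catch, while recoverable conditions use
# the standard logging method name, msgs.warning(). A minimal, self-contained
# sketch of that pattern, assuming PypeItError is the exception class defined
# in pypeit.pypmsgs (as the framematch.py hunk earlier in this patch suggests);
# the function and messages below are hypothetical:

import numpy as np

from pypeit import msgs
from pypeit.pypmsgs import PypeItError


def _check_sky_inputs(image, ivar):
    """Validate inputs in the style of the patched skysub routines."""
    if image.shape != ivar.shape:
        # Fatal: inconsistent inputs; raise so the caller decides how to react.
        raise PypeItError('Image and inverse-variance arrays have different shapes.')
    if not np.any(ivar > 0):
        # Recoverable: log a warning and let the caller skip this slit.
        msgs.warning('All pixels are masked; skipping local sky subtraction.')
        return False
    return True
# ---------------------------------------------------------------------------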
@@ -823,7 +823,7 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, max_spat_img = max_spat1[:, None] localmask = (spat_img > min_spat_img) & (spat_img < max_spat_img) & thismask if np.sum(localmask) == 0: - msgs.error('There are no pixels on the localmask for group={}. ' + raise PypeItError('There are no pixels on the localmask for group={}. ' 'Something is very wrong with either your slit edges or your object traces'.format(group)) npoly = skysub_npoly(localmask) @@ -907,8 +907,8 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, #sobjs[iobj].maskwidth = maskwidth if sobjs[iobj].prof_nsigma is None else \ # sobjs[iobj].prof_nsigma * (sobjs[iobj].FWHM / 2.3548) else: - msgs.warn("Bad extracted wavelengths in local_skysub_extract") - msgs.warn("Skipping this profile fit and continuing.....") + msgs.warning("Bad extracted wavelengths in local_skysub_extract") + msgs.warning("Skipping this profile fit and continuing.....") # Fit the local sky sky_bmodel = np.array(0.0) @@ -929,12 +929,12 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, fullbkpt=fullbkpt, sigrej=sigrej_eff, npoly=npoly) iterbsp = iterbsp + 1 if (not sky_bmodel.any()) & (iterbsp <= 3): - msgs.warn('***************************************') - msgs.warn('WARNING: bspline sky-subtraction failed') - msgs.warn('Increasing bkpt spacing by 20%. Retry') - msgs.warn( + msgs.warning('***************************************') + msgs.warning('WARNING: bspline sky-subtraction failed') + msgs.warning('Increasing bkpt spacing by 20%. Retry') + msgs.warning( 'Old bsp = {:5.2f}'.format(bsp_now) + '; New bsp = {:5.2f}'.format(1.2 ** (iterbsp) * bsp)) - msgs.warn('***************************************') + msgs.warning('***************************************') if sky_bmodel.any(): skyimage.flat[isub] = sky_bmodel @@ -979,14 +979,14 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, elif no_local_sky: pass else: - msgs.warn('ERROR: Bspline sky subtraction failed after 4 iterations of bkpt spacing') - msgs.warn(' Moving on......') + msgs.warning('ERROR: Bspline sky subtraction failed after 4 iterations of bkpt spacing') + msgs.warning(' Moving on......') # obj_profiles = np.zeros_like(obj_profiles) isub, = np.where(localmask.flatten()) # Just replace with the global sky skyimage.flat[isub] = global_sky.flat[isub] if iiter == niter: - msgs.warn('WARNING: LOCAL SKY SUBTRACTION NOT PERFORMED') + msgs.warning('WARNING: LOCAL SKY SUBTRACTION NOT PERFORMED') outmask_extract = outmask.copy() if use_2dmodel_mask else inmask.copy() @@ -1301,7 +1301,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, # Find the spat IDs if norders != len(slitids): - msgs.error('The number of orders in the sobjs object does not match the number of good slits in the ' + raise PypeItError('The number of orders in the sobjs object does not match the number of good slits in the ' 'slitmask image! There is a problem with the object/slitmask masking. This routine ' 'requires that all masking is performed in the calling routine.') @@ -1309,7 +1309,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, nleft = left.shape[1] nrigh = right.shape[1] if nleft != nrigh or norders != nleft or norders != nrigh: - msgs.error('The number of left and right edges must be the same as the number of orders. ' + raise PypeItError('The number of left and right edges must be the same as the number of orders. 
' 'There is likely a problem with your masking') # Now assign the order_sn, and generate an order_vec aligned with the slitids @@ -1322,7 +1322,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, ind = (sobjs.SLITID == slitid) & (sobjs.ECH_OBJID == uni_objid[iobj]) # Check for a missed order and fault if they exist if np.sum(ind) == 0: - msgs.error('There is a missing order for object {0:d} on slit {1:d}!'.format(iobj, slitid)) + raise PypeItError('There is a missing order for object {0:d} on slit {1:d}!'.format(iobj, slitid)) if iobj == 0: order_vec[islit] = sobjs[ind].ECH_ORDER order_snr[islit,iobj] = sobjs[ind].ech_snr @@ -1331,10 +1331,10 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, if (np.sum(sobjs.sign > 0) % norders) == 0: nobjs = int((np.sum(sobjs.sign > 0)/norders)) else: - msgs.error('Number of specobjs in sobjs is not an integer multiple of the number or orders!') + raise PypeItError('Number of specobjs in sobjs is not an integer multiple of the number or orders!') # Enforce that every object in sobj has an specobj on every good order if np.any(np.isnan(order_snr)): - msgs.error('There are missing orders for one or more objects in sobjs. There is a problem with how you have ' + raise PypeItError('There are missing orders for one or more objects in sobjs. There is a problem with how you have ' 'masked objects in sobjs or slits in slitmask in the calling routine') diff --git a/pypeit/core/slitdesign_matching.py b/pypeit/core/slitdesign_matching.py index f12ce01b45..084fd137d3 100644 --- a/pypeit/core/slitdesign_matching.py +++ b/pypeit/core/slitdesign_matching.py @@ -56,8 +56,8 @@ def best_offset(x_det, x_model, step=1, xlag_range=None): # we keep only the x_model values that are in the current detector wkeep =(x_model > min_x_det+xlag_range[0]) & (x_model < max_x_det+xlag_range[1]) if x_model[wkeep].size<2: - msgs.warn('Working between {} and {}'.format(min_x_det+xlag_range[0], max_x_det+xlag_range[1])) - msgs.warn('Not enough lines to run!!!') + msgs.warning('Working between {} and {}'.format(min_x_det+xlag_range[0], max_x_det+xlag_range[1])) + msgs.warning('Not enough lines to run!!!') sdev = 1e10 return 0. x_model_trim = x_model[wkeep] @@ -256,9 +256,9 @@ def slit_match(x_det, x_model, step=1, xlag_range=[-50,50], sigrej=3, print_matc # Both duplicates and matches with high RMS are considered bad dupl = dupl | out if edge is not None: - msgs.warn('{} duplicate match(es) for {} edges'.format(dupl[dupl == 1].size, edge)) + msgs.warning('{} duplicate match(es) for {} edges'.format(dupl[dupl == 1].size, edge)) else: - msgs.warn('{} duplicate match(es)'.format(dupl[dupl == 1].size)) + msgs.warning('{} duplicate match(es)'.format(dupl[dupl == 1].size)) # I commented the 3 lines below because I don't really need to trim the duplicate matches. I just # propagate the flag. # good = dupl == 0 diff --git a/pypeit/core/telluric.py b/pypeit/core/telluric.py index c04e777580..aace313b69 100644 --- a/pypeit/core/telluric.py +++ b/pypeit/core/telluric.py @@ -179,7 +179,7 @@ def read_telluric_pca(filename, wave_min=None, wave_max=None, pad_frac=0.10): ncomp = hdul[0].header.get('NCOMP') # check that the telgrid file is the correct one for this method if ncomp is None: - msgs.error("Could NOT read the number of PCA components of the telluric model. " + raise PypeItError("Could NOT read the number of PCA components of the telluric model. " "Are you using a grid-based model instead? 
If so, you should " " set teltype=grid") bounds = hdul[2].data @@ -248,7 +248,7 @@ def read_telluric_grid(filename, wave_min=None, wave_max=None, pad_frac=0.10): # check that the telgrid file is the correct one for this method if hdul[0].header.get('PRES0') is None: - msgs.error("Could NOT read the atmospheric information from the telluric model. " + raise PypeItError("Could NOT read the atmospheric information from the telluric model. " "Are you using a PCA-based model instead? If so, you should " " set teltype=pca") @@ -293,7 +293,7 @@ def interp_telluric_grid(theta, tell_dict): available wavelengths in ``tell_dict``. """ if len(theta) != 4: - msgs.error('Input parameter vector must have 4 and only 4 values.') + raise PypeItError('Input parameter vector must have 4 and only 4 values.') pg = tell_dict['pressure_grid'] tg = tell_dict['temp_grid'] hg = tell_dict['h2o_grid'] @@ -329,13 +329,13 @@ def conv_telluric(tell_model, dloglam, res): """ # Check the input values if res <= 0.0: - msgs.error('Resolution must be positive.') + raise PypeItError('Resolution must be positive.') if dloglam == 0.0: - msgs.error('The telluric model grid has zero spacing in log wavelength. This is not supported.') + raise PypeItError('The telluric model grid has zero spacing in log wavelength. This is not supported.') pix_per_sigma = 1.0/res/(dloglam*np.log(10.0))/(2.0 * np.sqrt(2.0 * np.log(2))) # number of dloglam pixels per 1 sigma dispersion sig2pix = 1.0/pix_per_sigma # number of sigma per 1 pix if sig2pix > 2.0: - msgs.warn('The telluric model grid is not sampled finely enough to properly convolve to the desired resolution. ' + msgs.warning('The telluric model grid is not sampled finely enough to properly convolve to the desired resolution. ' 'Skipping resolution convolution for now. Create a higher resolution telluric model grid') return tell_model @@ -904,7 +904,7 @@ def init_sensfunc_model(obj_params, iord, wave, counts_per_ang, ivar, gpm, tellm flam_true_gpm = (wave >= obj_params['std_dict']['wave'].value.min()) \ & (wave <= obj_params['std_dict']['wave'].value.max()) if np.any(np.logical_not(flam_true_gpm)): - msgs.warn('Your data extends beyond the range covered by the standard star spectrum. ' + msgs.warning('Your data extends beyond the range covered by the standard star spectrum. 
'Proceeding by masking these regions, but consider using another standard star')
     N_lam = counts_per_ang/obj_params['exptime']
     zeropoint_data, zeropoint_data_gpm \
@@ -1153,7 +1153,7 @@ def init_star_model(obj_params, iord, wave, flux, ivar, mask, tellmodel):
         coeff, wave_min, wave_max = fit_tuple
         if(wave_min != wave.min()) or (wave_max != wave.max()):
-            msgs.error('Problem with the wave_min or wave_max')
+            raise PypeItError('Problem with the wave_min or wave_max')
         # Polynomial coefficient bounds
         bounds_obj = [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0],
                                obj_params['minmax_coeff_bounds'][0]),
                        np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1],
                                obj_params['minmax_coeff_bounds'][1]))
@@ -1273,7 +1273,7 @@ def init_poly_model(obj_params, iord, wave, flux, ivar, mask, tellmodel):
     coeff, wave_min, wave_max = fit_tuple
     if(wave_min != wave.min()) or (wave_max != wave.max()):
-        msgs.error('Problem with the wave_min or wave_max')
+        raise PypeItError('Problem with the wave_min or wave_max')
     # Polynomial model
     polymodel = coadd.poly_model_eval(coeff, obj_params['func'], obj_params['model'], wave, wave_min, wave_max)
@@ -1473,7 +1473,7 @@ def sensfunc_telluric(wave, counts, counts_ivar, counts_mask, exptime, airmass,
     # Create the polyorder_vec
     if np.size(polyorder) > 1:
         if np.size(polyorder) != norders:
-            msgs.error('polyorder must have either have norder elements or be a scalar')
+            raise PypeItError('polyorder must either have norder elements or be a scalar')
         # TODO: Should this be np.asarray?
         polyorder_vec = np.array(polyorder)
     else:
@@ -1530,7 +1530,7 @@ def create_bal_mask(wave, bal_wv_min_max):
     """
     if np.size(bal_wv_min_max) % 2 !=0:
-        msgs.error('bal_wv_min_max must be a list/array with even numbers.')
+        raise PypeItError('bal_wv_min_max must be a list/array with even numbers.')
 
     bal_bpm = np.zeros_like(wave, dtype=bool)
     nbal = int(np.size(bal_wv_min_max) / 2)
@@ -1762,7 +1762,7 @@ def star_telluric(spec1dfile, telgridfile, telloutfile, outfile, star_type=None,
     # Create the polyorder_vec
     if np.size(polyorder) > 1:
         if np.size(polyorder) != norders:
-            msgs.error('polyorder must have either have norder elements or be a scalar')
+            raise PypeItError('polyorder must either have norder elements or be a scalar')
         polyorder_vec = np.array(polyorder)
     else:
         polyorder_vec = np.full(norders, polyorder)
@@ -1864,7 +1864,7 @@ def poly_telluric(spec1dfile, telgridfile, telloutfile, outfile, z_obj=0.0, func
     # Create the polyorder_vec
     if np.size(polyorder) > 1:
         if np.size(polyorder) != norders:
-            msgs.error('polyorder must have either have norder elements or be a scalar')
+            raise PypeItError('polyorder must either have norder elements or be a scalar')
         polyorder_vec = np.array(polyorder)
     else:
         polyorder_vec = np.full(norders, polyorder)
@@ -2554,14 +2554,14 @@ def run(self, only_orders=None):
         if self.ech_orders is not None and len(self.ech_orders) == self.norders:
             indx_only = np.where(np.isin(self.ech_orders, only_orders))[0]
             if (indx_only.size == 0) and (only_orders is not None):
-                msgs.warn(f'All the orders provided in `only_orders` are not among the expected orders. '
+                msgs.warning(f'All the orders provided in `only_orders` are not among the expected orders. 
' f'Using all orders available in the data.') elif indx_only.size > 0: good_orders = indx_only msgs.info(f'Working only on the following orders: {self.ech_orders[indx_only]}') if len(indx_only) != len(only_orders): missing_orders = list(set(only_orders) - set(self.ech_orders[indx_only])) - msgs.warn(f'Some orders provided in `only_orders` are not among the expected orders. ' + msgs.warning(f'Some orders provided in `only_orders` are not among the expected orders. ' f'Ignoring orders: {missing_orders}') # Run the fits @@ -2702,7 +2702,7 @@ def assign_output(self, iord): # # if inmask is not None: # if wave_inmask is None: -# msgs.error('If you are specifying a mask you need to pass in the corresponding ' +# raise PypeItError('If you are specifying a mask you need to pass in the corresponding ' # 'wavelength grid') # # # TODO we shoudld consider refactoring the interpolator to take a @@ -2721,7 +2721,7 @@ def assign_output(self, iord): # elif mask.ndim == inmask.ndim: # inmask_out = inmask_int # else: -# msgs.error('Unrecognized shape for data mask') +# raise PypeItError('Unrecognized shape for data mask') # return (mask & inmask_out) # else: # return mask diff --git a/pypeit/core/trace.py b/pypeit/core/trace.py index 0bb5d13892..701726eaa5 100644 --- a/pypeit/core/trace.py +++ b/pypeit/core/trace.py @@ -79,9 +79,9 @@ def detect_slit_edges(flux, bpm=None, median_iterations=0, min_sqm=30., sobel_mo """ # Checks if flux.ndim != 2: - msgs.error('Trace image must be 2D.') + raise PypeItError('Trace image must be 2D.') if bpm is not None and bpm.shape != flux.shape: - msgs.error('Mismatch in mask and trace image shapes.') + raise PypeItError('Mismatch in mask and trace image shapes.') # Specify how many times to repeat the median filter. Even better # would be to fit the filt/sqrt(abs(binarr)) array with a Gaussian @@ -173,13 +173,13 @@ def identify_traces(edge_img, max_spatial_separation=4, follow_span=10, minimum_ msgs.info('Finding unique traces among detected edges.') # Check the input if edge_img.ndim > 2: - msgs.error('Provided edge image must be 2D.') + raise PypeItError('Provided edge image must be 2D.') if not np.all(np.isin(np.unique(edge_img), [-1,0,1])): - msgs.error('Edge image must only have -1, 0, or 1 values.') + raise PypeItError('Edge image must only have -1, 0, or 1 values.') # No edges were detected. if np.all(edge_img == 0): - msgs.warn('No edges were found!') + msgs.warning('No edges were found!') return np.zeros_like(edge_img, dtype=int) # Find the left and right coordinates @@ -331,20 +331,20 @@ def atleast_one_edge(edge_img, bpm=None, flux_valid=True, buffer=0, copy=False): # No traces and fluxes are invalid. # TODO: This used to just be a warning, but I'm having it stop # the code if no traces are found and the flux is low. - msgs.error('Unable to trace any edges! Image flux is low; check trace image is correct.') + raise PypeItError('Unable to trace any edges! Image flux is low; check trace image is correct.') # Use the mask to determine the first and last valid pixel column sum_bpm = np.zeros(edge_img.shape[1]) if bpm is None else np.sum(bpm, axis=0) if nleft == 0: # Add a left edge trace at the first valid column - msgs.warn('No left edge found. Adding one at the detector edge.') + msgs.warning('No left edge found. Adding one at the detector edge.') gdi0 = np.min(np.where(sum_bpm[buffer:] == 0)[0]) + buffer _edge_img[:,gdi0] = -1 if nright == 0: # Add a right edge trace at the last valid column - msgs.warn('No right edge found. 
Adding one at the detector edge.') + msgs.warning('No right edge found. Adding one at the detector edge.') gdi1 = np.max(np.where(sum_bpm[:-buffer] == 0)[0]) _edge_img[:,gdi1] = 1 @@ -415,7 +415,7 @@ def handle_orphan_edges(edge_img, sobel_sig, bpm=None, flux_valid=True, buffer=0 if nright > 1: # To get here, nleft must be 1. This is mainly in here for # LRISb, which is a real pain.. - msgs.warn('Only one left edge, and multiple right edges.') + msgs.warning('Only one left edge, and multiple right edges.') msgs.info('Restricting right edge detection to the most significantly detected edge.') # Find the most significant right trace best_trace = np.argmin([-np.median(sobel_sig[_edge_img==t]) for t in range(nright)])+1 @@ -427,7 +427,7 @@ def handle_orphan_edges(edge_img, sobel_sig, bpm=None, flux_valid=True, buffer=0 return _edge_img # To get here, nright must be 1. - msgs.warn('Only one right edge, and multiple left edges.') + msgs.warning('Only one right edge, and multiple left edges.') msgs.info('Restricting left edge detection to the most significantly detected edge.') # Find the most significant left trace best_trace = np.argmax([np.median(sobel_sig[_edge_img == -t]) for t in range(nleft)])+1 @@ -1466,7 +1466,7 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm min_pkdist_frac_fwhm=min_pkdist_frac_fwhm, debug=show_peaks) if len(_cen) == 0 or not np.any(best): - msgs.warn('No good {0}s found!'.format(l)) + msgs.warning('No good {0}s found!'.format(l)) continue msgs.info('Found {0} good {1}(s) in the rectified, collapsed image'.format( len(_cen[best]),l)) @@ -1484,7 +1484,7 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm clipped_peak = sigma_clip(peak[best], sigma_lower=peak_clip, sigma_higher=np.inf) peak_mask = np.ma.getmaskarray(clipped_peak) if np.any(peak_mask): - msgs.warn('Clipping {0} detected peak(s) with aberrant amplitude(s).'.format( + msgs.warning('Clipping {0} detected peak(s) with aberrant amplitude(s).'.format( np.sum(peak_mask))) loc = loc[np.invert(peak_mask)] _cen = _cen[np.invert(peak_mask)] diff --git a/pypeit/core/tracewave.py b/pypeit/core/tracewave.py index 622094a9be..b7e1339b25 100644 --- a/pypeit/core/tracewave.py +++ b/pypeit/core/tracewave.py @@ -117,7 +117,7 @@ def tilts_find_lines(arc_spec, slit_cen, tracethresh=10.0, sig_neigh=5.0, nfwhm_ lines_spec = arcdet[aduse] nlines = len(lines_spec) if nlines == 0: - msgs.warn('No arc lines were deemed usable on this slit; line tilts cannot be computed.' + msgs.warning('No arc lines were deemed usable on this slit; line tilts cannot be computed.' ' This may be a bad slit, which you can remove. Otherwise, try lowering ' 'the tracethresh parameter.') return None, None, None @@ -489,7 +489,7 @@ def trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask= # cause a full fault of the code, we need to make sure the user # sees these kinds of critical failures instead of them getting # buried in all the other messages. - msgs.warn('Too many lines rejected in this slit/order.' + msgs.newline() + msgs.warning('Too many lines rejected in this slit/order.' 
diff --git a/pypeit/core/wave.py b/pypeit/core/wave.py
index bd623d5711..913e47bfe6 100644
--- a/pypeit/core/wave.py
+++ b/pypeit/core/wave.py
@@ -116,7 +116,7 @@ def geomotion_velocity(time, skycoord, frame="heliocentric"):
 
     # Check that the RA/DEC of the object is ICRS compatible
     if not skycoord.is_transformable_to(ICRS()):
-        msgs.error("Cannot transform RA/DEC of object to the ICRS")
+        raise PypeItError("Cannot transform RA/DEC of object to the ICRS")
 
     # Calculate ICRS position and velocity of Earth's geocenter
     ep, ev = solar_system.get_body_barycentric_posvel('earth', time)
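The guard above relies on astropy's frame-transformation graph; a brief standalone sketch of the same check (standard astropy API, illustrative coordinates):

    from astropy.coordinates import SkyCoord, ICRS
    import astropy.units as u

    coord = SkyCoord(ra=150.0*u.deg, dec=2.3*u.deg, frame='fk5')
    # is_transformable_to() returns a truthy/falsy result rather than raising,
    # which is why the patched code can turn a False into a PypeItError.
    print(coord.is_transformable_to(ICRS()))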
diff --git a/pypeit/core/wavecal/autoid.py b/pypeit/core/wavecal/autoid.py
index 415b02c239..02b14bc892 100644
--- a/pypeit/core/wavecal/autoid.py
+++ b/pypeit/core/wavecal/autoid.py
@@ -508,10 +508,10 @@ def reidentify(spec, spec_arxiv_in, wave_soln_arxiv_in, line_list,
     if spec.ndim == 1:
         nspec = spec.size
     else:
-        msgs.error('spec must be a one dimensional numpy array ')
+        raise PypeItError('spec must be a one dimensional numpy array ')
 
     if spec_arxiv_in.ndim != wave_soln_arxiv_in.ndim:
-        msgs.error('spec arxiv and wave_soln_arxiv must have the same dimensions')
+        raise PypeItError('spec arxiv and wave_soln_arxiv must have the same dimensions')
 
     if spec_arxiv_in.ndim == 1:
         spec_arxiv1 = spec_arxiv_in.reshape(spec_arxiv_in.size,1)
@@ -520,7 +520,7 @@ def reidentify(spec, spec_arxiv_in, wave_soln_arxiv_in, line_list,
         spec_arxiv1 = spec_arxiv_in.copy()
         wave_soln_arxiv1 = wave_soln_arxiv_in.copy()
     else:
-        msgs.error('Unrecognized shape for spec_arxiv. It must be either a one dimensional or two dimensional numpy array')
+        raise PypeItError('Unrecognized shape for spec_arxiv. It must be either a one dimensional or two dimensional numpy array')
 
     # TODO: JFH I would like to take these calls out. This reidentify code should only ever be run by comparing
     # data with the same binning. That would then allow me to drop the requirement that this code operate
@@ -536,7 +536,7 @@ def reidentify(spec, spec_arxiv_in, wave_soln_arxiv_in, line_list,
     xrng = np.arange(nspec)
 
     if nspec_arxiv != nspec:
-        msgs.error('Spectrum sizes do not match. Something is very wrong!')
+        raise PypeItError('Spectrum sizes do not match. Something is very wrong!')
 
     use_spec = spec
     # Continuum subtract the arc spectrum
@@ -603,7 +603,7 @@ def reidentify(spec, spec_arxiv_in, wave_soln_arxiv_in, line_list,
             msgs.info(f'shift = {shift_vec[iarxiv]:5.3f}, stretch = {stretch_vec[iarxiv]:5.3f}, cc = {ccorr_vec[iarxiv]:5.3f}')
         # If cc < cc_thresh or if this optimization failed, don't reidentify from this arxiv spectrum
         if success != 1:
-            msgs.warn('Global cross-correlation failed or cc [...]
[...]
         gd_det = np.where(IDs > 0.)[0]
         if len(gd_det) < 2:
-            msgs.warn("Not enough useful IDs")
+            msgs.warning("Not enough useful IDs")
             wvcalib[str(slit)] = None
             continue
         # Fit
@@ -1300,7 +1300,7 @@ def full_template(spec, lamps, par, ok_mask, det, binspectral, nsnippet=2, slit_
             try:
                 sv_IDs.append(patt_dict['IDs'])
             except KeyError:
-                msgs.warn("Failed to perform wavelength calibration in reidentify..")
+                msgs.warning("Failed to perform wavelength calibration in reidentify..")
                 sv_IDs.append(np.zeros_like(detections))
             else:
                 # Save now in case the next one barfs
@@ -1311,7 +1311,7 @@ def full_template(spec, lamps, par, ok_mask, det, binspectral, nsnippet=2, slit_
         IDs = np.concatenate(sv_IDs)
         gd_det = np.where(IDs > 0.)[0]
         if len(gd_det) < 2:
-            msgs.warn("Not enough useful IDs")
+            msgs.warning("Not enough useful IDs")
             wvcalib[str(slit)] = None
             continue
         # get n_final for this slit
@@ -1409,21 +1409,21 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
 
     # Check input
     if not isinstance(par, pypeitpar.WavelengthSolutionPar):
-        msgs.error('Input parameters must be provided by a WavelengthSolutionPar object.')
+        raise PypeItError('Input parameters must be provided by a WavelengthSolutionPar object.')
 
     if spec.ndim != 2:
-        msgs.error('Input spec must be a 2D numpy array!')
+        raise PypeItError('Input spec must be a 2D numpy array!')
 
     nspec, norders = spec.shape
     if orders.size != norders:
-        msgs.error('Number of provided orders does not match the number of provided spectra.')
+        raise PypeItError('Number of provided orders does not match the number of provided spectra.')
 
     # Mask info
     ok_mask = np.arange(norders) if ok_mask is None else ok_mask
     if np.amax(ok_mask) >= norders:
-        msgs.error('Spectrum selected by ok_mask is beyond the limits of the provided '
+        raise PypeItError('Spectrum selected by ok_mask is beyond the limits of the provided '
                    'spec array.')
 
     # Load the line lists
@@ -1443,12 +1443,12 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
             continue
         # ToDO should we still be populating wave_calib with an empty dict here?
         if iord not in ok_mask:
-            msgs.warn(f"Skipping order = {orders[iord]} ({iord+1}/{norders}) because masked")
+            msgs.warning(f"Skipping order = {orders[iord]} ({iord+1}/{norders}) because masked")
             wv_calib[str(iord)] = None
             all_patt_dict[str(iord)] = None
             continue
         if np.all(spec_arxiv[:, iord] == 0.0):
-            msgs.warn(f"Order = {orders[iord]} ({iord+1}/{norders}) cannot be reidentified "
+            msgs.warning(f"Order = {orders[iord]} ({iord+1}/{norders}) cannot be reidentified "
                       f"because this order is not present in the arxiv")
             wv_calib[str(iord)] = None
             all_patt_dict[str(iord)] = None
@@ -1476,7 +1476,7 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
         if not all_patt_dict[str(iord)]['acceptable']:
             wv_calib[str(iord)] = None
             bad_orders = np.append(bad_orders, iord)
-            msgs.warn(msgs.newline() + '---------------------------------------------------' + msgs.newline() +
+            msgs.warning(msgs.newline() + '---------------------------------------------------' + msgs.newline() +
                       f'Reidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + msgs.newline() +
                       f'  Cross-correlation failed' + msgs.newline() +
                       '---------------------------------------------------')
@@ -1497,14 +1497,14 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
             # This pattern wasn't good enough
             wv_calib[str(iord)] = None
             bad_orders = np.append(bad_orders, iord)
-            msgs.warn(msgs.newline() + '---------------------------------------------------' + msgs.newline() +
+            msgs.warning(msgs.newline() + '---------------------------------------------------' + msgs.newline() +
                       f'Reidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + msgs.newline() +
                       f'  Final fit failed' + msgs.newline() +
                       '---------------------------------------------------')
             continue
         # Is the RMS below the threshold?
         if final_fit['rms'] > rms_thresh:
-            msgs.warn(msgs.newline() + '---------------------------------------------------' + msgs.newline() +
+            msgs.warning(msgs.newline() + '---------------------------------------------------' + msgs.newline() +
                       f'Reidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + msgs.newline() +
                       f'  Poor RMS ({final_fit["rms"]:.3f})! Need to add additional spectra to arxiv to improve fits' + msgs.newline() +
                       '---------------------------------------------------')
@@ -1563,7 +1563,7 @@ def report_final(nslits, all_patt_dict, detections,
             continue
         st = str(slit)
         if slit not in ok_mask or slit in bad_slits or all_patt_dict[st] is None or wv_calib[st] is None:
-            msgs.warn(badmsg)
+            msgs.warning(badmsg)
             continue
 
         if all_patt_dict[st]['sign'] == +1:
@@ -1660,11 +1660,11 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
 
         # Check input
         if not isinstance(par, pypeitpar.WavelengthSolutionPar):
-            msgs.error('Input parameters must be provided by a WavelengthSolutionPar object.')
+            raise PypeItError('Input parameters must be provided by a WavelengthSolutionPar object.')
         # TODO: Do we need ech_fix_format if we have
         # spectrograph.pypeline, assuming we keep passing spectrograph?
         if ech_fixed_format and orders is None:
-            msgs.error('If the specrograph is a fixed-format echelle (ech_fix_format is True), '
+            raise PypeItError('If the specrograph is a fixed-format echelle (ech_fix_format is True), '
                        'the orders must be provided.')
 
         # TODO: What does and does not need to be an attribute?
@@ -1682,10 +1682,10 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
             self.nspec = spec.size
             self.nslits = 1
         else:
-            msgs.error('Input spec must be a 1D or 2D numpy array!')
+            raise PypeItError('Input spec must be a 1D or 2D numpy array!')
 
         if orders is not None and orders.size != self.nslits:
-            msgs.error('Number of provided orders does not match the number of provided spectra.')
+            raise PypeItError('Number of provided orders does not match the number of provided spectra.')
 
         self.par = par
         self.lamps = lamps
@@ -1694,7 +1694,7 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
         # Mask info
         self.ok_mask = np.arange(self.nslits) if ok_mask is None else ok_mask
         if np.amax(ok_mask) >= self.nslits:
-            msgs.error('Spectrum selected by ok_mask is beyond the limits of the provided '
+            raise PypeItError('Spectrum selected by ok_mask is beyond the limits of the provided '
                        'spec array.')
 
         # List of bad slits
         self.bad_slits = []
@@ -1735,7 +1735,7 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
                 narxiv -=1
 
         #if self.ech_fix_format and (self.nslits != narxiv):
-        #    msgs.error('You have set ech_fix_format = True, but nslits={:d} != narxiv={:d}'.format(self.nslits,narxiv) + '.' +
+        #    raise PypeItError('You have set ech_fix_format = True, but nslits={:d} != narxiv={:d}'.format(self.nslits,narxiv) + '.' +
         #               msgs.newline() + 'The number of orders identified does not match the number of solutions in the arxiv')
         #
@@ -1796,7 +1796,7 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
             if not self.all_patt_dict[str(slit)]['acceptable']:
                 self.wv_calib[str(slit)] = None
                 self.bad_slits = np.append(self.bad_slits, slit)
-                msgs.warn('---------------------------------------------------' + msgs.newline() +
+                msgs.warning('---------------------------------------------------' + msgs.newline() +
                           'Reidentify report for slit {0:d}/{1:d}'.format(slit, self.nslits-1) + order_str + msgs.newline() +
                           '  Cross-correlation failed' + msgs.newline() +
                           '---------------------------------------------------')
@@ -1814,14 +1814,14 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
                 # This pattern wasn't good enough
                 self.wv_calib[str(slit)] = None
                 self.bad_slits = np.append(self.bad_slits, slit)
-                msgs.warn('---------------------------------------------------' + msgs.newline() +
+                msgs.warning('---------------------------------------------------' + msgs.newline() +
                           'Reidentify report for slit {0:d}/{1:d}'.format(slit, self.nslits-1) + order_str + msgs.newline() +
                           '  Final fit failed' + msgs.newline() +
                           '---------------------------------------------------')
                 continue
             # Is the RMS below the threshold?
             if final_fit['rms'] > rms_thresh:
-                msgs.warn('---------------------------------------------------' + msgs.newline() +
+                msgs.warning('---------------------------------------------------' + msgs.newline() +
                           'Reidentify report for slit {0:d}/{1:d}'.format(slit, self.nslits-1) + order_str + msgs.newline() +
                           '  Poor RMS ({0:.3f})! Need to add additional spectra to arxiv to improve fits'.format(
                           final_fit['rms']) + msgs.newline() +
@@ -2111,7 +2111,7 @@ def run_brute(self, min_nlines=10):
             # Were there enough lines?  This mainly deals with junk slits
             if self._all_tcent.size < min_nlines:
-                msgs.warn("Not enough lines to identify in slit {0:d}!".format(slit+1))
+                msgs.warning("Not enough lines to identify in slit {0:d}!".format(slit+1))
                 self._det_weak[str(slit)] = [None,None]
                 self._det_stro[str(slit)] = [None,None]
                 # Remove from ok mask
@@ -2149,7 +2149,7 @@ def run_brute(self, min_nlines=10):
             obad_slits = bad_slits.copy()
             cntr += 1
             if cntr > 10:
-                msgs.warn("Breaking while loop before convergence. Check the wavelength solution!")
+                msgs.warning("Breaking while loop before convergence. Check the wavelength solution!")
                 break
 
         # With these updates to the fits of each slit, determine the final fit.
@@ -2221,7 +2221,7 @@ def run_kdtree(self, polygon=4, detsrch=7, lstsrch=10, pixtol=5):
                 wvutils.arc_lines_from_spec(self._spec[:, slit], sigdetect=sigdetect, fwhm=fwhm,
                                             nonlinear_counts = self._nonlinear_counts)
             if self._all_tcent.size == 0:
-                msgs.warn("No lines to identify in slit {0:d}!".format(slit+ 1))
+                msgs.warning("No lines to identify in slit {0:d}!".format(slit+ 1))
                 continue
 
             # Save the detections
@@ -2254,7 +2254,7 @@ def run_kdtree(self, polygon=4, detsrch=7, lstsrch=10, pixtol=5):
                 patternp, indexp = kdtree_generator.hexagon(use_tcentp, detsrch, maxlinear)
                 patternm, indexm = kdtree_generator.hexagon(use_tcentm, detsrch, maxlinear)
             else:
-                msgs.warn("Patterns can only be generated with 3 <= polygon <= 6")
+                msgs.warning("Patterns can only be generated with 3 <= polygon <= 6")
                 return None
 
             dettreep = scipy.spatial.cKDTree(patternp, leafsize=30)
@@ -2431,7 +2431,7 @@ def cross_match(self, good_fit, detections):
             if bs not in self._ok_mask:
                 continue
             if detections[str(bs)][0] is None:  # No detections at all; slit is hopeless
-                msgs.warn('Slit {:d}'.format(bs) + ' has no arc line detections.  Likely this slit is junk!')
+                msgs.warning('Slit {:d}'.format(bs) + ' has no arc line detections.  Likely this slit is junk!')
                 self._bad_slits.append(bs)
                 continue
 
@@ -2457,7 +2457,7 @@ def cross_match(self, good_fit, detections):
                                cc_thresh=cc_thresh, fwhm=fwhm, debug=self._debug,
                                stretch_func=self._par['stretch_func'])
             if success != 1:
-                msgs.warn('cross-correlation failed or cc [...]
[...]
             if final_fit['rms'] > rms_thresh:
-                msgs.warn('---------------------------------------------------' + msgs.newline() +
+                msgs.warning('---------------------------------------------------' + msgs.newline() +
                           'Cross-match report for slit {0:d}/{1:d}:'.format(bs + 1, self._nslit) + msgs.newline() +
                           '  Poor RMS ({0:.3f})! Will try cross matching iteratively'.format(final_fit['rms']) + msgs.newline() +
                           '---------------------------------------------------')
@@ -2708,7 +2708,7 @@ def cross_match(self, good_fit, detections):
         #         # Check if a solution was found
         #         if not patt_dict['acceptable']:
         #             new_bad_slits = np.append(new_bad_slits, slit)
-        #             msgs.warn('---------------------------------------------------' + msgs.newline() +
+        #             msgs.warning('---------------------------------------------------' + msgs.newline() +
        #                       'Cross-match report for slit {0:d}/{1:d}:'.format(slit, self._nslit-1) + msgs.newline() +
         #                       '  Lines could not be identified! Will try cross matching iteratively' + msgs.newline() +
         #                       '---------------------------------------------------')
@@ -2717,13 +2717,13 @@ def cross_match(self, good_fit, detections):
         #         if final_fit is None:
         #             # This pattern wasn't good enough
         #             new_bad_slits = np.append(new_bad_slits, slit)
-        #             msgs.warn('---------------------------------------------------' + msgs.newline() +
+        #             msgs.warning('---------------------------------------------------' + msgs.newline() +
         #                       'Cross-match report for slit {0:d}/{1:d}:'.format(slit, self._nslit-1) + msgs.newline() +
         #                       '  Fit was not good enough! Will try cross matching iteratively' + msgs.newline() +
         #                       '---------------------------------------------------')
         #             continue
         #         if final_fit['rms'] > rms_thresh:
-        #             msgs.warn('---------------------------------------------------' + msgs.newline() +
+        #             msgs.warning('---------------------------------------------------' + msgs.newline() +
         #                       'Cross-match report for slit {0:d}/{1:d}:'.format(slit, self._nslit-1) + msgs.newline() +
         #                       '  Poor RMS ({0:.3f})! Will try cross matching iteratively'.format(final_fit['rms']) + msgs.newline() +
         #                       '---------------------------------------------------')
@@ -2801,13 +2801,13 @@ def get_use_tcent_old(self, corr, cut=True, arr_err=None, weak=False):
                 arr = self._all_tcent_weak.copy()[self._icut_weak]
                 err = self._all_ecent_weak.copy()[self._icut_weak]
             else:
-                msgs.error('CODING ERROR: Cut must be True')
+                raise PypeItError('CODING ERROR: Cut must be True')
         else:
             if cut:
                 arr = self._all_tcent.copy()[self._icut]
                 err = self._all_ecent.copy()[self._icut]
             else:
-                msgs.error('CODING ERROR: Cut must be True')
+                raise PypeItError('CODING ERROR: Cut must be True')
         else:
             arr, err = arr_err[0], arr_err[1]
         # Return the appropriate tcent
@@ -2885,7 +2885,7 @@ def results_brute(self, tcent_ecent, poly=3, pix_tol=0.5, detsrch=5, lstsrch=5,
         elif poly == 4:
             from pypeit.core.wavecal.patterns import quadrangles as generate_patterns
         else:
-            msgs.warn("Pattern matching is only available for trigons and tetragons.")
+            msgs.warning("Pattern matching is only available for trigons and tetragons.")
             return None, None
 
         if wavedata is None:
@@ -3215,14 +3215,14 @@ def report_prelim(self, slit, best_patt_dict, best_final_fit):
             good_fit = False
             # Report on the best preliminary result
             if best_final_fit is None:
-                msgs.warn('---------------------------------------------------' + msgs.newline() +
+                msgs.warning('---------------------------------------------------' + msgs.newline() +
                           'Preliminary report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() +
                           '  No matches! Attempting to cross match.' + msgs.newline() +
                           '---------------------------------------------------')
                 self._all_patt_dict[str(slit)] = None
                 self._all_final_fit[str(slit)] = None
             elif best_final_fit['rms'] > rms_thresh:
-                msgs.warn('---------------------------------------------------' + msgs.newline() +
+                msgs.warning('---------------------------------------------------' + msgs.newline() +
                           'Preliminary report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() +
                           '  Poor RMS ({0:.3f})! Attempting to cross match.'.format(best_final_fit['rms']) + msgs.newline() +
                           '---------------------------------------------------')
@@ -3258,10 +3258,10 @@ def report_final(self):
             badmsg = '---------------------------------------------------' + msgs.newline() +\
                      'Final report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline()
             if slit not in self._ok_mask:
-                msgs.warn(badmsg + 'Masked slit ignored')
+                msgs.warning(badmsg + 'Masked slit ignored')
                 continue
             if self._all_patt_dict[str(slit)] is None:
-                msgs.warn(badmsg + '  Wavelength calibration not performed!')
+                msgs.warning(badmsg + '  Wavelength calibration not performed!')
                 continue
             st = str(slit)
             if self._all_patt_dict[st]['sign'] == +1:
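The reidentify and cross-match reports above rebuild the same dashed banner by chaining `msgs.newline()` concatenations. A hypothetical helper, not part of this patch, that would centralize that formatting; it assumes only that `msgs.warning()` accepts a single string and that `msgs.newline()` keeps returning a line-break string:

    def warn_report(msgs, title, *details):
        # Hypothetical convenience wrapper: compose the dashed banner used
        # by the reidentify reports from a title and detail lines.
        bar = '-' * 51
        msgs.warning(msgs.newline().join([bar, title, *details, bar]))

    # e.g. warn_report(msgs, 'Reidentify report for order = 3 (1/5):',
    #                  '  Cross-correlation failed')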
diff --git a/pypeit/core/wavecal/waveio.py b/pypeit/core/wavecal/waveio.py
index 30220407da..580d0b198f 100644
--- a/pypeit/core/wavecal/waveio.py
+++ b/pypeit/core/wavecal/waveio.py
@@ -31,7 +31,7 @@ def load_wavelength_calibration(filename: pathlib.Path) -> dict:
         Lists read from the json file are returnes as numpy arrays.
     """
     if not filename.is_file():
-        msgs.error(f"File does not exist: {filename}")
+        raise PypeItError(f"File does not exist: {filename}")
 
     # Force any possible pathlib.Path object to string before `loadjson`
     wv_calib = linetools.utils.loadjson(str(filename))
@@ -159,7 +159,7 @@ def load_reid_arxiv(arxiv_file):
             wv_calib_arxiv[str(irow)]['order'] = wv_tbl['order'][irow]
 
     else:
-        msgs.error(f"Not ready for this `reid_arxiv` extension: {arxiv_fmt}")
+        raise PypeItError(f"Not ready for this `reid_arxiv` extension: {arxiv_fmt}")
 
     return wv_calib_arxiv, par
 
@@ -223,7 +223,7 @@ def load_line_lists(lamps, all=False, include_unknown:bool=False, restrict_on_in
     msgs.info(f"Arc lamps used: {', '.join(lamps)}")
     # Read standard files
     # NOTE: If one of the `lamps` does not exist, dataPaths.linelist.get_file_path()
-    # will exit with msgs.error().
+    # will raise a PypeItError.
     lists = [load_line_list(dataPaths.linelist.get_file_path(f'{lamp}_lines.dat'))
              for lamp in lamps]
     # Stack
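For reference, the load path exercised above is essentially pathlib validation plus linetools JSON parsing. A minimal standalone sketch, with the stdlib `json` module standing in for `linetools.utils.loadjson` and a stock exception standing in for `PypeItError`:

    import json
    from pathlib import Path

    def load_wv_calib(filename: Path) -> dict:
        # Same guard as the patched load_wavelength_calibration(): fail
        # early, with a catchable exception, if the file is missing.
        if not filename.is_file():
            raise FileNotFoundError(f'File does not exist: {filename}')
        with open(filename) as f:
            return json.load(f)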
diff --git a/pypeit/core/wavecal/wv_fitting.py b/pypeit/core/wavecal/wv_fitting.py
index e4d99a2839..10211bdb8c 100644
--- a/pypeit/core/wavecal/wv_fitting.py
+++ b/pypeit/core/wavecal/wv_fitting.py
@@ -149,7 +149,7 @@ def to_hdu(self, **kwargs):
         """
         if 'force_to_bintbl' in kwargs:
             if not kwargs['force_to_bintbl']:
-                msgs.warn(f'{self.__class__.__name__} objects must always use '
+                msgs.warning(f'{self.__class__.__name__} objects must always use '
                           'force_to_bintbl = True!')
             kwargs.pop('force_to_bintbl')
         return super().to_hdu(force_to_bintbl=True, **kwargs)
@@ -278,7 +278,7 @@ def fit_slit(spec, patt_dict, tcent, line_lists, vel_tol = 1.0, outroot=None, sl
 
     # Check that patt_dict and tcent refer to each other
     if patt_dict['mask'].shape != tcent.shape:
-        msgs.error('patt_dict and tcent do not refer to each other. Something is very wrong')
+        raise PypeItError('patt_dict and tcent do not refer to each other. Something is very wrong')
 
     # Perform final fit to the line IDs
     if thar:
@@ -391,7 +391,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
         maxiter = xfit.size - n_order - 2
         #
         if xfit.size == 0:
-            msgs.warn("All points rejected !!")
+            msgs.warning("All points rejected !!")
            return None
         # Fit
         pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func, maxiter=maxiter,
@@ -399,7 +399,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
                                        minx=fmin, maxx=fmax, weights=wfit)
         # Junk fit?
         if pypeitFit is None:
-            msgs.warn("Bad fit!!")
+            msgs.warning("Bad fit!!")
             return None
 
         # RMS is computed from `yfit`, which is the wavelengths of the lines. Convert to pixels.
@@ -434,7 +434,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
     # Final fit (originals can now be rejected)
     if len(ifit) <= n_final:
         n_order = len(ifit)-1
-        msgs.warn(f'Not enough lines for n_final! Fit order = {n_order}')
+        msgs.warning(f'Not enough lines for n_final! Fit order = {n_order}')
     xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
     pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func,
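The final-fit guard above encodes a basic constraint: an order-n polynomial has n+1 free coefficients, so n can be at most len(points)-1. A small self-contained sketch of that clamp (the helper name is local to the example):

    import numpy as np

    def clamped_polyfit(x, y, n_order):
        # Mirror the patch's guard: with too few lines, drop the order to
        # len(points) - 1 so the fit remains (minimally) determined.
        if len(x) <= n_order:
            n_order = len(x) - 1
        return np.polynomial.polynomial.polyfit(x, y, n_order)

    coef = clamped_polyfit(np.array([0., 1., 2.]), np.array([1., 2., 5.]), n_order=5)
    print(len(coef))  # 3 coefficients: the requested order 5 was clamped to 2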
diff --git a/pypeit/core/wavecal/wvutils.py b/pypeit/core/wavecal/wvutils.py
index fd7d0213c0..866706db41 100644
--- a/pypeit/core/wavecal/wvutils.py
+++ b/pypeit/core/wavecal/wvutils.py
@@ -78,7 +78,7 @@ def get_delta_wave(wave, wave_gpm, frac_spec_med_filter=0.03):
     """
     # Check input
     if wave.ndim != 1:
-        msgs.error('Input wavelength array must be 1D.')
+        raise PypeItError('Input wavelength array must be 1D.')
     nspec = wave.size
 
     # This needs to be an odd number
@@ -123,14 +123,14 @@ def get_sampling(waves, pix_per_R=3.0):
         elif waves.ndim == 2:
             waves_out = utils.array_to_explist(waves)
         else:
-            msgs.error('Array inputs can only be 1D or 2D')
+            raise PypeItError('Array inputs can only be 1D or 2D')
     elif isinstance(waves, list):
         ndim = np.array([wave.ndim for wave in waves], dtype=int)
         if np.any(ndim > 1):
-            msgs.error('Input list can only contain 1D arrays')
+            raise PypeItError('Input list can only contain 1D arrays')
         waves_out = waves
     else:
-        msgs.error('Input must be a list or numpy.ndarray')
+        raise PypeItError('Input must be a list or numpy.ndarray')
 
     wave_diff_flat = []
     dloglam_flat = []
@@ -144,7 +144,7 @@ def get_sampling(waves, pix_per_R=3.0):
     dloglam = np.median(dloglam_flat)
     # Check that this won't introduce a divide by zero
     if dloglam == 0.0:
-        msgs.error('The wavelength sampling has zero spacing in log wavelength. This is not supported.')
+        raise PypeItError('The wavelength sampling has zero spacing in log wavelength. This is not supported.')
     # Compute a guess of the resolution
     resln_guess = 1.0 / (pix_per_R* dloglam * np.log(10.0))
     pix_per_sigma = 1.0 / resln_guess / (dloglam * np.log(10.0)) / (2.0 * np.sqrt(2.0 * np.log(2)))
@@ -245,7 +245,7 @@ def get_wave_grid(waves=None, gpms=None, wave_method='linear', iref=0, wave_grid
 
     if wave_method in ['velocity', 'log10']:
         if dv is not None and dloglam is not None:
-            msgs.error('You can only specify dv or dloglam but not both')
+            raise PypeItError('You can only specify dv or dloglam but not both')
         elif dv is not None:
             dloglam_pix = dv/c_kms/np.log(10.0)
         elif dloglam is not None:
@@ -301,7 +301,7 @@ def get_wave_grid(waves=None, gpms=None, wave_method='linear', iref=0, wave_grid
             wave_grid = wave_tmp
 
     else:
-        msgs.error("Bad method for wavelength grid: {:s}".format(wave_method))
+        raise PypeItError("Bad method for wavelength grid: {:s}".format(wave_method))
 
     if wave_method in ['iref', 'concatenate', 'user_input']:
@@ -455,7 +455,7 @@ def zerolag_shift_stretch(theta, y1, y2, stretch_func = 'quadratic'):
     corr_zero = np.sum(y1*y2_corr)
     corr_denom = np.sqrt(np.sum(y1*y1)*np.sum(y2_corr*y2_corr))
     if corr_denom == 0.0:
-        msgs.warn('The shifted and stretched spectrum is zero everywhere. Cross-correlation cannot be performed. There is likely a bug somewhere')
+        msgs.warning('The shifted and stretched spectrum is zero everywhere. Cross-correlation cannot be performed. There is likely a bug somewhere')
         raise PypeItError()
     corr_norm = corr_zero / corr_denom
     return -corr_norm
@@ -515,7 +515,7 @@ def get_xcorr_arc(inspec1, sigdetect=5.0, input_thresh=None, sig_ceil=10.0, perc
         ampl_clip = np.clip(ampl, None, ceil_upper)
 
     if ampl_clip.size == 0:
-        msgs.warn('No lines were detected in the arc spectrum. Cannot create a synthetic arc spectrum for cross-correlation.')
+        msgs.warning('No lines were detected in the arc spectrum. Cannot create a synthetic arc spectrum for cross-correlation.')
         return np.zeros_like(inspec1)
 
     # Make a fake arc by plopping down Gaussians at the location of every centroided line we found
@@ -597,7 +597,7 @@ def xcorr_shift(inspec1, inspec2, percent_ceil=50.0, use_raw_arc=False, sigdetec
         y1, y2 = inspec1, inspec2
 
     if np.all(y1 == 0) or np.all(y2 == 0):
-        msgs.warn('One of the input spectra is all zeros. Returning shift = 0.0')
+        msgs.warning('One of the input spectra is all zeros. Returning shift = 0.0')
         return 0.0, 0.0
 
     nspec = y1.shape[0]
@@ -760,7 +760,7 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use
                            sig_ceil=sig_ceil, fwhm=fwhm)
 
     if np.all(y1 == 0) or np.all(y2 == 0):
-        msgs.warn('No lines detected punting on shift/stretch')
+        msgs.warning('No lines detected punting on shift/stretch')
         return 0, None, None, None, None, None, None
 
     # Do the cross-correlation first and determine the initial shift
@@ -782,12 +782,12 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use
             bounds = [lag_range, stretch_mnmx, (0.0,0.0)]
             x0_guess = np.array([shift_cc, 1.0, 0.0])
         else:
-            msgs.error('Unrecognized stretch_func')
+            raise PypeItError('Unrecognized stretch_func')
         result = scipy.optimize.differential_evolution(
             zerolag_shift_stretch, args=(y1,y2), x0=x0_guess, tol=toler,
             bounds=bounds, disp=False, polish=True, seed=seed)
     except PypeItError:
-        msgs.warn("Differential evolution failed.")
+        msgs.warning("Differential evolution failed.")
        return 0, None, None, None, None, None, None
 
     corr_de = -result.fun
     shift_de = result.x[0]
@@ -796,11 +796,11 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use
 
     if not result.success:
-        msgs.warn('Fit for shift and stretch did not converge!')
+        msgs.warning('Fit for shift and stretch did not converge!')
 
     if(corr_de < corr_cc):
         # Occasionally the differential evolution crapps out and returns a value worse that the CC value. In these cases just use the cc value
-        msgs.warn('Shift/Stretch optimizer performed worse than simple x-correlation.' +
+        msgs.warning('Shift/Stretch optimizer performed worse than simple x-correlation.' +
                   'Returning simple x-correlation shift and no stretch:' + msgs.newline() +
                   '  Optimizer: corr={:5.3f}, shift={:5.3f}, stretch={:7.5f}'.format(corr_de, shift_de,stretch_de) + msgs.newline() +
                   '  X-corr   : corr={:5.3f}, shift={:5.3f}'.format(corr_cc,shift_cc))
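The velocity-grid branch above converts a per-pixel velocity width into a log10-wavelength step via dloglam = dv / c / ln(10). A worked numeric sketch (c in km/s; the dv and wavelength limits are illustrative values, not defaults from the code):

    import numpy as np

    c_kms = 299792.458
    dv = 69.0                                # km/s per pixel, illustrative
    dloglam = dv / c_kms / np.log(10.0)      # ~1e-4 dex per pixel
    wave_min, wave_max = 3500.0, 10000.0     # Angstrom, illustrative
    npix = int(np.ceil(np.log10(wave_max / wave_min) / dloglam))
    wave_grid = wave_min * 10**(dloglam * np.arange(npix + 1))
    print(f'{dloglam:.3e} dex/pix -> {npix + 1} grid points')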
diff --git a/pypeit/datamodel.py b/pypeit/datamodel.py
index 169c4e599d..7f30e3fa22 100644
--- a/pypeit/datamodel.py
+++ b/pypeit/datamodel.py
@@ -676,7 +676,7 @@ def __init__(self, d=None):
                     else:
                         break
                 if dc is None:
-                    msgs.error(f'Could not assign dictionary element {key} to datamodel '
+                    raise PypeItError(f'Could not assign dictionary element {key} to datamodel '
                                f'for {self.__class__.__name__}.', cls='PypeItDataModelError')
                 setattr(self, key, dc)
                 continue
@@ -824,7 +824,7 @@ def _bundle(self, ext=None, transpose_arrays=False):
             try:
                 d = Table(d)
             except:
-                msgs.error(f'Cannot force all elements of {self.__class__.__name__} datamodel'
+                raise PypeItError(f'Cannot force all elements of {self.__class__.__name__} datamodel'
                            'into a single-row astropy Table!', cls='PypeItDataModelError')
             return [d] if ext is None else [{ext:d}]
@@ -959,7 +959,7 @@ def _parse(cls, hdu, ext=None, ext_pseudo=None, transpose_table_arrays=False,
 
         _ext_pseudo = _ext if ext_pseudo is None else np.atleast_1d(ext_pseudo)
         if len(_ext_pseudo) != len(_ext):
-            msgs.error(f'Length of provided extension pseudonym list must match number of '
+            raise PypeItError(f'Length of provided extension pseudonym list must match number of '
                        f'extensions selected: {len(_ext)}.', cls='PypeItDataModelError')
 
         str_ext = np.logical_not([isinstance(e, (int, np.integer)) for e in _ext_pseudo])
@@ -971,7 +971,7 @@ def _parse(cls, hdu, ext=None, ext_pseudo=None, transpose_table_arrays=False,
         # DataContainers that have no data, although such a usage case should be
         # rare.
         if np.all([_hdu[e].data is None for e in _ext]):
-            msgs.warn(f'Extensions to be read by {cls.__name__} have no data!')
+            msgs.warning(f'Extensions to be read by {cls.__name__} have no data!')
             # This is so that the returned booleans for reading the
             # data are not tripped as false!
             found_data = True
@@ -1144,15 +1144,15 @@ def _check_parsed(cls, version_passed, type_passed, chk_version=True):
                 Flag to impose strict version checking.
         """
         if not type_passed:
-            msgs.error(f'The HDU(s) cannot be parsed by a {cls.__name__} object!',
+            raise PypeItError(f'The HDU(s) cannot be parsed by a {cls.__name__} object!',
                        cls='PypeItDataModelError')
         if not version_passed:
             msg = f'Current version of {cls.__name__} object in code ({cls.version}) ' \
                   'does not match version used to write your HDU(s)!'
             if chk_version:
-                msgs.error(msg, cls='PypeItDataModelError')
+                raise PypeItError(msg, cls='PypeItDataModelError')
             else:
-                msgs.warn(msg)
+                msgs.warning(msg)
 
     def __getattr__(self, item):
         """Maps values to attributes.
@@ -1382,7 +1382,7 @@ def to_hdu(self, hdr=None, add_primary=False, primary_hdr=None,
             hdr_keys = np.array([k.upper() for k in self.keys()])
             indx = np.isin(hdr_keys, list(_primary_hdr.keys()))
             if np.sum(indx) > 1:
-                msgs.error('CODING ERROR: Primary header should not contain keywords that are the '
+                raise PypeItError('CODING ERROR: Primary header should not contain keywords that are the '
                            'same as the datamodel for {0}.'.format(self.__class__.__name__),
                            cls='PypeItDataModelError')
 
@@ -1392,7 +1392,7 @@ def to_hdu(self, hdr=None, add_primary=False, primary_hdr=None,
         # with any datamodel keys.
         if _hdr is not None \
                 and np.any(np.isin([k.upper() for k in self.keys()], list(_hdr.keys()))):
-            msgs.error('CODING ERROR: Baseline header should not contain keywords that are the '
+            raise PypeItError('CODING ERROR: Baseline header should not contain keywords that are the '
                        'same as the datamodel for {0}.'.format(self.__class__.__name__),
                        cls='PypeItDataModelError')
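The _check_parsed() change keeps the strict/lenient split: a version mismatch raises when chk_version is set and only warns otherwise. A compact sketch of the same control flow using stdlib logging (names are local to the example):

    import logging

    logger = logging.getLogger('pypeit-sketch')

    def check_version(found: str, expected: str, chk_version: bool = True):
        # Same policy as the patched _check_parsed(): strict mode raises a
        # catchable error; lenient mode downgrades the mismatch to a warning.
        if found == expected:
            return
        msg = f'File version {found} does not match code version {expected}!'
        if chk_version:
            raise ValueError(msg)
        logger.warning(msg)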
diff --git a/pypeit/display/display.py b/pypeit/display/display.py
index 62dabd9180..a836167105 100644
--- a/pypeit/display/display.py
+++ b/pypeit/display/display.py
@@ -73,7 +73,7 @@ def connect_to_ginga(host='localhost', port=grc.default_rc_port,
             else:
                 break
         if i == maxiter-1:
-            msgs.error('Timeout waiting for ginga to start.  If window does not appear, type '
+            raise PypeItError('Timeout waiting for ginga to start.  If window does not appear, type '
                        f'`ginga --rcport={port} --modules=RC,SlitWavelength` on the command line.  In either '
                        'case, wait for the ginga viewer to open and try the pypeit command '
                        'again.')
@@ -82,7 +82,7 @@ def connect_to_ginga(host='localhost', port=grc.default_rc_port,
         if raise_err:
             raise ValueError
         else:
-            msgs.warn('Problem connecting to Ginga.  Launch an RC Ginga viewer and '
+            msgs.warning('Problem connecting to Ginga.  Launch an RC Ginga viewer and '
                       f'then continue: \n    ginga --rcport={port} --modules=RC,SlitWavelength')
 
     # Return
@@ -357,7 +357,7 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids=
     nspec = _left.shape[0]
     if _right.shape[0] != nspec:
         # TODO: Any reason to remove this restriction?
-        msgs.error('Input left and right edges have different spectral lengths.')
+        raise PypeItError('Input left and right edges have different spectral lengths.')
 
     # Spectral pixel location
     if spec_vals is not None:
@@ -368,16 +368,16 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids=
     # Check input
     if synced:
         if left.shape != right.shape:
-            msgs.error('Input left and right traces must have the same shape if they have been '
+            raise PypeItError('Input left and right traces must have the same shape if they have been '
                        'synchronized into slits.')
         if left_ids is not None or right_ids is not None:
-            msgs.warn('For showing synced edges, left and right ID numbers are ignored.')
+            msgs.warning('For showing synced edges, left and right ID numbers are ignored.')
         nslits = _left.shape[1]
         _left_ids = None
         _right_ids = None
         _slit_ids = np.arange(nslits) if slit_ids is None else np.atleast_1d(slit_ids)
         if len(_slit_ids) != nslits:
-            msgs.error('Incorrect number of slit IDs provided.')
+            raise PypeItError('Incorrect number of slit IDs provided.')
         _slit_id_loc = _left + 0.45*(_right - _left)
         if maskdef_ids is not None and maskdef_ids.size == nslits:
             _maskdef_ids = np.atleast_1d(maskdef_ids)
@@ -386,11 +386,11 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids=
     else:
         _left_ids = -np.arange(nleft) if left_ids is None else np.atleast_1d(left_ids)
         if len(_left_ids) != nleft:
-            msgs.error('Incorrect number of left IDs provided.')
+            raise PypeItError('Incorrect number of left IDs provided.')
         _left_id_loc = _left*1.05
         _right_ids = -np.arange(nright) if right_ids is None else np.atleast_1d(right_ids)
         if len(_right_ids) != nright:
-            msgs.error('Incorrect number of right IDs provided.')
+            raise PypeItError('Incorrect number of right IDs provided.')
         _right_id_loc = _right*(1-0.05)
 
     # Canvas
@@ -577,7 +577,7 @@ def show_tilts(viewer, ch, tilt_traces, yoff=0., xoff=0., points=True, nspec=Non
     """
     if tilt_traces is None:
-        return msgs.error('No tilts have been traced or fitted')
+        raise PypeItError('No tilts have been traced or fitted')
 
     canvas = viewer.canvas(ch._chname)
     if clear_canvas:
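The edgetrace.py changes that follow keep synced_selection()'s pairwise logic, where left/right edges alternate in the trace array and the 'both' mode propagates a flag to an edge's partner while 'neither' keeps a flag only when both edges of a pair carry it. A standalone demo of the reshape/repeat idiom used there:

    import numpy as np

    # Edge flags for 3 slits = 6 traces, ordered left, right, left, right, ...
    indx = np.array([True, False, False, False, False, True])

    # mode='both': flag the partner edge too (any of the pair -> both)
    both = np.repeat(np.any(indx.reshape(-1, 2), axis=1), 2)
    # mode='neither': keep a flag only if both edges of the pair carry it
    neither = np.repeat(np.all(indx.reshape(-1, 2), axis=1), 2)

    print(both)     # [ True  True False False  True  True]
    print(neither)  # [False False False False False False]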
diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py
index 1b29775571..ae2f246595 100644
--- a/pypeit/edgetrace.py
+++ b/pypeit/edgetrace.py
@@ -425,11 +425,11 @@ def __init__(self, traceimg, spectrograph, par, qa_path=None, auto=False, debug=
 
         # Check input types
         if not isinstance(traceimg, TraceImage):
-            msgs.error('Input traceimg must be a TraceImage object.')
+            raise PypeItError('Input traceimg must be a TraceImage object.')
         if not isinstance(spectrograph, Spectrograph):
-            msgs.error('Input spectrograph must be a Spectrograph object.')
+            raise PypeItError('Input spectrograph must be a Spectrograph object.')
         if not isinstance(par, EdgeTracePar):
-            msgs.error('Input par must be an EdgeTracePar object.')
+            raise PypeItError('Input par must be an EdgeTracePar object.')
 
         self.traceimg = traceimg                        # Input TraceImage
         self.nspec, self.nspat = self.traceimg.shape    # The shape of the trace image
@@ -494,7 +494,7 @@ def ntrace(self):
     def nslits(self):
         if self.is_synced:
             return self.ntrace//2
-        msgs.error('Number of slits undefined because edges are not left-right synchronized.')
+        raise PypeItError('Number of slits undefined because edges are not left-right synchronized.')
 
     # TODO: Add self.design to the data model when we're ready to match
     # to the slit-mask design data.
@@ -639,7 +639,7 @@ def rectify(self, flux, bpm=None, extract_width=None, mask_threshold=0.5, side='
             image and its boolean bad-pixel mask.
         """
         if self.pcatype is None:
-            msgs.error('Must first run the PCA analysis for the traces; run build_pca.')
+            raise PypeItError('Must first run the PCA analysis for the traces; run build_pca.')
         _pca = (self.left_pca if side == 'left' else self.right_pca) \
                     if self.par['left_right_pca'] else self.pca
@@ -770,7 +770,7 @@ def auto_trace(self, bpm=None, debug=0):
         if debug > 0:
             self.show(title='After matching to slit-mask design metadata.')
         if np.all(self.bitmask.flagged(self.edge_msk, self.bitmask.bad_flags)):
-            msgs.error('All traces masked!  Problem with mask-design matching, which may be '
+            raise PypeItError('All traces masked!  Problem with mask-design matching, which may be '
                        'due to spurious edges.  Try changing the edge detection threshold '
                        '(edge_thresh) and troubleshooting the problem using the '
                        'pypeit_trace_edges script.')
 
         if self.par['auto_pca'] and not self.can_pca() and not self.is_empty and self.par['sync_predict'] == 'pca':
             # TODO: This causes the code to fault. Maybe there's a way
             # to catch this earlier on?
-            msgs.warn('Sync predict cannot use PCA because too few edges were found. If you are '
+            msgs.warning('Sync predict cannot use PCA because too few edges were found. If you are '
                       'reducing multislit or echelle data, you may need a better trace image or '
                       'change the mode used to predict traces (see below). If you are reducing '
                       'longslit data, make sure to set the sync_predict parameter to nearest: '
@@ -811,7 +811,7 @@ def auto_trace(self, bpm=None, debug=0):
             self.order_refine(debug=debug > 1)
             # Check that the edges are still synced
             if not self.is_synced:
-                msgs.error('Traces are no longer synced after adding in missed orders.')
+                raise PypeItError('Traces are no longer synced after adding in missed orders.')
 
             # KBW: Keep this code around for a while.  It is the old code that
             # resynced the edges just after adding in new orders.  Nominally, this
@@ -909,7 +909,7 @@ def initial_trace(self, bpm=None):
         # just the ones flagged as BPM?
         self.tracebpm = self.traceimg.select_flag(flag='BPM') if bpm is None else bpm.astype(bool)
         if self.tracebpm.shape != self.traceimg.shape:
-            msgs.error('Mask is not the same shape as the trace image.')
+            raise PypeItError('Mask is not the same shape as the trace image.')
 
         # Lightly smooth the image before using it to trace edges
         # TODO: Make this filter size a parameter?
@@ -975,7 +975,7 @@ def initial_trace(self, bpm=None):
 
         # Check that edges were found
         if np.all(trace_id_img == 0):
-            msgs.warn('No edges found!  Trace data will be empty.')
+            msgs.warning('No edges found!  Trace data will be empty.')
             self._reinit_trace_data()
             self.log = [inspect.stack()[0][3]]
             return
@@ -1041,7 +1041,7 @@ def _parse_exclude_regions(self):
             and ending pixels of the regions to exclude in this detector
         """
         if self.par['exclude_regions'] is None:
-            msgs.error('No regions to exclude have been provided. '
+            raise PypeItError('No regions to exclude have been provided. '
                        'To do so, see parameter `exclude_regions` in `EdgeTracePar`')
 
         # create the arrays with det, starting pixels and ending pixels
@@ -1209,7 +1209,7 @@ def from_hdu(cls, hdu, hdu_prefix=None, chk_version=True):
         # Check if there should be any PCAs
         parsed_pcas = np.any(['PCA' in h for h in parsed_hdus])
         if d['pcatype'] is not None and not parsed_pcas:
-            msgs.error('CODING ERROR: Expect to parse PCA headers if pcatype is present.')
+            raise PypeItError('CODING ERROR: Expect to parse PCA headers if pcatype is present.')
 
         # Instantiate the TracePCAs using the appropriate hdus.
         if d['pcatype'] is not None:
@@ -1253,7 +1253,7 @@ def from_hdu(cls, hdu, hdu_prefix=None, chk_version=True):
         # Check the bitmasks
         hdr_bitmask = BitMask.from_header(hdu['SOBELSIG'].header)
         if chk_version and hdr_bitmask.bits != self.bitmask.bits:
-            msgs.error('The bitmask in this fits file appear to be out of date!  Recreate this '
+            raise PypeItError('The bitmask in this fits file appears to be out of date!  Recreate this '
                        'file by re-running the relevant script or set chk_version=False.',
                        cls='PypeItBitMaskError')
@@ -1335,7 +1335,7 @@ def show(self, include_error=False, thin=10, in_ginga=False, include_img=True,
                 ginga).  If None, plot is not given a title.
         """
         if include_img and include_sobel:
-            msgs.error('Cannot show both the trace image and the filtered version.')
+            raise PypeItError('Cannot show both the trace image and the filtered version.')
 
         # TODO: Clean and consolidate the objects needed for either the
         # ginga or matplotlib methods so that this isn't as onerous.
@@ -1380,7 +1380,7 @@ def show(self, include_error=False, thin=10, in_ginga=False, include_img=True,
             # Use the provided SlitTraceSet
             _include_error = False
             if include_error:
-                msgs.warn('SlitTraceSet object has no errors.')
+                msgs.warning('SlitTraceSet object has no errors.')
             left, right, _ = slits.select_edges()  # original=original)
             cen = np.hstack((left,right))
             fit = cen
@@ -1530,7 +1530,7 @@ def qa_plot(self, fileroot=None, min_spat=20):
                 trace.
         """
         if self.is_empty:
-            msgs.error('No traces for QA plot.')
+            raise PypeItError('No traces for QA plot.')
 
         # Restore matplotlib defaults
         # TODO: Is this going to screw up later plots?
@@ -1669,7 +1669,7 @@ def _side_dependent_sobel(self, side):
                                                    bpm=self.tracebpm, boxcar=boxcar, side='right')
             return self.sobelsig_right
-        msgs.error('Side must be left or right.')
+        raise PypeItError('Side must be left or right.')
 
     def centroid_refine(self, follow=True, start_indx=None, continuous=False, use_fit=False):
         """
@@ -1761,11 +1761,11 @@ def centroid_refine(self, follow=True, start_indx=None, continuous=False, use_fi
         # Check that there are traces to refine!
         if self.is_empty:
             # TODO: Continue to have this fault?
-            msgs.error('No traces are defined.')
+            raise PypeItError('No traces are defined.')
 
         # Check input
         if use_fit and self.edge_fit is None:
-            msgs.error('No fit data available.')
+            raise PypeItError('No fit data available.')
 
         # Parse parameters and report
         width = 2 * self.par['fwhm_uniform']
@@ -1837,7 +1837,7 @@ def centroid_refine(self, follow=True, start_indx=None, continuous=False, use_fi
                 # Something has gone wrong
                 # TODO: Get rid of this when convinced it won't
                 # get tripped...
-                msgs.error('Traces remain but could not select good starting position.')
+                raise PypeItError('Traces remain but could not select good starting position.')
 
             ## TODO row and column should not be used here in the output. Adopt the PypeIt convention spec, spat
             msgs.info('Following {0} {1} edge(s) '.format(np.sum(to_trace), side)
@@ -1925,13 +1925,13 @@ def trace_pixels_off_detector(self, cen=None):
         """
         buff = 0 if self.par['det_buffer'] is None else self.par['det_buffer']
         if buff < 0:
-            msgs.warn('Detector buffer must be >=0 (input was {0}).  Setting buffer to 0.'.format(
+            msgs.warning('Detector buffer must be >=0 (input was {0}).  Setting buffer to 0.'.format(
                       self.par['det_buffer']))
             buff = 0
         if cen is None:
             cen = self.edge_cen
         if cen is None:
-            msgs.error('No trace locations!')
+            raise PypeItError('No trace locations!')
         return (cen < buff) | (cen > self.nspat - buff)
 
     def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_spatial=None,
@@ -2002,7 +2002,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
         """
         if self.is_empty:
-            msgs.warn('No traces to check.')
+            msgs.warning('No traces to check.')
 
         # Indices of traces to check
         indx = np.ones(self.ntrace, dtype=bool) if subset is None else subset
@@ -2128,12 +2128,12 @@ def merge_traces(self, merge_frac=0.5, refit=True, debug=False):
                 True.
         """
         if self.is_empty:
-            msgs.warn('No traces to merge.')
+            msgs.warning('No traces to merge.')
         if self.edge_fit is None:
-            msgs.error('Trace merging requires model fits to the trace location; run fit_refine.')
+            raise PypeItError('Trace merging requires model fits to the trace location; run fit_refine.')
         _refit = refit
         if refit and self.edge_fit is None:
-            msgs.warn('No previous fits existed, so fitting will not be redone.')
+            msgs.warning('No previous fits existed, so fitting will not be redone.')
             _refit = False
 
         # Construct the bad pixel mask depending whether we matching
@@ -2197,14 +2197,14 @@ def merge_traces(self, merge_frac=0.5, refit=True, debug=False):
     def is_left(self):
         """Boolean array selecting the left traces."""
         if self.is_empty:
-            msgs.error('No traces!')
+            raise PypeItError('No traces!')
         return self.traceid < 0
 
     @property
     def is_right(self):
         """Boolean array selecting the right traces."""
         if self.is_empty:
-            msgs.error('No traces!')
+            raise PypeItError('No traces!')
         return self.traceid > 0
 
     @property
@@ -2233,7 +2233,7 @@ def good_traces(self, include_box=False, good_orders=False):
                 trace},)` flagging good traces.
         """
         if self.spectrograph.pypeline == 'Echelle' and good_orders and self.orderid is None:
-            msgs.warn('Orders undefined!  Selecting all traces.  To select good orders only, first '
+            msgs.warning('Orders undefined!  Selecting all traces.  To select good orders only, first '
                       'run match_order().')
         bad_flags = self.bitmask.bad_flags
         exclude = self.bitmask.insert_flags
@@ -2289,7 +2289,7 @@ def _masked_single_slit(self, trace_cen):
         if self.ntrace != 2:
             raise ValueError('Coding error: Should only get here if there are two traces.')
 
-        msgs.warn('The single slit found has been rejected because it is too short.  If this '
+        msgs.warning('The single slit found has been rejected because it is too short.  If this '
                   'was by mistake, re-run pypeit with a smaller `minimum_slit_length` parameter.'
                   '  Otherwise, we assume this is a long-slit with one edge off the detector '
                   'and with the current slit edges errantly isolating some feature in the data.')
 
        # TODO: May want to limit the number of columns included in this calculation.
         if np.mean(self.traceimg.image[:,int(np.ceil(np.max(trace_cen[:,1]))):]) \
                 > np.mean(self.traceimg.image[:,:int(np.floor(np.min(trace_cen[:,0])))]):
-            msgs.warn('The mean of the trace image to the right of the right trace is larger '
+            msgs.warning('The mean of the trace image to the right of the right trace is larger '
                       'than it is to the left of the left trace.  Removing the right trace and '
                       're-synchronizing.')
             self.remove_traces(np.array([False,True]))
         else:
-            msgs.warn('The mean of the trace image to the left of the left trace is larger than '
+            msgs.warning('The mean of the trace image to the left of the left trace is larger than '
                       'it is to the right of the right trace.  Removing the right trace and '
                       're-synchronizing.')
             self.remove_traces(np.array([True,False]))
@@ -2331,7 +2331,7 @@ def _flag_edges(self, trace_cen, indx, flg):
                 # the potential to yield an infinite loop, but it's
                 # also the simplest approach.
                 return self._masked_single_slit(trace_cen)
-            msgs.warn('All slits have been flagged!')
+            msgs.warning('All slits have been flagged!')
         if np.any(indx):
             msgs.info(f'Flagging {np.sum(indx)//2} slits as {flg}!')
             self.edge_msk[:,indx] = self.bitmask.turn_on(self.edge_msk[:,indx], flg)
@@ -2404,13 +2404,13 @@ def check_synced(self, rebuild_pca=False):
             re-syncronized.
         """
         if self.is_empty:
-            msgs.warn('No traces to check.')
+            msgs.warning('No traces to check.')
             return
 
         # Decide if the PCA should be rebuilt
         _rebuild_pca = rebuild_pca and self.pcatype is not None and self.can_pca()
         if rebuild_pca and not _rebuild_pca:
-            msgs.warn('Rebuilding the PCA was requested but is not possible.')
+            msgs.warning('Rebuilding the PCA was requested but is not possible.')
 
         # Remove any fully masked traces and its synced counterpart;
         # force the removal of traces marked as SYNCERROR, even if
@@ -2432,7 +2432,7 @@ def check_synced(self, rebuild_pca=False):
 
         # Check the slits are synced
         if not self.is_synced:
-            msgs.error('Edge traces are not yet (or improperly) synced.  Either sync() failed '
+            raise PypeItError('Edge traces are not yet (or improperly) synced.  Either sync() failed '
                        'or has not yet been executed.')
 
         # Parse parameters and report
@@ -2469,7 +2469,7 @@ def check_synced(self, rebuild_pca=False):
                       f'Range in slit length limited to +/-{length_rtol*100:.1f}%')
 
         if length_rtol is None and self.par['overlap']:
-            msgs.warn('Overlap keyword ignored!  Must set length_range to identify abnormally '
+            msgs.warning('Overlap keyword ignored!  Must set length_range to identify abnormally '
                       'short slits.')
 
         # TODO: Should here and below only use the unmasked parts of
@@ -2488,7 +2488,7 @@ def check_synced(self, rebuild_pca=False):
             # TODO: This should never happen, but keep this around
             # until we're sure it doesn't.
             if self.is_empty:
-                msgs.error('Coding error: Removing gaps removed all traces.')
+                raise PypeItError('Coding error: Removing gaps removed all traces.')
             # Reset the trace center data to use
             trace_cen = self.edge_cen if self.edge_fit is None else self.edge_fit
@@ -2608,7 +2608,7 @@ def check_synced(self, rebuild_pca=False):
             # Remove 'em
             self.remove_traces(rmtrace, rebuild_pca=_rebuild_pca)
             if self.is_empty:
-                msgs.warn('Assuming a single long-slit and continuing.')
+                msgs.warning('Assuming a single long-slit and continuing.')
                 self.bound_detector()
         return True
 
@@ -2632,11 +2632,11 @@ def rm_user_traces(self, rm_traces):
                 Flag that traces were removed.
         """
         if not self.is_synced:
-            msgs.error('Trace removal should only be executed after traces have been '
+            raise PypeItError('Trace removal should only be executed after traces have been '
                        'synchronized into left-right slit pairs; run sync()')
 
         if not isinstance(rm_traces, list):
-            msgs.error(f'Input to rm_user_traces must be a list, not {type(rm_traces)}')
+            raise PypeItError(f'Input to rm_user_traces must be a list, not {type(rm_traces)}')
 
         if isinstance(rm_traces[0], str):
             # NOTE: Ignores any negatives in the definition of the detector
@@ -2662,12 +2662,12 @@ def rm_user_traces(self, rm_traces):
             slit_to_remove = (lefts[y_spec,:] < xcen) & (rights[y_spec,:] > xcen)
             # Any slits found?
             if not np.any(slit_to_remove):
-                msgs.warn(f'No slit found to remove at pixel {y_spec}:{xcen} on '
+                msgs.warning(f'No slit found to remove at pixel {y_spec}:{xcen} on '
                           f'{self.traceimg.detector.name}.')
                 continue
             # More than one slit found?
             if np.sum(slit_to_remove) != 1:
-                msgs.error(f'Found *more than one slit* that covers pixel {y_spec}:{xcen} on '
+                raise PypeItError(f'Found *more than one slit* that covers pixel {y_spec}:{xcen} on '
                            f'{self.traceimg.detector.name}.  Something went wrong during tracing.'
                            '  Refine your tracing parameters and try again.')
             idx = np.where(slit_to_remove)[0][0]
@@ -2720,11 +2720,11 @@ def remove_traces(self, indx, resort=True, rebuild_pca=False):
         """
         # Make sure there are traces to remove
         if not np.any(indx):
-            msgs.warn('No trace to remove.')
+            msgs.warning('No trace to remove.')
             return
 
         if np.all(indx):
-            msgs.warn('All traces removed!')
+            msgs.warning('All traces removed!')
             self._reinit_trace_data()
             return
@@ -2796,10 +2796,10 @@ def synced_selection(self, indx, mode='ignore', assume_synced=False):
             return indx
 
         if indx.size != self.ntrace:
-            msgs.error('Boolean array selecting traces to remove has incorrect length.')
+            raise PypeItError('Boolean array selecting traces to remove has incorrect length.')
 
         if not assume_synced and not self.is_synced:
-            msgs.error('To synchronize the trace selection, it is expected that the traces have '
+            raise PypeItError('To synchronize the trace selection, it is expected that the traces have '
                        'been left-right synchronized.  Either run sync() to sychronize or ignore '
                        'the synchronization (which may raise an exception) by setting '
                        'assume_synced=True.')
@@ -2807,7 +2807,7 @@ def synced_selection(self, indx, mode='ignore', assume_synced=False):
             return np.repeat(np.any(indx.reshape(-1,2), axis=1), 2)
         elif mode == 'neither':
             return np.repeat(np.all(indx.reshape(-1,2), axis=1), 2)
-        msgs.error('Unknown synchronized trace selection mode: {0}'.format(mode))
+        raise PypeItError('Unknown synchronized trace selection mode: {0}'.format(mode))
 
     def clean_traces(self, force_flag=None, rebuild_pca=True, sync_mode='ignore',
                      assume_synced=False):
@@ -2844,7 +2844,7 @@ def clean_traces(self, force_flag=None, rebuild_pca=True, sync_mode='ignore',
                 :func:`synced_selection`.
         """
         if self.is_empty:
-            msgs.warn('No traces to clean.')
+            msgs.warning('No traces to clean.')
             return
 
         # Traces to remove
@@ -2897,11 +2897,11 @@ def spatial_sort(self, use_mean=False, use_fit=True):
                 location is masked.
         """
         if self.is_empty:
-            msgs.error('No traces to sort.')
+            raise PypeItError('No traces to sort.')
 
         # Check input
         if use_fit and self.edge_fit is None:
-            msgs.warn('Fit data is not available; cannot use it for spatially sorting the edges.')
+            msgs.warning('Fit data is not available; cannot use it for spatially sorting the edges.')
 
         # Set up the coordinates to use
         bpm = self.bitmask.flagged(self.edge_msk, self.bitmask.bad_flags)
@@ -3010,7 +3010,7 @@ def fit_refine(self, weighting='uniform', debug=False, idx=None):
         """
         # Check that there are traces to refine!
         if self.is_empty:
-            msgs.error('No traces to refine!')
+            raise PypeItError('No traces to refine!')
 
         # Parse parameters and report
         maxshift = self.par['max_shift_abs']
@@ -3181,12 +3181,12 @@ def predict_traces(self, edge_cen, side=None):
                 returned.
         """
         if self.pcatype is None:
-            msgs.error('Must first run the PCA analysis fo the traces; run build_pca.')
+            raise PypeItError('Must first run the PCA analysis for the traces; run build_pca.')
 
         _edge_cen = np.atleast_1d(edge_cen)
         _side = np.atleast_1d(side)
         if _edge_cen.size != _side.size:
-            msgs.error('Spatial locations and side integers must have the same shape.')
+            raise PypeItError('Spatial locations and side integers must have the same shape.')
 
         if self.par['left_right_pca']:
             trace_add = np.zeros((self.nspec,_side.size), dtype='float')
@@ -3242,7 +3242,7 @@ def build_pca(self, use_center=False, debug=False):
                 Run in debug mode.
         """
         if self.is_empty:
-            msgs.error('No traces exist.')
+            raise PypeItError('No traces exist.')
 
         # Parse parameters and report
         left_right_pca = self.par['left_right_pca']
@@ -3273,13 +3273,13 @@ def build_pca(self, use_center=False, debug=False):
 
         # Check the state of the current object
         if self.pcatype is not None:
-            msgs.warn('PCA model already exists and will be overwritten.')
+            msgs.warning('PCA model already exists and will be overwritten.')
         if self.edge_fit is None and not use_center:
-            msgs.warn('No trace fits exits.  PCA based on trace centroid measurements.')
+            msgs.warning('No trace fits exist.  PCA based on trace centroid measurements.')
 
         # Check if the PCA decomposition can be performed
         if not self.can_pca():
-            msgs.error('Traces do not meet necessary criteria for the PCA decomposition.')
+            raise PypeItError('Traces do not meet necessary criteria for the PCA decomposition.')
 
         # Set the data used to construct the PCA
         self.pcatype = 'center' if self.edge_fit is None or use_center else 'fit'
@@ -3385,7 +3385,7 @@ def pca_refine(self, use_center=False, debug=False, force=False):
                 already been done.
         """
         if self.is_empty:
-            msgs.error('No traces to refine!')
+            raise PypeItError('No traces to refine!')
 
         # Perform the PCA decomposition if necessary
         _pcatype = 'center' if use_center or self.edge_fit is None else 'fit'
@@ -3464,10 +3464,10 @@ def peak_refine(self, rebuild_pca=False, show_fits=False, show_peaks=False):
         """
         # Check that there are traces to refine!
         if self.is_empty:
-            msgs.error('No traces are defined.')
+            raise PypeItError('No traces are defined.')
 
         if self.pcatype is None:
-            msgs.error('Must first run the PCA analysis fo the traces; run build_pca.')
+            raise PypeItError('Must first run the PCA analysis for the traces; run build_pca.')
 
         # Parse parameters and report
         peak_thresh = self.par['edge_thresh']
@@ -3568,7 +3568,7 @@ def peak_refine(self, rebuild_pca=False, show_fits=False, show_peaks=False):
         # Assess the output
         ntrace = fit.shape[1]
         if ntrace < self.ntrace:
-            msgs.warn('Found fewer traces using peak finding than originally available.  '
+            msgs.warning('Found fewer traces using peak finding than originally available.  '
                       'May want to reset peak threshold.')
 
         if self.par['trace_rms_tol'] is not None:
@@ -3743,7 +3743,7 @@ def _get_reference_locations(self, trace_cen, add_edge):
         # Check that the trace data are sorted at this spectral row
         if not np.array_equal(np.arange(trace_cen.shape[1]),
                               np.argsort(trace_cen[reference_row,:])):
-            msgs.error('Trace data must be spatially sorted.')
+            raise PypeItError('Trace data must be spatially sorted.')
 
         # Build a masked array with the trace positions at that
         # spectral row, masked where new traces are supposed to go.
@@ -3773,7 +3773,7 @@ def _get_reference_locations(self, trace_cen, add_edge):
             # The offset is the slit length of the nearest valid slit
             offset = slit_length.data[nearest]
         else:
-            msgs.error('Unknown trace centering mode: {0}'.format(center_mode))
+            raise PypeItError('Unknown trace centering mode: {0}'.format(center_mode))
 
         # Set the new edge trace reference locations
         for slit in range(nslits):
@@ -3798,7 +3798,7 @@ def _get_reference_locations(self, trace_cen, add_edge):
         # TODO: Nothing should now be masked.  Get rid of this once
         # satisfied that the coding is correct.
         if np.any(trace_ref.mask):
-            msgs.error('Coding error: this should not happen')
+            raise PypeItError('Coding error: this should not happen')
         trace_ref = trace_ref.data.ravel()
 
         # Check that the predicted reference positions don't cause slit
@@ -3815,7 +3815,7 @@ def _get_reference_locations(self, trace_cen, add_edge):
             trace_ref[indx[too_hi]] = trace_ref[indx[too_hi]+1] - gap_offset
             noffset += np.sum(too_hi)
         if noffset > 0:
-            msgs.warn('Reference locations for {0} slit edges adjusted '.format(noffset)
+            msgs.warning('Reference locations for {0} slit edges adjusted '.format(noffset)
                       + 'to have a slit gap of {0} pixel(s).'.format(gap_offset))
 
         return trace_ref
@@ -3850,10 +3850,10 @@ def nudge_traces(self, trace_cen):
             return trace_cen
         # Check vector size
         if trace_cen.shape[0] != self.nspec:
-            msgs.error('Traces have incorrect length.')
+            raise PypeItError('Traces have incorrect length.')
         _buffer = self.par['det_buffer']
         if _buffer < 0:
-            msgs.warn('Buffer must be greater than 0; ignoring.')
+            msgs.warning('Buffer must be greater than 0; ignoring.')
             _buffer = 0
 
         if self.par['max_nudge'] is not None:
@@ -3955,7 +3955,7 @@ def sync(self, rebuild_pca=True, debug=False):
         if self.is_empty:
             if not self.par['bound_detector']:
                 return False
-            msgs.warn('No traces left!  Left and right edges placed at detector boundaries.')
+            msgs.warning('No traces left!  Left and right edges placed at detector boundaries.')
             self.bound_detector()
 
         # Make sure that the traces are sorted spatially
@@ -3971,9 +3971,9 @@ def sync(self, rebuild_pca=True, debug=False):
 
         # Edges are currently not synced, so check the input
         if self.par['sync_predict'] not in ['pca', 'nearest', 'auto']:
-            msgs.error('Unknown trace mode: {0}'.format(self.par['sync_predict']))
+            raise PypeItError('Unknown trace mode: {0}'.format(self.par['sync_predict']))
         if self.par['sync_predict'] == 'pca' and self.pcatype is None:
-            msgs.error('The PCA decomposition does not exist.  Either run self.build_pca or use '
+            raise PypeItError('The PCA decomposition does not exist.  Either run self.build_pca or use '
                        'a different trace_mode.')
 
         # Find the edges to add, what side they're on, and where to
@@ -3995,14 +3995,14 @@ def sync(self, rebuild_pca=True, debug=False):
 
         # If there was only one edge, just add the other one
         if side.size == 2:
-            msgs.warn('Only one edge traced.  Ignoring center_mode and adding edge at the '
+            msgs.warning('Only one edge traced.  Ignoring center_mode and adding edge at the '
                       'opposite edge of the detector.')
             msgs.info('Detector edge buffer: {0}'.format(self.par['det_buffer']))
             # TODO: PCA would have failed because there needs to be at
             # least two traces.  Get rid of this test once satisfied
             # that this exception is never raised...
            if self.par['sync_predict'] == 'pca':
-                msgs.error('Coding error: this should not happen.')
+                raise PypeItError('Coding error: this should not happen.')
             # Set the offset to add to the existing trace
             offset = self.par['det_buffer'] - np.amin(trace_cen[:,0]) if add_edge[0] \
                         else self.nspat - np.amax(trace_cen[:,0]) - self.par['det_buffer']
@@ -4065,10 +4065,10 @@ def sync(self, rebuild_pca=True, debug=False):
             indx[::2] = side[::2] != -1
             indx[1::2] = side[1::2] != 1
             if np.all(indx):
-                msgs.error('Catastrophic error in left-right synchronization.  Edge order is not '
+                raise PypeItError('Catastrophic error in left-right synchronization.  Edge order is not '
                            'correctly sorted.')
             if np.any(indx):
-                msgs.warn('Synchronized traces are not properly ordered, likely because they '
+                msgs.warning('Synchronized traces are not properly ordered, likely because they '
                           'have been placed close to the detector edges.  Flagging '
                           '{0} traces that are not properly sorted for removal.'.format(np.sum(indx)))
                 # Mask the traces as due to a synchronization error
@@ -4088,7 +4088,7 @@ def sync(self, rebuild_pca=True, debug=False):
             i += 1
             if i == maxiter:
-                msgs.error('Fatal left-right trace de-synchronization error.')
+                raise PypeItError('Fatal left-right trace de-synchronization error.')
 
         if self.log is not None:
             self.log += [inspect.stack()[0][3]]
@@ -4125,11 +4125,11 @@ def add_user_traces(self, add_traces, method='straight'):
                 function will try to (re)build it.
         """
         if not self.is_empty and not self.is_synced:
-            msgs.error('Adding traces should only be executed after traces have been '
+            raise PypeItError('Adding traces should only be executed after traces have been '
                        'synchronized into left-right slit pairs; run sync()')
 
         if not isinstance(add_traces, list):
-            msgs.error(f'Input to add_user_traces must be a list, not {type(add_traces)}')
+            raise PypeItError(f'Input to add_user_traces must be a list, not {type(add_traces)}')
 
         if isinstance(add_traces[0], str):
             # NOTE: Ignores any negatives in the definition of the detector
@@ -4156,7 +4156,7 @@ def add_user_traces(self, add_traces, method='straight'):
             lindx = (x_start < lefts[y_spec,:]) & (x_end > lefts[y_spec,:])
             rindx = (x_start < rights[y_spec,:]) & (x_end > rights[y_spec,:])
             if any(lindx) or any(rindx):
-                msgs.warn(f'Inserted slit at {y_spec}:{x_start}:{x_end} on '
+                msgs.warning(f'Inserted slit at {y_spec}:{x_start}:{x_end} on '
                           f'{self.traceimg.detector.name} overlaps with an existing slit! '
                           'New slit will *not* be added.')
                 keep[i] = False
@@ -4183,7 +4183,7 @@ def add_user_traces(self, add_traces, method='straight'):
             new_traces = np.tile(new_trace_coo[:,1], (self.nspec,1))
         elif method == 'nearest':
             if self.is_empty:
-                msgs.error('No edge traces currently exist.  Cannot insert user slits with a '
+                raise PypeItError('No edge traces currently exist.  Cannot insert user slits with a '
                            'shape based on the nearest existing slit edges!  '
                           'Set add_predict = straight.')
             # Use the measured edges if the functional forms don't exist (yet)
@@ -4196,12 +4196,12 @@ def add_user_traces(self, add_traces, method='straight'):
                             - trace_cen[new_trace_coo[:,0].astype(int), nearest]
         elif method == 'pca':
             if self.is_empty:
-                msgs.error('No edge traces currently exist.  Cannot insert user slits with a '
+                raise PypeItError('No edge traces currently exist.  Cannot insert user slits with a '
                            'shape based on the PCA decomposition of the existing slit edges!  '
                            'Set add_predict = straight.')
             if self.pcatype is None:
                 if not self.can_pca():
-                    msgs.error('PCA does not exist and cannot be constructed!  Cannot insert user '
+                    raise PypeItError('PCA does not exist and cannot be constructed!  Cannot insert user '
                                'slits with a shape based on the PCA decomposition of the existing '
                                'slit edges!  Use add_predict = straight or nearest.')
                 self.build_pca()
@@ -4218,7 +4218,7 @@ def add_user_traces(self, add_traces, method='straight'):
                             - new_traces[new_trace_coo[:,0].astype(int),np.arange(n_add*2)]
             new_traces = self.predict_traces(trace_ref, side=side)
         else:
-            msgs.error(f'Unknown method for adding user slit: {method}')
+            raise PypeItError(f'Unknown method for adding user slit: {method}')
 
         # Insert
         self.insert_traces(side, new_traces, mode='user')
@@ -4293,7 +4293,7 @@ def insert_traces(self, side, trace_cen, loc=None, mode='user', resort=True, nud
         # remove any existing array and warn the user they they'll need to
         # rematch the orders.
         if self.orderid is not None:
-            msgs.warn('Inserting traces invalidates order matching.  Removing.')
+            msgs.warning('Inserting traces invalidates order matching.  Removing.')
Removing.') self.orderid = None # Check input @@ -4301,12 +4301,12 @@ def insert_traces(self, side, trace_cen, loc=None, mode='user', resort=True, nud ntrace = _side.size _trace_cen = trace_cen.reshape(-1,1) if trace_cen.ndim == 1 else trace_cen if _trace_cen.shape[1] != ntrace: - msgs.error('Number of sides does not match the number of traces to insert.') + raise PypeItError('Number of sides does not match the number of traces to insert.') if loc is None: # Insertion locations not provided so append loc = np.full(ntrace, self.ntrace, dtype=int) if loc.size != ntrace: - msgs.error('Number of sides does not match the number of insertion locations.') + raise PypeItError('Number of sides does not match the number of insertion locations.') msgs.info(f'Inserting {ntrace} new traces.') @@ -4453,7 +4453,7 @@ def maskdesign_matching(self, debug=False): # Check that there are still traces to match! if self.is_empty: - msgs.warn('No edges traced. Slitmask matching cannot be performed') + msgs.warning('No edges traced. Slitmask matching cannot be performed') return # `traceimg` must have knowledge of the flat frame that built it @@ -4469,7 +4469,7 @@ def maskdesign_matching(self, debug=False): debug=debug) if omodel_bspat[omodel_bspat!=-1].size < 3: - msgs.warn('Less than 3 slits are expected on this detector, slitmask matching cannot be performed') + msgs.warning('Fewer than 3 slits are expected on this detector; slitmask matching cannot be performed') # update minimum_slit_gap and minimum_slit_length_sci par # this will allow us to catch the boxslit, since in this case slitmask matching is not performed self.par = self.spectrograph.update_edgetracepar(self.par) @@ -4563,7 +4563,7 @@ def maskdesign_matching(self, debug=False): needind_t = np.where(needadd_t)[0] # edges we are missing if (needind_b.size > 0) | (needind_t.size > 0): - msgs.warn('Missing edge traces: {} left and {} right'.format(needind_b.shape[0], needind_t.shape[0])) + msgs.warning('Missing edge traces: {} left and {} right'.format(needind_b.shape[0], needind_t.shape[0])) if debug: slitdesign_matching.plot_matches(self.edge_fit[:,self.is_left], ind_b, bot_edge_pred, reference_row, @@ -4727,7 +4727,7 @@ def maskdesign_matching(self, debug=False): if self.is_synced: msgs.info('LEFT AND RIGHT EDGES SYNCHRONIZED AFTER MASK DESIGN MATCHING') else: - msgs.warn('LEFT AND RIGHT EDGES *NOT* SYNCHRONIZED AFTER MASK DESIGN MATCHING') + msgs.warning('LEFT AND RIGHT EDGES *NOT* SYNCHRONIZED AFTER MASK DESIGN MATCHING') def _fill_design_table(self, maskdef_id, cc_params_b, cc_params_t, omodel_bspat, omodel_tspat, spat_id): """ @@ -4774,7 +4774,7 @@ def _fill_design_table(self, maskdef_id, cc_params_b, cc_params_t, omodel_bspat, """ # Check that slitmask is initiated if self.slitmask is None: - msgs.error('Unable to read slitmask design info') + raise PypeItError('Unable to read slitmask design info') # as reference row we use the midpoint in the spectral direction reference_row = self.edge_fit[:, 0].size // 2 @@ -4843,7 +4843,7 @@ def _fill_objects_table(self, maskdef_id): """ # Check that slitmask is initiated if self.slitmask is None: - msgs.error('Unable to read slitmask design info') + raise PypeItError('Unable to read slitmask design info') if self.slitmask.objects is None: # No object data available in slit mask design object @@ -4879,11 +4879,11 @@ def order_refine(self, debug=False): present in the current set of edges. 
""" if self.spectrograph.pypeline != 'Echelle': - msgs.warn('Parameter add_missed_orders only valid for Echelle spectrographs.') + msgs.warning('Parameter add_missed_orders only valid for Echelle spectrographs.') return if not self.can_pca(): - msgs.error('Refining the orders currently requires a PCA decomposition of the ' + raise PypeItError('Refining the orders currently requires a PCA decomposition of the ' 'order edges. Ensure that the calibrations.slitedges.auto_pca parameter ' 'is True and that there are sufficient edges to create the PCA as set by ' 'the calibrations.slitedges.pca_min_edges parameter. If performing a ' @@ -4930,7 +4930,7 @@ def order_refine_fixed_format(self, reference_row, debug=False): Refine the order locations for fixed-format Echelles. """ if not self.spectrograph.ech_fixed_format: - msgs.error('order_refine_fixed_format can only be used with fixed-format Echelles!') + raise PypeItError('order_refine_fixed_format can only be used with fixed-format Echelles!') # TODO: # - What happens if *more* edges are detected than there are archived @@ -5078,7 +5078,7 @@ def order_refine_free_format(self, reference_row, combined_order_tol=1.8, bracke order_cen, order_missing \ = trace.find_missing_orders(cen[good_order], width_fit, gap_fit) if np.sum(order_missing) > order_missing.size // 2: - msgs.warn('Found more missing orders than detected orders. Check the order ' + msgs.warning('Found more missing orders than detected orders. Check the order ' 'refinement QA file! The code will continue, but you likely need to adjust ' 'your edge-tracing parameters.') @@ -5202,7 +5202,7 @@ def _handle_bracketing_orders(add_left, add_right): if nadd < 2: # TODO: The code should not get here! If it does, we need to # figure out why and fix it. - msgs.error('CODING ERROR: Order bracketing failed!') + raise PypeItError('CODING ERROR: Order bracketing failed!') if nadd == 2: return None, None return add_left[1:-1], add_right[1:-1] @@ -5462,7 +5462,7 @@ def slit_spatial_center(self, normalized=True, spec=None, use_center=False, locations are for bad/excluded slits. """ if not self.is_synced: - msgs.error('EdgeTraceSet must be synced to compute slit centers.') + raise PypeItError('EdgeTraceSet must be synced to compute slit centers.') # Select the good traces gpm = self.good_traces(include_box=include_box) @@ -5537,16 +5537,16 @@ def match_order(self, reference_row=None): """ if self.spectrograph.norders is None: - msgs.error('Coding error: norders not defined for {0}!'.format( + raise PypeItError('Coding error: norders not defined for {0}!'.format( self.spectrograph.__class__.__name__)) if self.spectrograph.orders is None: - msgs.error('Coding error: orders not defined for {0}!'.format( + raise PypeItError('Coding error: orders not defined for {0}!'.format( self.spectrograph.__class__.__name__)) if self.spectrograph.order_spat_pos is None: - msgs.error('Coding error: order_spat_pos not defined for {0}!'.format( + raise PypeItError('Coding error: order_spat_pos not defined for {0}!'.format( self.spectrograph.__class__.__name__)) if not self.is_synced: - msgs.error('EdgeTraceSet must be synced to match to orders.') + raise PypeItError('EdgeTraceSet must be synced to match to orders.') offset = self.par['order_offset'] if offset is None: @@ -5570,12 +5570,12 @@ def match_order(self, reference_row=None): fnd = slit_indx > -1 missed_orders = self.spectrograph.orders[np.logical_not(fnd)] if not np.all(fnd): - msgs.warn(f'Did not find all orders! 
Missing orders: {missed_orders}') + msgs.warning(f'Did not find all orders! Missing orders: {missed_orders}') # Flag paired edges that were not matched to a known order nomatch = np.setdiff1d(np.arange(np.sum(good_sync)), slit_indx[fnd]) if nomatch.size > 0: - msgs.warn(f'Flagging {nomatch.size} trace pairs as not being matched to an order.') + msgs.warning(f'Flagging {nomatch.size} trace pairs as not being matched to an order.') # Create a vector that selects the appropriate traces. This # *assumes* that the traces are left-right synchronized and the order # has not changed between the order of the traces in the relevant @@ -5636,7 +5636,7 @@ def get_slits(self): the slit traces. """ if not self.is_synced: - msgs.error('Edges must be synced to construct SlitTraceSet object.') + raise PypeItError('Edges must be synced to construct SlitTraceSet object.') # For echelle spectrographs, match the left-right trace pairs # to echelle orders @@ -5714,7 +5714,7 @@ def get_slits(self): # Check for mismatched `maskdef_id` in the left and right edges mkd_id_mismatch = self.maskdef_id[self.is_left] != self.maskdef_id[self.is_right] if np.any(mkd_id_mismatch): - msgs.warn("Mismatched `maskdefId` in left and right traces for {}/{} slits. ".format( + msgs.warning("Mismatched `maskdefId` in left and right traces for {}/{} slits. ".format( self.maskdef_id[self.is_left][mkd_id_mismatch].size, self.nslits) + "Choosing the left edge `maskdefId` if it is not -99, otherwise choosing the right one") _maskdef_id = self.maskdef_id[gpm & self.is_left] @@ -5723,7 +5723,7 @@ def get_slits(self): # this may not work if the corresponding right edge is also -99. Assuming this is not the case _maskdef_id[mkd_id_bad] = self.maskdef_id[gpm & self.is_right][mkd_id_bad] if np.any(_maskdef_id == -99): - msgs.warn("{} slits do not have `maskdefId` assigned.".format(_maskdef_id[_maskdef_id == -99].size) + + msgs.warning("{} slits do not have `maskdefId` assigned.".format(_maskdef_id[_maskdef_id == -99].size) + " They will not be included in the design table") # Store the matched slit-design and object information in a table. diff --git a/pypeit/extraction.py b/pypeit/extraction.py index 68f31a3e3f..b1c62e8778 100644 --- a/pypeit/extraction.py +++ b/pypeit/extraction.py @@ -207,9 +207,9 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ # Deal with dynamically generated calibrations, i.e. the tilts. 
If the tilts are not input generate # them from the fits in caliBrate, otherwise use the input tilts if waveTilts is None and tilts is None: - msgs.error("Must provide either waveTilts or tilts to Extract") + raise PypeItError("Must provide either waveTilts or tilts to Extract") elif waveTilts is not None and tilts is not None: - msgs.error("Cannot provide both waveTilts and tilts to Extract") + raise PypeItError("Cannot provide both waveTilts and tilts to Extract") elif waveTilts is not None and tilts is None: self.waveTilts = waveTilts self.waveTilts.is_synced(self.slits) @@ -229,9 +229,9 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ # Now generate the wavelength image msgs.info("Generating wavelength image") if wv_calib is None and waveimg is None: - msgs.error("Must provide either wv_calib or waveimg to Extract") + raise PypeItError("Must provide either wv_calib or waveimg to Extract") if wv_calib is not None and waveimg is not None: - msgs.error("Cannot provide both wv_calib and waveimg to Extract") + raise PypeItError("Cannot provide both wv_calib and waveimg to Extract") if wv_calib is not None and waveimg is None: self.wv_calib = wv_calib self.waveimg = self.wv_calib.build_waveimg(self.tilts, self.slits, spat_flexure=self.spat_flexure_shift) @@ -243,7 +243,7 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ if wv_calib is not None: self.fwhmimg = wv_calib.build_fwhmimg(self.tilts, self.slits, initial=True, spat_flexure=self.spat_flexure_shift) else: - msgs.warn("Spectral FWHM image could not be generated") + msgs.warning("Spectral FWHM image could not be generated") # get flatfield image for blaze function self.flatimg = None @@ -255,7 +255,7 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ # TODO: Can we just use flat_raw if flatimages.pixelflat_norm is None? self.flatimg, _ = flat.flatfield(flat_raw, flatimages.pixelflat_norm) if self.flatimg is None: - msgs.warn("No flat image was found. A spectrum of the flatfield will not be extracted!") + msgs.warning("No flat image was found. 
A spectrum of the flatfield will not be extracted!") # Now apply a global flexure correction to each slit provided it's not a standard star if self.par['flexure']['spec_method'] != 'skip' and not self.std_redux: @@ -401,10 +401,10 @@ def extract(self, global_sky, bkg_redux_global_sky=None, model_noise=None, spat_ # Find them if sobj.OPT_COUNTS is None and sobj.BOX_COUNTS is None: remove_idx.append(idx) - msgs.warn(f'Removing object at pixel {sobj.SPAT_PIXPOS} because ' + msgs.warning(f'Removing object at pixel {sobj.SPAT_PIXPOS} because ' f'both optimal and boxcar extraction could not be performed') elif sobj.OPT_COUNTS is None: - msgs.warn(f'Optimal extraction could not be performed for object at pixel {sobj.SPAT_PIXPOS}') + msgs.warning(f'Optimal extraction could not be performed for object at pixel {sobj.SPAT_PIXPOS}') # Remove them if len(remove_idx) > 0: @@ -518,9 +518,9 @@ def spec_flexure_correct(self, mode="local", sobjs=None): # Perform some checks if mode == "local" and sobjs is None: - msgs.error("No spectral extractions provided for flexure, using slit center instead") + raise PypeItError("No spectral extractions provided for flexure, using slit center instead") elif mode not in ["local", "global"]: - msgs.error("Flexure mode must be 'global' or 'local'.") + raise PypeItError("Flexure mode must be 'global' or 'local'.") # initialize flex_list flex_list = None @@ -657,7 +657,7 @@ def show(self, attr, image=None, showmask=False, sobjs=None, ch_name = chname if chname is not None else 'image' viewer, ch = display.show_image(image, chname=ch_name, clear=clear, wcs_match=True) else: - msgs.warn("Not an option for show") + msgs.warning("Not an option for show") if sobjs is not None: for spec in sobjs: @@ -841,7 +841,7 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, **kwarg #self.order_vec = spectrograph.orders if 'coadd2d' in self.objtype \ # else self.slits.ech_order #if self.order_vec is None: - # msgs.error('Unable to set Echelle orders, likely because they were incorrectly ' + # raise PypeItError('Unable to set Echelle orders, likely because they were incorrectly ' # 'assigned in the relevant SlitTraceSet.') # JFH TODO Should we reduce the number of iterations for standards or near-IR redux where the noise model is not diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index f2403a6d43..a351bf547b 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -214,9 +214,9 @@ def __init__(self, sciImg, slits, spectrograph, par, objtype, wv_calib=None, wav # Deal with dynamically generated calibrations, i.e. the tilts. if waveTilts is None and tilts is None: - msgs.error("Must provide either waveTilts or tilts to FindObjects") + raise PypeItError("Must provide either waveTilts or tilts to FindObjects") elif waveTilts is not None and tilts is not None: - msgs.error("Cannot provide both waveTilts and tilts to FindObjects") + raise PypeItError("Cannot provide both waveTilts and tilts to FindObjects") elif waveTilts is not None and tilts is None: self.waveTilts = waveTilts self.waveTilts.is_synced(self.slits) @@ -583,7 +583,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, inmask = self.sciImg.select_flag(invert=True) & thismask & skymask_now # All masked? if not np.any(inmask): - msgs.warn("No pixels for fitting sky. If you are using mask_by_boxcar=True, your radius may be too large.") + msgs.warning("No pixels for fitting sky. 
If you are using mask_by_boxcar=True, your radius may be too large.") self.reduce_bpm[slit_idx] = True continue @@ -602,7 +602,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, # Mask if something went wrong if np.sum(global_sky[thismask]) == 0.: - msgs.warn("Bad fit to sky. Rejecting slit: {:d}".format(slit_spat)) + msgs.warning("Bad fit to sky. Rejecting slit: {:d}".format(slit_spat)) self.reduce_bpm[slit_idx] = True if update_crmask and self.par['scienceframe']['process']['mask_cr']: @@ -669,7 +669,7 @@ def show(self, attr, image=None, global_sky=None, showmask=False, sobjs=None, ch_name = chname if chname is not None else 'image' viewer, ch = display.show_image(image, chname=ch_name, clear=clear, wcs_match=True) else: - msgs.warn("Not an option for show") + msgs.warning("Not an option for show") if sobjs is not None: for spec in sobjs: @@ -853,7 +853,7 @@ def __init__(self, sciImg, slits, spectrograph, par, objtype, **kwargs): self.order_vec = spectrograph.orders if 'coadd2d' in self.objtype and spectrograph.orders is not None \ else self.slits.ech_order if self.order_vec is None: - msgs.error('Unable to set Echelle orders, likely because they were incorrectly ' + raise PypeItError('Unable to set Echelle orders, likely because they were incorrectly ' 'assigned in the relevant SlitTraceSet.') def get_platescale(self, slitord_id=None): @@ -869,7 +869,7 @@ def get_platescale(self, slitord_id=None): """ if slitord_id is None: - msgs.error('slitord_id is missing. Plate scale for current echelle order cannot be determined.') + raise PypeItError('slitord_id is missing. Plate scale for current echelle order cannot be determined.') return self.spectrograph.order_platescale(slitord_id, binning=self.binning)[0] @@ -1016,7 +1016,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, update_crmask=True, return global_sky_sep if self.wv_calib is None: - msgs.error("A wavelength calibration is needed (wv_calib) if a joint sky fit is requested.") + raise PypeItError("A wavelength calibration is needed (wv_calib) if a joint sky fit is requested.") msgs.info("Generating wavelength image") # Generate the waveimg which is needed if flexure is being computed @@ -1204,7 +1204,7 @@ def calculate_flexure(self, global_sky): maxwave=self.par['flexure']['maxwave']) this_slitshift = np.zeros(self.slits.nslits) if flex_dict_ref is not None: - msgs.warn("Only a relative spectral flexure correction will be performed") + msgs.warning("Only a relative spectral flexure correction will be performed") this_slitshift = np.ones(self.slits.nslits) * flex_dict_ref['shift'] # Now loop through all slits to calculate the additional shift relative to the reference slit flex_list = [] diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index e97b39825e..c992aee0a7 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -110,10 +110,10 @@ def _validate(self): """ if self.pixelflat_spat_bsplines is not None and len(self.pixelflat_spat_bsplines) > 0: if len(self.spat_id) != len(self.pixelflat_spat_bsplines): - msgs.error("Pixelflat Bsplines are out of sync with the slit IDs") + raise PypeItError("Pixelflat Bsplines are out of sync with the slit IDs") if self.illumflat_spat_bsplines is not None and len(self.illumflat_spat_bsplines) > 0: if len(self.spat_id) != len(self.illumflat_spat_bsplines): - msgs.error("Illumflat Bsplines are out of sync with the slit IDs") + raise PypeItError("Illumflat Bsplines are out of sync with the slit IDs") def is_synced(self, slits): """ @@ -126,7 +126,7 @@ def 
is_synced(self, slits): """ if not np.array_equal(self.spat_id, slits.spat_id): - msgs.error('Your flat solutions are out of sync with your slits. Remove Calibrations' + raise PypeItError('Your flat solutions are out of sync with your slits. Remove Calibrations ' 'and restart from scratch.') def _bundle(self): @@ -201,14 +201,14 @@ def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None, ** for i in range(nspat)] indx = np.isin(ext_bspl, hdunames) if np.any(indx) and not np.all(indx): - msgs.error('Expected {0} {1} bspline extensions, but only found {2}.'.format( + raise PypeItError('Expected {0} {1} bspline extensions, but only found {2}.'.format( nspat, flattype, np.sum(indx))) if np.all(indx): key = '{0}_spat_bsplines'.format(flattype) try: d[key] = np.array([bspline.bspline.from_hdu(hdu[k]) for k in ext_bspl]) except Exception as e: - msgs.warn('Error in bspline extension read:\n {0}: {1}'.format( + msgs.warning('Error in bspline extension read:\n {0}: {1}'.format( e.__class__.__name__, str(e))) # Assume this is because the type failed type_passed = False @@ -221,7 +221,7 @@ def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None, ** for i in range(nspat)] indx = np.isin(ext_fcor, hdunames) if np.any(indx) and not np.all(indx): - msgs.error('Expected {0} {1} finecorr extensions, but only found {2}.'.format( + raise PypeItError('Expected {0} {1} finecorr extensions, but only found {2}.'.format( nspat, flattype, np.sum(indx))) if np.all(indx): key = '{0}_finecorr'.format(flattype) @@ -234,7 +234,7 @@ def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None, ** allfit.append(fitting.PypeItFit.from_hdu(hdu[k])) d[key] = np.array(allfit) except Exception as e: - msgs.warn('Error in finecorr extension read:\n {0}: {1}'.format( + msgs.warning('Error in finecorr extension read:\n {0}: {1}'.format( e.__class__.__name__, str(e))) # Assume this is because the type failed type_passed = False @@ -253,7 +253,7 @@ def shape(self): return self.pixelflat_raw.shape if self.illumflat_raw is not None: return self.illumflat_raw.shape - msgs.error("Shape of FlatImages could not be determined") + raise PypeItError("Shape of FlatImages could not be determined") def get_procflat(self, frametype='pixel'): """ @@ -286,17 +286,17 @@ def get_bpmflats(self, frametype='pixel'): """ # Check if both BPMs are none if self.pixelflat_bpm is None and self.illumflat_bpm is None: - msgs.warn("FlatImages contains no BPM - trying to generate one") + msgs.warning("FlatImages contains no BPM - trying to generate one") return np.zeros(self.shape, dtype=int) # Now return the requested case, checking for None if frametype == 'illum': if self.illumflat_bpm is not None: return self.illumflat_bpm - msgs.warn("illumflat has no BPM - using the pixelflat BPM") + msgs.warning("illumflat has no BPM - using the pixelflat BPM") return self.pixelflat_bpm if self.pixelflat_bpm is not None: return self.pixelflat_bpm - msgs.warn("pixelflat has no BPM - using the illumflat BPM") + msgs.warning("pixelflat has no BPM - using the illumflat BPM") return self.illumflat_bpm def get_spat_bsplines(self, frametype='illum', finecorr=False): @@ -331,17 +331,17 @@ def get_spat_bsplines(self, frametype='illum', finecorr=False): illum_bsplines = self.illumflat_spat_bsplines # Ensure that at least one has been generated if pixel_bsplines is None and illum_bsplines is None: - msgs.warn(f'FlatImages contains no {fctxt}spatial bspline fit.') + msgs.warning(f'FlatImages contains no {fctxt}spatial bspline 
fit.') return None # Now return the requested case, checking for None if frametype == 'illum': if illum_bsplines is not None: return illum_bsplines - msgs.warn(f'illumflat has no {fctxt}spatial bspline fit - using the pixelflat.') + msgs.warning(f'illumflat has no {fctxt}spatial bspline fit - using the pixelflat.') return pixel_bsplines if pixel_bsplines is not None: return pixel_bsplines - msgs.warn(f'pixelflat has no {fctxt}spatial bspline fit - using the illumflat.') + msgs.warning(f'pixelflat has no {fctxt}spatial bspline fit - using the illumflat.') return illum_bsplines def fit2illumflat(self, slits, frametype='illum', finecorr=False, initial=False, @@ -368,7 +368,7 @@ def fit2illumflat(self, slits, frametype='illum', finecorr=False, initial=False, """ # Check spatial flexure type if spat_flexure is not None and not isinstance(spat_flexure, float): - msgs.error('Spatial flexure must be None or float.') + raise PypeItError('Spatial flexure must be None or float.') # Initialise the returned array illumflat = np.ones(self.shape, dtype=float) # Load spatial bsplines @@ -377,7 +377,7 @@ def fit2illumflat(self, slits, frametype='illum', finecorr=False, initial=False, if spat_bsplines is None: if finecorr: return np.ones(self.shape, dtype=float) - msgs.error('Cannot continue without spatial bsplines.') + raise PypeItError('Cannot continue without spatial bsplines.') # Loop for slit_idx in range(slits.nslits): @@ -432,7 +432,7 @@ def show(self, frametype='all', slits=None, wcs_match=True, chk_version=True): try: slits = slittrace.SlitTraceSet.from_file(slits_file, chk_version=chk_version) except (FileNotFoundError, PypeItDataModelError): - msgs.warn('Could not load slits to include when showing flat-field images. File ' + msgs.warning('Could not load slits to include when showing flat-field images. File ' 'was either not provided directly, or it could not be read based on its ' f'expected name: {slits_file}.') @@ -561,7 +561,7 @@ def __init__(self, rawflatimg, spectrograph, flatpar, slits, wavetilts=None, wv_ # get waveimg here if available if self.wavetilts is None or self.wv_calib is None: - msgs.warn("Wavelength calib or tilts are not available. Wavelength image not generated.") + msgs.warning("Wavelength calib or tilts are not available. Wavelength image not generated.") else: self.build_waveimg() # this set self.waveimg @@ -605,7 +605,7 @@ def run(self, doqa=False, debug=False, show=False): # check if self.wavetilts is available. It can be None if the flat is slitless, but it's needed otherwise if self.wavetilts is None and not self.slitless: - msgs.warn("Wavelength tilts are not available. Cannot generate this flat image.") + msgs.warning("Wavelength tilts are not available. Cannot generate this flat image.") return None # Fit it @@ -703,7 +703,7 @@ def build_waveimg(self): """ msgs.info("Generating wavelength image") if self.wavetilts is None or self.wv_calib is None: - msgs.error("Wavelength calib or tilts are not available. Cannot generate wavelength image.") + raise PypeItError("Wavelength calib or tilts are not available. Cannot generate wavelength image.") else: flex = self.wavetilts.spat_flexure slitmask = self.slits.slit_img(initial=True, flexure=flex) @@ -939,20 +939,20 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): 'You could also choose to use a different flat-field image ' \ 'for this calibration group.' 
if saturated_slits == 'crash': - msgs.error('Only {:4.2f}'.format(100*good_frac) + raise PypeItError('Only {:4.2f}'.format(100*good_frac) + '% of the pixels on slit {0} are not saturated. '.format(slit_spat) + 'Selected behavior was to crash if this occurred. ' + common_message) elif saturated_slits == 'mask': self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') - msgs.warn('Only {:4.2f}'.format(100*good_frac) + msgs.warning('Only {:4.2f}'.format(100*good_frac) + '% of the pixels on slit {0} are not saturated. '.format(slit_spat) + 'Selected behavior was to mask this slit and continue with the ' + 'remainder of the reduction, meaning no science data will be ' + 'extracted from this slit. ' + common_message) elif saturated_slits == 'continue': self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'SKIPFLATCALIB') - msgs.warn('Only {:4.2f}'.format(100*good_frac) + msgs.warning('Only {:4.2f}'.format(100*good_frac) + '% of the pixels on slit {0} are not saturated. '.format(slit_spat) + 'Selected behavior was to simply continue, meaning no ' + 'flat-fielding correction will be applied to this slit but ' @@ -1010,7 +1010,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): # Set this to a parameter? if spec_nfit/spec_ntot < 0.5: # TODO: Shouldn't this raise an exception or continue to the next slit instead? - msgs.warn('Spectral fit includes only {:.1f}'.format(100*spec_nfit/spec_ntot) + msgs.warning('Spectral fit includes only {:.1f}'.format(100*spec_nfit/spec_ntot) + '% of the pixels on this slit.' + msgs.newline() + ' Either the slit has many bad pixels or the number of ' 'trimmed pixels is too large.') @@ -1044,7 +1044,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): if exit_status > 1: # TODO -- MAKE A FUNCTION - msgs.warn('Flat-field spectral response bspline fit failed! Not flat-fielding ' + msgs.warning('Flat-field spectral response bspline fit failed! Not flat-fielding ' 'slit {0} and continuing!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') continue @@ -1098,14 +1098,14 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): + ' pixels in the slit.') if spat_nfit/spat_ntot < 0.5: # TODO: Shouldn't this raise an exception or continue to the next slit instead? - msgs.warn('Spatial fit includes only {:.1f}'.format(100*spat_nfit/spat_ntot) + msgs.warning('Spatial fit includes only {:.1f}'.format(100*spat_nfit/spat_ntot) + '% of the pixels on this slit.' + msgs.newline() + ' Either the slit has many bad pixels, the model of the ' 'spectral shape is poor, or the illumination profile is very irregular.') # First fit -- With initial slits if not np.any(spat_gpm): - msgs.warn('Flat-field failed during normalization! Not flat-fielding ' + msgs.warning('Flat-field failed during normalization! Not flat-fielding ' 'slit {0} and continuing!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on( self.slits.mask[slit_idx], 'BADFLATCALIB') @@ -1208,7 +1208,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): continue else: # Save the nada - msgs.warn('Slit illumination profile bspline fit failed! Spatial profile not ' + msgs.warning('Slit illumination profile bspline fit failed! 
Spatial profile not ' 'included in flat-field model for slit {0}!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') continue @@ -1300,7 +1300,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): # Save the 2D residual model twod_model[...] = 1. if exit_status > 1: - msgs.warn('Two-dimensional fit to flat-field data failed! No higher order ' + msgs.warning('Two-dimensional fit to flat-field data failed! No higher order ' 'flat-field corrections included in model of slit {0}!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') else: @@ -1317,7 +1317,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): # Check for infinities and NaNs in the flat-field model winfnan = np.where(np.logical_not(np.isfinite(self.flat_model[onslit_tweak]))) if winfnan[0].size != 0: - msgs.warn('There are {0:d} pixels with non-finite values in the flat-field model ' + msgs.warning('There are {0:d} pixels with non-finite values in the flat-field model ' 'for slit {1:d}!'.format(winfnan[0].size, slit_spat) + msgs.newline() + 'These model pixel values will be set to the raw pixel value.') self.flat_model[np.where(onslit_tweak)[0][winfnan]] = rawflat[np.where(onslit_tweak)[0][winfnan]] @@ -1325,7 +1325,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): whilo = np.where((self.flat_model[onslit_tweak] >= nonlinear_counts) | (self.flat_model[onslit_tweak] <= 0.0)) if whilo[0].size != 0: - msgs.warn('There are {0:d} pixels with unrealistically high or low values in the flat-field model ' + msgs.warning('There are {0:d} pixels with unrealistically high or low values in the flat-field model ' 'for slit {1:d}!'.format(whilo[0].size, slit_spat) + msgs.newline() + 'These model pixel values will be set to the raw pixel value.') self.flat_model[np.where(onslit_tweak)[0][whilo]] = rawflat[np.where(onslit_tweak)[0][whilo]] @@ -1404,7 +1404,7 @@ def spatial_fit(self, norm_spec, spat_coo, median_slit_width, spat_gpm, gpm, deb # Make sure that the normalized and filtered flat is finite! if np.any(np.invert(np.isfinite(spat_flat_data))): - msgs.error('Inifinities in slit illumination function computation!') + raise PypeItError('Infinities in slit illumination function computation!') # Determine the breakpoint spacing from the sampling of the # spatial coordinates. Use breakpoints at a spacing of a @@ -1464,7 +1464,7 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm, """ # check if self.waveimg is available if self.waveimg is None: - msgs.warn("Cannot perform the fine correction to the spatial illumination without the wavelength image.") + msgs.warning("Cannot perform the fine correction to the spatial illumination without the wavelength image.") return # TODO :: Include fit_order in the parset?? 
fit_order = np.array([3, 6]) @@ -1519,7 +1519,7 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm, self.list_of_finecorr_fits[slit_idx] = fullfit illumflat_finecorr[this_slit] = fullfit.eval(xpos, ypos) else: - msgs.warn(f"Fine correction to the spatial illumination failed for {slit_txt} {slit_ordid}") + msgs.warning(f"Fine correction to the spatial illumination failed for {slit_txt} {slit_ordid}") return illumflat_finecorr # If corrections exceed the tolerance, then clip them to the level of the tolerance @@ -1563,7 +1563,7 @@ def extract_structure(self, rawflat_orig, slit_trim=3): # check if the waveimg is available if self.waveimg is None: - msgs.error("Cannot perform the extraction of the flatfield structure without the wavelength image.") + raise PypeItError("Cannot perform the extraction of the flatfield structure without the wavelength image.") # Build the mask and make a temporary instance of FlatImages bpmflats = self.build_mask() @@ -1636,7 +1636,7 @@ def spectral_illumination(self, gpm=None, debug=False): msgs.info("Deriving spectral illumination profile") # check if the waveimg is available if self.waveimg is None: - msgs.warn("Cannot perform the spectral illumination without the wavelength image.") + msgs.warning("Cannot perform the spectral illumination without the wavelength image.") return None msgs.info('Performing a joint fit to the flat-field response') # Grab some parameters @@ -1722,7 +1722,7 @@ def tweak_slit_edges(self, left, right, spat_coo, norm_flat, method='threshold', elif method == "gradient": return flat.tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=maxfrac, debug=debug) else: - msgs.error("Method for tweaking slit edges not recognized: {0}".format(method)) + raise PypeItError("Method for tweaking slit edges not recognized: {0}".format(method)) class SlitlessFlat: @@ -1763,7 +1763,7 @@ def slitless_pixflat_fname(self): """ if len(self.slitless_rows) == 0: - msgs.error('No slitless_pixflat frames found. Cannot generate the slitless pixel flat file name.') + raise PypeItError('No slitless_pixflat frames found. Cannot generate the slitless pixel flat file name.') # generate the slitless pixel flat file name spec_name = self.fitstbl.spectrograph.name @@ -1853,7 +1853,7 @@ def make_slitless_pixflat(self, msbias=None, msdark=None, calib_dir=None, write_ this_raw_idx = self.spectrograph.parse_raw_files(self.fitstbl[self.slitless_rows], det=_det, ftype='slitless_pixflat') if len(this_raw_idx) == 0: - msgs.warn(f'No raw slitless_pixflat frames found for {self.spectrograph.get_det_name(_det)}. ' + msgs.warning(f'No raw slitless_pixflat frames found for {self.spectrograph.get_det_name(_det)}. ' f'Continuing...') continue this_raw_files = self.fitstbl.frame_paths(self.slitless_rows[this_raw_idx]) @@ -2255,7 +2255,7 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ if (ii == 1) and (slits.spat_id[wvsrt[ss]] == slit_illum_ref_idx): # This must be the first element of the loop by construction, but throw an error just in case if ss != 0: - msgs.error("CODING ERROR - An error has occurred in the relative spectral illumination." + + raise PypeItError("CODING ERROR - An error has occurred in the relative spectral illumination." 
+ msgs.newline() + "Please contact the developers.") tmp_cntr = cntr * spec_ref tmp_arr = hist * utils.inverse(tmp_cntr) @@ -2402,7 +2402,7 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi # Check that the number of detectors matches the number of pixelflat_norm arrays if len(pixflat_norm_list) != len(detname_list): - msgs.error("The number of detectors does not match the number of pixelflat_norm arrays. " + raise PypeItError("The number of detectors does not match the number of pixelflat_norm arrays. " "The pixelflat file cannot be written.") # local output (reduction directory) @@ -2413,7 +2413,7 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi old_detnames = [] old_hdr = None if pixelflat_file.exists(): - msgs.warn("The pixelflat file already exists. It will be overwritten/updated.") + msgs.warning("The pixelflat file already exists. It will be overwritten/updated.") old_hdus = fits.open(pixelflat_file) old_detnames = [h.name.split('-')[0] for h in old_hdus] # this has also 'PRIMARY' old_hdr = old_hdus[0].header @@ -2505,12 +2505,12 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, """ # Check if the pixel flat file exists if pixel_flat_file is None: - msgs.error('No pixel flat file defined. Cannot load the pixel flat!') + raise PypeItError('No pixel flat file defined. Cannot load the pixel flat!') # get the path _pixel_flat_file = dataPaths.pixelflat.get_file_path(pixel_flat_file, return_none=True) if _pixel_flat_file is None: - msgs.error(f'Cannot load the pixel flat file, {pixel_flat_file}. It is not a direct path, ' + raise PypeItError(f'Cannot load the pixel flat file, {pixel_flat_file}. It is not a direct path, ' f'a cached file, or a file that can be downloaded from a PypeIt repository.') # If this is a mosaic, we need to construct the pixel flat mosaic @@ -2520,7 +2520,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, edges_file = Path(edgetrace.EdgeTraceSet.construct_file_name(flatimages.calib_key, calib_dir=calib_dir)).absolute() if not edges_file.exists(): - msgs.error('Edges file not found in the Calibrations folder. ' + raise PypeItError('Edges file not found in the Calibrations folder. ' 'It is needed to grab the mosaic parameters to load and mosaic the input pixel flat!') # Load detector info from EdgeTraceSet file @@ -2529,7 +2529,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, # check that the mosaic parameters are defined if not np.all(np.in1d(['tform', 'msc_ord'], list(det_info.keys()))) or \ det_info.tform is None or det_info.msc_ord is None: - msgs.error('Mosaic parameters are not defined in the Edges frame. Cannot load the pixel flat!') + raise PypeItError('Mosaic parameters are not defined in the Edges frame. Cannot load the pixel flat!') # read the file with io.fits_open(_pixel_flat_file) as hdu: @@ -2537,7 +2537,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, file_dets = [int(h.name.split('-')[0].split('DET')[1]) for h in hdu[1:]] # check if all detectors required for the mosaic are in the list if not np.all(np.in1d(list(det), file_dets)): - msgs.error(f'Not all detectors in the mosaic are in the pixel flat file: ' + raise PypeItError(f'Not all detectors in the mosaic are in the pixel flat file: ' f'{pixel_flat_file}. 
Cannot load the pixel flat!') # get the pixel flat images of only the detectors in the mosaic @@ -2546,7 +2546,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, pixflat_msc, _,_,_ = build_image_mosaic(pixflat_images, det_info.tform, order=det_info.msc_ord) # check that the mosaic has the correct shape if pixflat_msc.shape != traceimg.image.shape: - msgs.error('The constructed pixel flat mosaic does not have the correct shape. ' + raise PypeItError('The constructed pixel flat mosaic does not have the correct shape. ' 'Cannot load this pixel flat as a mosaic!') msgs.info(f'Using pixelflat file: {pixel_flat_file} ' f'for {spectrograph.get_det_name(det)}.') @@ -2568,7 +2568,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, msgs.info(f'Using pixelflat file: {pixel_flat_file} for {detname}.') nrm_image = FlatImages(pixelflat_norm=hdu[idx].data) else: - msgs.error(f'{detname} not found in the pixel flat file: ' + raise PypeItError(f'{detname} not found in the pixel flat file: ' f'{pixel_flat_file}. Cannot load the pixel flat!') nrm_image = None diff --git a/pypeit/images/bitmaskarray.py b/pypeit/images/bitmaskarray.py index 20268e1855..ed307bf829 100644 --- a/pypeit/images/bitmaskarray.py +++ b/pypeit/images/bitmaskarray.py @@ -76,12 +76,12 @@ def _set_keys(self): # Check the bitmask keys = self.bit_keys() if any([not isinstance(k, str) for k in keys]): - msgs.error(f'CODING ERROR: {self.bitmask.__class__.__name__} must only contain ' + raise PypeItError(f'CODING ERROR: {self.bitmask.__class__.__name__} must only contain ' 'string bit flags.') self.lower_keys = [k.lower() for k in keys] if len(np.unique(self.lower_keys)) != len(keys): - msgs.error('CODING ERROR: All bitmask keys must be case-insensitive and unique: ' + raise PypeItError('CODING ERROR: All bitmask keys must be case-insensitive and unique: ' f'{keys}') def __getattr__(self, item): @@ -189,7 +189,7 @@ def from_hdu(cls, hdu, chk_version=True, **kwargs): hdr = hdu[parsed_hdus[0]].header if isinstance(hdu, fits.HDUList) else hdu.header hdr_bitmask = BitMask.from_header(hdr) if chk_version and hdr_bitmask.bits != self.bitmask.bits: - msgs.error('The bitmask in this fits file appear to be out of date! Recreate this ' + raise PypeItError('The bitmask in this fits file appears to be out of date! Recreate this ' 'file by re-running the relevant script or set chk_version=False.', cls='PypeItBitMaskError') diff --git a/pypeit/images/buildimage.py b/pypeit/images/buildimage.py index b09581ed56..e9501208e6 100644 --- a/pypeit/images/buildimage.py +++ b/pypeit/images/buildimage.py @@ -241,12 +241,12 @@ def buildimage_fromlist(spectrograph, det, frame_par, file_list, bias=None, bpm= """ # Check if not isinstance(frame_par, pypeitpar.FrameGroupPar): - msgs.error('Provided ParSet must be type FrameGroupPar, not ' + raise PypeItError('Provided ParSet must be type FrameGroupPar, not ' f'{frame_par.__class__.__name__}.') if not valid_frametype(frame_par['frametype'], quiet=True): # NOTE: This should not be necessary because FrameGroupPar explicitly # requires frametype to be valid - msgs.error(f'{frame_par["frametype"]} is not a valid PypeIt frame type.') + raise PypeItError(f'{frame_par["frametype"]} is not a valid PypeIt frame type.') # Should the detectors be reformatted into a single image mosaic? 
if mosaic is None: diff --git a/pypeit/images/combineimage.py b/pypeit/images/combineimage.py index 26c0a61a16..30c0020ff8 100644 --- a/pypeit/images/combineimage.py +++ b/pypeit/images/combineimage.py @@ -40,14 +40,14 @@ class CombineImage: """ def __init__(self, rawImages, par): if not isinstance(par, pypeitpar.ProcessImagesPar): - msgs.error('Provided ParSet for must be type ProcessImagesPar.') + raise PypeItError('Provided ParSet must be type ProcessImagesPar.') self.rawImages = list(rawImages) if hasattr(rawImages, '__len__') else [rawImages] self.par = par # This must be named this way as it is frequently a child # NOTE: nimgs is a property method. Defining rawImages above must come # before this check! if self.nimgs == 0: - msgs.error('CombineImage requires a list of files to instantiate') + raise PypeItError('CombineImage requires a list of files to instantiate') def run(self, ignore_saturation=False, maxiters=5): @@ -140,9 +140,9 @@ def run(self, ignore_saturation=False, maxiters=5): # Check the input (i.e., bomb out *before* it does any processing) if self.nimgs == 0: - msgs.error('Object contains no files to process!') + raise PypeItError('Object contains no files to process!') if self.nimgs > 1 and self.par['combine'] not in ['mean', 'median']: - msgs.error(f'Unknown image combination method, {self.par["combine"]}. Must be ' + raise PypeItError(f'Unknown image combination method, {self.par["combine"]}. Must be ' '"mean" or "median".') file_list = [] # Loop on the files @@ -191,7 +191,7 @@ def run(self, ignore_saturation=False, maxiters=5): # TODO: JFH suggests that we move this to calibrations.check_calibrations if np.any(np.absolute(np.diff(exptime)) > 0): # TODO: This should likely throw an error instead! - msgs.warn('Exposure time is not consistent for all images being combined! ' + msgs.warning('Exposure time is not consistent for all images being combined! ' 'Using the average.') comb_texp = np.mean(exptime) else: @@ -204,9 +204,9 @@ def run(self, ignore_saturation=False, maxiters=5): no_nan = np.logical_not(np.isnan(spat_flex)) if np.sum(no_nan) > 0: if np.any(np.absolute(np.diff(spat_flex[no_nan])) > 0.1): - msgs.warn(f'Spatial flexure is not consistent for all images being combined: {spat_flex}.') + msgs.warning(f'Spatial flexure is not consistent for all images being combined: {spat_flex}.') comb_spat_flex = np.round(np.mean(spat_flex[no_nan]),3) - msgs.warn(f'Using the average: {comb_spat_flex}.') + msgs.warning(f'Using the average: {comb_spat_flex}.') else: comb_spat_flex = spat_flex[no_nan][0] @@ -273,7 +273,7 @@ def run(self, ignore_saturation=False, maxiters=5): else: # NOTE: Given the check at the beginning of the function, the code # should *never* make it here. - msgs.error("Bad choice for combine. Allowed options are 'median', 'mean'.") + raise PypeItError("Bad choice for combine. 
Allowed options are 'median', 'mean'.") # Recompute the inverse variance using the combined image comb_var = procimg.variance_model(comb_basev, diff --git a/pypeit/images/mosaic.py b/pypeit/images/mosaic.py index a8b6e87899..9b79171962 100644 --- a/pypeit/images/mosaic.py +++ b/pypeit/images/mosaic.py @@ -78,9 +78,9 @@ def _validate(self): self.binning = self.detectors[0].binning for i in range(1,self.ndet): if self.detectors[i].platescale != self.platescale: - msgs.error('Platescale difference between detectors in mosaic.') + raise PypeItError('Platescale difference between detectors in mosaic.') if self.detectors[i].binning != self.binning: - msgs.error('Binning difference between detectors in mosaic.') + raise PypeItError('Binning difference between detectors in mosaic.') def _bundle(self): """ @@ -100,7 +100,7 @@ def _bundle(self): tbl = table.vstack([d._bundle()[0]['DETECTOR'] for d in self.detectors], join_type='exact') except: - msgs.error('CODING ERROR: Could not stack detector parameter tables when writing ' + raise PypeItError('CODING ERROR: Could not stack detector parameter tables when writing ' 'mosaic metadata.') if self.shift is not None: tbl['shift'] = self.shift @@ -146,7 +146,7 @@ def _parse(cls, hdu, hdu_prefix=None, **kwargs): # This should only ever read one hdu! if len(parsed_hdus) > 1: - msgs.error('CODING ERROR: Parsing saved Mosaic instances should only parse 1 HDU.') + raise PypeItError('CODING ERROR: Parsing saved Mosaic instances should only parse 1 HDU.') # These are the same as the attributes for the detectors, so we need to # get rid of them. We'll get them back via the _validate function. @@ -167,7 +167,7 @@ def _parse(cls, hdu, hdu_prefix=None, **kwargs): # version and type checking. _d, vp, tp, ph = DetectorContainer._parse(_hdu) if not vp: - msgs.warn('Detector datamodel version is incorrect. May cause a fault.') + msgs.warning('Detector datamodel version is incorrect. 
May cause a fault.') version_passed &= vp d['detectors'] += [DetectorContainer.from_dict(d=_d) if tp else None] type_passed &= tp diff --git a/pypeit/images/pypeitimage.py b/pypeit/images/pypeitimage.py index dc8e07b591..99022fa6e5 100644 --- a/pypeit/images/pypeitimage.py +++ b/pypeit/images/pypeitimage.py @@ -171,7 +171,7 @@ def __init__(self, image, ivar=None, nimg=None, amp_img=None, det_img=None, rn2i shot_noise=None, bpm=None, crmask=None, usermask=None, clean_mask=False): if image is None: - msgs.error('Must provide an image when instantiating PypeItImage.') + raise PypeItError('Must provide an image when instantiating PypeItImage.') # Instantiate as an empty DataContainer super().__init__() @@ -189,7 +189,7 @@ def __init__(self, image, ivar=None, nimg=None, amp_img=None, det_img=None, rn2i 'fullmask']: _arr = getattr(self, attr) if _arr is not None and _arr.shape != self.shape: - msgs.error(f'Attribute {attr} does not match image shape.') + raise PypeItError(f'Attribute {attr} does not match image shape.') # Make sure the units are defined if self.units is None: @@ -201,19 +201,19 @@ def __init__(self, image, ivar=None, nimg=None, amp_img=None, det_img=None, rn2i if bpm is not None: if not np.issubdtype(bpm.dtype, np.bool_) and not np.issubdtype(bpm.dtype, bool): - msgs.error('CODING ERROR: bpm entry in PypeItImage must have boolean type') + raise PypeItError('CODING ERROR: bpm entry in PypeItImage must have boolean type') if clean_mask: self.update_mask('BPM', action='turn_off') self.update_mask('BPM', indx=bpm) if crmask is not None: if not np.issubdtype(crmask.dtype, np.bool_) and not np.issubdtype(crmask.dtype, bool): - msgs.error('CODING ERROR: crmask entry in PypeItImage must have boolean type') + raise PypeItError('CODING ERROR: crmask entry in PypeItImage must have boolean type') if clean_mask: self.update_mask('CR', action='turn_off') self.update_mask('CR', indx=crmask) if usermask is not None: if not np.issubdtype(usermask.dtype, np.bool_) and not np.issubdtype(usermask.dtype, bool): - msgs.error('CODING ERROR: usermask entry in PypeItImage must have boolean type') + raise PypeItError('CODING ERROR: usermask entry in PypeItImage must have boolean type') if clean_mask: self.update_mask('USER', action='turn_off') self.update_mask('USER', indx=usermask) @@ -344,7 +344,7 @@ def build_crmask(self, par, subtract_img=None): cosmic rays; True means a CR was flagged. 
""" if subtract_img is not None and subtract_img.shape != self.shape: - msgs.error('In cosmic-ray detection, image to subtract has incorrect shape.') + raise PypeItError('In cosmic-ray detection, image to subtract has incorrect shape.') # Image to flag use_img = self.image if subtract_img is None else self.image - subtract_img @@ -410,7 +410,7 @@ def map_detector_value(self, attr): # Must be defining the per-amplifier value if self.amp_img is None: - msgs.error(f'To remap detector {attr}, object must have amp_img defined.') + raise PypeItError(f'To remap detector {attr}, object must have amp_img defined.') out = np.zeros(self.shape, dtype=type(data[0])) for j in range(len(data)): out[self.amp_img == j+1] = data[j] @@ -427,7 +427,7 @@ def map_detector_value(self, attr): return np.repeat(data, np.prod(self.shape[1:])).reshape(self.shape) # Must be defining the per-amplifier value if self.amp_img is None: - msgs.error(f'To remap detector {attr}, object must have amp_img defined.') + raise PypeItError(f'To remap detector {attr}, object must have amp_img defined.') out = np.zeros(self.shape, dtype=type(data[0][0])) for i in range(self.detector.ndet): for j in range(len(data[i])): @@ -440,7 +440,7 @@ def map_detector_value(self, attr): # Check for amplifier dependent output before entering loop if not np.isscalar(data[0]) and self.amp_img is None: # Must be defining the per-amplifier value - msgs.error(f'To remap detector {attr}, object must have amp_img defined.') + raise PypeItError(f'To remap detector {attr}, object must have amp_img defined.') # Get the output type otype = type(data[0]) if np.isscalar(data[0]) else type(data[0][0]) # Fill the array @@ -456,7 +456,7 @@ def map_detector_value(self, attr): return out # Should not get here - msgs.error('CODING ERROR: Bad logic in map_detector_value.') + raise PypeItError('CODING ERROR: Bad logic in map_detector_value.') def build_mask(self, saturation=None, mincounts=None, slitmask=None, from_scratch=True): """ @@ -521,15 +521,15 @@ def build_mask(self, saturation=None, mincounts=None, slitmask=None, from_scratc # Check input if saturation is not None and isinstance(saturation, np.ndarray) \ and saturation.shape != self.shape: - msgs.error('Saturation array must have the same shape as the image.') + raise PypeItError('Saturation array must have the same shape as the image.') if mincounts is not None and isinstance(mincounts, np.ndarray) \ and mincounts.shape != self.shape: - msgs.error('Minimum counts array must have the same shape as the image.') + raise PypeItError('Minimum counts array must have the same shape as the image.') # Setup the saturation level if isinstance(saturation, str): if saturation != 'default': - msgs.error(f'Unknown saturation string: {saturation}') + raise PypeItError(f'Unknown saturation string: {saturation}') _saturation = self.map_detector_value('saturation') \ * self.map_detector_value('nonlinear') if self.units == 'e-': @@ -540,7 +540,7 @@ def build_mask(self, saturation=None, mincounts=None, slitmask=None, from_scratc # Setup the minimum counts level if isinstance(mincounts, str): if mincounts != 'default': - msgs.error(f'Unknown mincounts string: {mincounts}') + raise PypeItError(f'Unknown mincounts string: {mincounts}') _mincounts = self.map_detector_value('mincounts') if self.units == 'ADU': _mincounts /= self.map_detector_value('gain') @@ -587,7 +587,7 @@ def update_mask_slitmask(self, slitmask): """ if slitmask.shape != self.shape: - msgs.error('Slit mask image must have the same shape as data image.') + raise 
PypeItError('Slit mask image must have the same shape as data image.') # Pixels excluded from any slit. self.update_mask('OFFSLITS', action='turn_off') self.update_mask('OFFSLITS', indx=slitmask==-1) @@ -626,7 +626,7 @@ def update_mask(self, flag, indx=None, action='turn_on'): The action to perform. Must be ``'turn_on'`` or ``'turn_off'``. """ if action not in ['turn_on', 'turn_off']: - msgs.error(f'{action} is not a known bit action!') + raise PypeItError(f'{action} is not a known bit action!') if indx is None: getattr(self.fullmask, action)(flag) getattr(self.fullmask, action)(flag, select=indx) @@ -715,7 +715,7 @@ def sub(self, other): subtracting ``other`` from this image. """ if not isinstance(other, PypeItImage): - msgs.error('Image to subtract must be of type PypeItImage.') + raise PypeItError('Image to subtract must be of type PypeItImage.') # Subtract the image newimg = self.image - other.image @@ -783,7 +783,7 @@ def sub(self, other): spat_flexure = self.spat_flexure if other.spat_flexure is not None and spat_flexure is not None \ and other.spat_flexure != spat_flexure: - msgs.warn(f'Spatial flexure different for images being subtracted ({spat_flexure} ' + msgs.warning(f'Spatial flexure different for images being subtracted ({spat_flexure} ' f'vs. {other.spat_flexure}). Adopting {np.max(np.abs([spat_flexure, other.spat_flexure]))}.') # Create a copy of the detector, if it is defined, to be used when @@ -925,7 +925,7 @@ def from_hdu(cls, hdu, chk_version=True, hdu_prefix=None, **kwargs): hdr_to_parse = h.header break if hdr_to_parse is None: - msgs.error('Provided HDUList does not have any HDUs constructed by the correct ' + raise PypeItError('Provided HDUList does not have any HDUs constructed by the correct ' f'datamodel class, {cls.__name__}.') else: hdr_to_parse = hdu.header diff --git a/pypeit/images/rawimage.py b/pypeit/images/rawimage.py index d7e46f152d..1d4da0f741 100644 --- a/pypeit/images/rawimage.py +++ b/pypeit/images/rawimage.py @@ -255,7 +255,7 @@ def apply_gain(self, force=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already applied - msgs.warn('Gain was already applied.') + msgs.warning('Gain was already applied.') return # Have the images been trimmed? @@ -294,7 +294,7 @@ def build_ivar(self): `numpy.ndarray`_: The inverse variance in the image. """ if self.dark is None and self.par['shot_noise']: - msgs.error('Dark image has not been created! Run build_dark.') + raise PypeItError('Dark image has not been created! Run build_dark.') _dark = self.dark if self.par['shot_noise'] else None _counts = self.image if self.par['shot_noise'] else None # NOTE: self.dark is expected to be in *counts*. This means that @@ -316,7 +316,7 @@ def correct_nonlinear(self): step = inspect.stack()[0][3] if self.steps[step]: # Already applied - msgs.warn('Non-linear correction was already applied.') + msgs.warning('Non-linear correction was already applied.') return inim = self.image.copy() @@ -342,7 +342,7 @@ def estimate_readnoise(self): This function edits :attr:`ronoise` in place. """ if self.oscansec_img.shape != self.image.shape: - msgs.error('Must estimate readnoise before trimming the image.') + raise PypeItError('Must estimate readnoise before trimming the image.') for i in range(self.nimg): for amp in range(len(self.ronoise[i])): if self.ronoise[i,amp] > 0 and not self.par['empirical_rn']: @@ -350,7 +350,7 @@ def estimate_readnoise(self): # estimate was not explicitly requested. 
continue if not np.any(self.oscansec_img[i]==amp+1): - msgs.error(f'Cannot estimate readnoise for amplifier {amp+1}. Raw image ' + raise PypeItError(f'Cannot estimate readnoise for amplifier {amp+1}. Raw image ' 'has no overscan region!') gain = 1. if self.steps['apply_gain'] else self.detector[i]['gain'][amp] biaspix = self.image[i,self.oscansec_img[i]==amp+1] * gain @@ -377,7 +377,7 @@ def build_rn2img(self, units='e-', digitization=False): """ if not np.all(self.ronoise > 0): # TODO: Consider just calling estimate_readnoise here... - msgs.error('Some readnoise values <=0; first call estimate_readnoise.') + raise PypeItError('Some readnoise values <=0; first call estimate_readnoise.') # Have the images been trimmed? not_trimmed = self.rawimage.shape is not None and self.image.shape == self.rawimage.shape @@ -539,26 +539,26 @@ def process(self, par, bpm=None, scattlight=None, flatimages=None, bias=None, sl # Check the input if self.par['use_biasimage'] and bias is None: - msgs.error('No bias available for bias subtraction!') + raise PypeItError('No bias available for bias subtraction!') if self.par['use_darkimage'] and dark is None: - msgs.error('No dark available for dark subtraction!') + raise PypeItError('No dark available for dark subtraction!') if self.par['subtract_scattlight'] and scattlight is None: - msgs.error('Scattered light subtraction requested, but scattered light model not provided.') + raise PypeItError('Scattered light subtraction requested, but scattered light model not provided.') if self.par['spat_flexure_correct'] and slits is None: - msgs.error('Spatial flexure correction requested but no slits provided.') + raise PypeItError('Spatial flexure correction requested but no slits provided.') if self.use_flat and flatimages is None: - msgs.error('Flat-field corrections requested but no flat-field images generated ' + raise PypeItError('Flat-field corrections requested but no flat-field images generated ' 'or provided. Make sure you have flat-field images in your PypeIt file!') if self.use_slits and slits is None: # TODO: I think this should only happen as a developer error, not a # user error, but I'm not sure. - msgs.error('Processing steps requested that require slit-edge traces, but they were ' + raise PypeItError('Processing steps requested that require slit-edge traces, but they were ' 'not provided!') if self.nimg > 1 and not mosaic and (self.use_flat or self.use_slits): - msgs.error('Mosaicing must be performed if multiple detectors are processed and ' + raise PypeItError('Mosaicing must be performed if multiple detectors are processed and ' 'either flat-fielding or spatial flexure corrections are applied.') if self.nimg == 1 and mosaic: - msgs.warn('Only processing a single detector; mosaicing is ignored.') + msgs.warning('Only processing a single detector; mosaicing is ignored.') msgs.info(f'Performing basic image processing on {os.path.basename(self.filename)}.') # TODO: Checking for bit saturation should be done here. 
@@ -620,10 +620,10 @@ def process(self, par, bpm=None, scattlight=None, flatimages=None, bias=None, sl self._bpm = None # This erases the current bpm attribute if self.bpm.shape != self.image.shape: # This recreates it # This should only happen because of a coding error, not a user error - msgs.error(f'CODING ERROR: From-scratch BPM has incorrect shape!') + raise PypeItError(f'CODING ERROR: From-scratch BPM has incorrect shape!') # If the above was successful, the code can continue, but first warn # the user that the code ignored the provided bpm. - msgs.warn(f'Bad-pixel mask has incorrect shape: found {bpm_shape}, expected ' + msgs.warning(f'Bad-pixel mask has incorrect shape: found {bpm_shape}, expected ' f'{self.image.shape}. Assuming this is because different binning used for ' 'various frames. Recreating BPM specifically for this frame ' f'({os.path.basename(self.filename)}) and assuming the difference in the ' @@ -786,10 +786,10 @@ def spatial_flexure_shift(self, slits, force=False, debug=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already field flattened - msgs.warn('Spatial flexure shift already calculated.') + msgs.warning('Spatial flexure shift already calculated.') return if self.nimg > 1: - msgs.error('CODING ERROR: Must use a single image (single detector or detector ' + raise PypeItError('CODING ERROR: Must use a single image (single detector or detector ' 'mosaic) to determine spatial flexure.') # get filename for QA @@ -845,19 +845,19 @@ def flatfield(self, flatimages, slits=None, force=False, debug=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already field flattened - msgs.warn('Image was already flat fielded.') + msgs.warning('Image was already flat fielded.') return # Check input if flatimages.pixelflat_norm is None: # We cannot do any flat-field correction without a pixel flat (yet) - msgs.error("Flat fielding desired but not generated/provided.") + raise PypeItError("Flat fielding desired but not generated/provided.") if self.par['use_illumflat'] and slits is None: - msgs.error('Need to provide slits to create illumination flat.') + raise PypeItError('Need to provide slits to create illumination flat.') if self.par['use_specillum'] and flatimages.pixelflat_spec_illum is None: - msgs.error("Spectral illumination correction desired but not generated/provided.") + raise PypeItError("Spectral illumination correction desired but not generated/provided.") if self.nimg > 1: - msgs.error('CODING ERROR: Can only apply flat field to a single image (single ' + raise PypeItError('CODING ERROR: Can only apply flat field to a single image (single ' 'detector or detector mosaic).') # Generate the illumination flat, as needed @@ -903,7 +903,7 @@ def orient(self, force=False): step = inspect.stack()[0][3] # Check if already oriented if self.steps[step] and not force: - msgs.warn('Image was already oriented.') + msgs.warning('Image was already oriented.') return # Orient the image to have blue/red run bottom to top self.image = np.array([self.spectrograph.orient_image(d, i) @@ -936,11 +936,11 @@ def subtract_bias(self, bias_image, force=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already bias subtracted - msgs.warn('Image was already bias subtracted.') + msgs.warning('Image was already bias subtracted.') return _bias = bias_image.image if self.nimg > 1 else np.expand_dims(bias_image.image, 0) if self.image.shape != _bias.shape: - msgs.error('Shape mismatch with bias image!') + raise 
PypeItError('Shape mismatch with bias image!') self.image -= _bias # TODO: Also incorporate the mask? if bias_image.ivar is not None and self.proc_var is not None: @@ -1008,7 +1008,7 @@ def build_dark(self, dark_image=None, expscale=False): _dark = dark_image.image if self.nimg > 1 else np.expand_dims(dark_image.image, 0) if self.image.shape != _dark.shape: # Shapes must match - msgs.error(f'Dark image shape mismatch; expected {self.image.shape}, ' + raise PypeItError(f'Dark image shape mismatch; expected {self.image.shape}, ' f'found {_dark.shape}.') # Scale the observed dark counts by the ratio of the exposure times. @@ -1025,7 +1025,7 @@ def build_dark(self, dark_image=None, expscale=False): separator=',') drk_str = np.array2string(0.5*self.dark, formatter={'float_kind':lambda x: "%.2f" % x}, separator=',') - msgs.warn(f'Dark-subtracted dark frame has significant signal remaining. Median ' + msgs.warning(f'Dark-subtracted dark frame has significant signal remaining. Median ' f'counts are {med_str}; warning threshold = +/- {drk_str}.') # Combine the tabulated and observed dark values @@ -1052,11 +1052,11 @@ def subtract_dark(self, force=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already bias subtracted - msgs.warn('Image was already dark subtracted.') + msgs.warning('Image was already dark subtracted.') return if self.dark is None: - msgs.error('Dark image has not been created! Run build_dark.') + raise PypeItError('Dark image has not been created! Run build_dark.') self.image -= self.dark if self.dark_var is not None: @@ -1078,7 +1078,7 @@ def subtract_overscan(self, force=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already overscan subtracted - msgs.warn("Image was already overscan subtracted!") + msgs.warning("Image was already overscan subtracted!") return # NOTE: procimg.subtract_overscan checks that the provided images all @@ -1113,19 +1113,19 @@ def subtract_pattern(self): step = inspect.stack()[0][3] if self.steps[step]: # Already pattern subtracted - msgs.warn("Image was already pattern subtracted!") + msgs.warning("Image was already pattern subtracted!") return # The image cannot have already been trimmed if self.oscansec_img.shape != self.image.shape: - msgs.error('Must estimate readnoise before trimming the image.') + raise PypeItError('Must estimate readnoise before trimming the image.') # Calculate the slit image _ps_img = [None]*self.nimg for i in range(self.nimg): # The image must have an overscan region for this to work. if not np.any(self.oscansec_img[i] > 0): - msgs.error('Image has no overscan region. Pattern noise cannot be subtracted.') + raise PypeItError('Image has no overscan region. 
Pattern noise cannot be subtracted.') patt_freqs = self.spectrograph.calc_pattern_freq(self.image[i], self.datasec_img[i], self.oscansec_img[i], self.hdu) @@ -1150,7 +1150,7 @@ def subtract_continuum(self, force=False): step = inspect.stack()[0][3] if self.steps[step] and not force: # Already bias subtracted - msgs.warn('Image was already continuum subtracted.') + msgs.warning('Image was already continuum subtracted.') return # Generate the continuum image @@ -1182,11 +1182,11 @@ def subtract_scattlight(self, msscattlight, slits, debug=False): step = inspect.stack()[0][3] if self.steps[step]: # Already pattern subtracted - msgs.warn("The scattered light has already been subtracted from the image!") + msgs.warning("The scattered light has already been subtracted from the image!") return if self.par["scattlight"]["method"] == "model" and msscattlight.scattlight_param is None: - msgs.warn("Scattered light parameters are not set. Cannot perform scattered light subtraction.") + msgs.warning("Scattered light parameters are not set. Cannot perform scattered light subtraction.") return # Obtain some information that is needed for the scattered light @@ -1250,7 +1250,7 @@ def subtract_scattlight(self, msscattlight, slits, debug=False): arx_modpar, _ = self.spectrograph.scattered_light_archive(binning, dispname) arx_modpar[8] = 0.0 if arx_modpar is None: - msgs.error(f"{self.spectrograph.name} does not have archival scattered light parameters. Please " + raise PypeItError(f"{self.spectrograph.name} does not have archival scattered light parameters. Please " f"set 'scattlight_method' to another option.") scatt_img = scattlight.scattered_light_model(arx_modpar, _img) elif self.par["scattlight"]["method"] == "frame": @@ -1265,16 +1265,16 @@ def subtract_scattlight(self, msscattlight, slits, debug=False): # If failure, revert back to the Scattered Light calibration frame model parameters if not success: if msscattlight is not None: - msgs.warn("Scattered light model failed - using predefined model parameters") + msgs.warning("Scattered light model failed - using predefined model parameters") scatt_img = scattlight.scattered_light_model(this_modpar, _img) else: - msgs.warn("Scattered light model failed - using archival model parameters") + msgs.warning("Scattered light model failed - using archival model parameters") # Use archival model parameters arx_modpar, _ = self.spectrograph.scattered_light_archive(binning, dispname) arx_modpar[8] = 0.0 scatt_img = scattlight.scattered_light_model(arx_modpar, _img) else: - msgs.warn("Scattered light not performed") + msgs.warning("Scattered light not performed") scatt_img = np.zeros(self.image[ii, ...].shape) do_finecorr = False # Check if a fine correction to the scattered light should be applied @@ -1314,11 +1314,11 @@ def trim(self, force=False): # Image *must* have been trimmed already because shape does not # match raw image self.steps[step] = True - msgs.warn('Image shape does not match raw image. Assuming it was already trimmed.') + msgs.warning('Image shape does not match raw image. Assuming it was already trimmed.') return if self.steps[step] and not force: # Already trimmed - msgs.warn('Image was already trimmed.') + msgs.warning('Image was already trimmed.') return self.image = np.array([procimg.trim_frame(i, d < 1) for i, d in zip(self.image, self.datasec_img)]) @@ -1353,12 +1353,12 @@ def build_mosaic(self): if self.nimg == 1: # NOTE: This also catches cases where the mosaicing has already been # performed. 
-            msgs.warn('There is only one image, so there is nothing to mosaic!')
+            msgs.warning('There is only one image, so there is nothing to mosaic!')
             return

         # Check that the mosaicing is allowed
         if not self.steps['trim'] or not self.steps['orient']:
-            msgs.error('Images must be trimmed and PypeIt-oriented before mosaicing.')
+            raise PypeItError('Images must be trimmed and PypeIt-oriented before mosaicing.')

         # Create images that will track which detector contributes to each pixel
         # in the mosaic. These images are created here first *before*
diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py
index 42bedb3e5a..f2da8fffbc 100644
--- a/pypeit/inputfiles.py
+++ b/pypeit/inputfiles.py
@@ -107,7 +107,7 @@ def readlines(ifile:str):
         """
         # Check the files
         if not os.path.isfile(ifile):
-            msgs.error('The filename does not exist -' + msgs.newline() + ifile)
+            raise PypeItError('The filename does not exist -' + msgs.newline() + ifile)

         # Read the input lines and replace special characters
         with open(ifile, 'r') as f:
@@ -146,10 +146,10 @@ def from_file(cls, input_file:str, vet:bool=True, preserve_comments:bool=False):
         # Parse data block
         data_start, data_end = cls.find_block(lines, cls.data_block)
         if data_start >= 0 and data_end < 0:
-            msgs.error(
+            raise PypeItError(
                 f"Missing '{cls.data_block} end' in {input_file}")
         if data_start < 0 and data_end>0:
-            msgs.error("You have not specified the start of the data block!")
+            raise PypeItError("You have not specified the start of the data block!")
         # Read it, if it exists
         if data_start>0 and data_end>0:
             paths, usrtbl = cls._read_data_file_table(lines[data_start:data_end], preserve_comments)
@@ -157,7 +157,7 @@ def from_file(cls, input_file:str, vet:bool=True, preserve_comments:bool=False):
             data_block_found = True
         else:
             if cls.datablock_required:
-                msgs.error("You have not specified the data block!")
+                raise PypeItError("You have not specified the data block!")
             paths, usrtbl = [], None
             data_block_found = False
@@ -165,9 +165,9 @@ def from_file(cls, input_file:str, vet:bool=True, preserve_comments:bool=False):
         setup_found = False
         setup_start, setup_end = cls.find_block(lines, 'setup')
         if setup_start >= 0 and setup_end < 0 and cls.setup_required:
-            msgs.error(f"Missing 'setup end' in {input_file}")
+            raise PypeItError(f"Missing 'setup end' in {input_file}")
         elif setup_start < 0 and cls.setup_required:
-            msgs.error(f"Missing 'setup read' in {input_file}")
+            raise PypeItError(f"Missing 'setup read' in {input_file}")
         elif setup_start >= 0 and setup_end > 0:
             setup_found = True
@@ -227,14 +227,14 @@ def vet(self):
         # Data table
         if self.data is None:
             if self.datablock_required:
-                msgs.error("You have not specified the data block!")
+                raise PypeItError("You have not specified the data block!")
         else:
             for key in self.required_columns:
                 if key not in self.data.keys():
-                    msgs.error(f'Add {key} to the Data block of your {self.flavor} file before running.')
+                    raise PypeItError(f'Add {key} to the Data block of your {self.flavor} file before running.')

         if self.setup_required and self.setup is None:
-            msgs.error("Add setup info to your PypeIt file in the setup block!")
+            raise PypeItError("Add setup info to your PypeIt file in the setup block!")

     @property
     def setup_name(self):
@@ -303,7 +303,7 @@ def _parse_setup_lines(lines):

         # Check
         if len(setups) > 1:
-            msgs.error("Setup block contains more than one Setup!")
+            raise PypeItError("Setup block contains more than one Setup!")

         return setups, sdict
@@ -498,7 +498,7 @@ def path_and_files(self, key:str, skip_blank=False, include_commented_out=False,
             # Check we got a good hit
             if check_exists and not os.path.isfile(filename):
-                msgs.error(f"{name} does not exist in one of the provided paths. Modify your input {self.flavor} file")
+                raise PypeItError(f"{name} does not exist in one of the provided paths. Modify your input {self.flavor} file")
             data_files.append(filename)

         # Return
@@ -605,7 +605,7 @@ def get_spectrograph(self):
                 Raised if the relevant configuration parameter is not available.
         """
         if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys():
-            msgs.error('Cannot define spectrograph. Configuration file missing \n'
+            raise PypeItError('Cannot define spectrograph. Configuration file missing \n'
                        ' [rdx]\n spectrograph=\n entry.')
         return load_spectrograph(self.config['rdx']['spectrograph'])
@@ -660,12 +660,12 @@ def vet(self):

         # Confirm spectrograph is present
         if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys():
-            msgs.error(f"Missing spectrograph in the Parameter block of your PypeIt file. Add it!")
+            raise PypeItError(f"Missing spectrograph in the Parameter block of your PypeIt file. Add it!")

         # Setup
         setup_keys = list(self.setup)
         if 'Setup' not in setup_keys[0]:
-            msgs.error("Setup does not appear in your setup block! Add it")
+            raise PypeItError("Setup does not appear in your setup block! Add it")

         # Done
         msgs.info('PypeIt file successfully vetted.')
@@ -692,7 +692,7 @@ def get_pypeitpar(self):
                 no example file was available.
         """
         if 'frametype' not in self.data.keys():
-            msgs.error('PypeIt file must provide the frametype column.')
+            raise PypeItError('PypeIt file must provide the frametype column.')

         # NOTE: self.filenames is a property function that generates the full
         # set of file names each time they are requested. However, this should
@@ -755,7 +755,7 @@ def vet(self):

         # This is allowed if using an archived sensitivity function
         # And the checking has to be done in the script as the specgtrograph must be known..
         if 'sensfile' not in self.data.keys():
-            msgs.warn("sensfile column not provided. Fluxing will crash if an archived sensitivity function does not exist")
+            msgs.warning("sensfile column not provided. Fluxing will crash if an archived sensitivity function does not exist")
             self.data['sensfile'] = ''
@@ -862,7 +862,7 @@ def vet(self):

         # Confirm spectrograph is present
         if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys():
-            msgs.error(f"Missing spectrograph in the Parameter block of your .coadd2d file. Add it!")
+            raise PypeItError(f"Missing spectrograph in the Parameter block of your .coadd2d file. Add it!")

         # Done
         msgs.info('.coadd2d file successfully vetted.')
@@ -885,7 +885,7 @@ def vet(self):

         # Confirm spectrograph is present
         if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys():
-            msgs.error(f"Missing spectrograph in the Parameter block of your .coadd2d file. Add it!")
+            raise PypeItError(f"Missing spectrograph in the Parameter block of your .coadd3d file. Add it!")

         # Done
         msgs.info('.coadd3d file successfully vetted.')
@@ -975,7 +975,7 @@ def options(self):
         if grating_corr is None:
             opts['grating_corr'] = [None]*len(self.filenames)
         elif len(grating_corr) == 1 and len(self.filenames) > 1:
-            msgs.error("You cannot specify a single grating correction file for multiple input files.")
+            raise PypeItError("You cannot specify a single grating correction file for multiple input files.")
         elif len(grating_corr) != 0:
             opts['grating_corr'] = grating_corr
@@ -1010,7 +1010,7 @@ def options(self):
             opts['dec_offset'] = [odec/3600.0 for odec in off_dec]
         # Check that both have been set or both are not set
         if (off_ra is not None and off_dec is None) or (off_ra is None and off_dec is not None):
-            msgs.error("You must specify both or neither of the following arguments: ra_offset, dec_offset")
+            raise PypeItError("You must specify both or neither of the following arguments: ra_offset, dec_offset")

         # Return all options
         return opts
@@ -1042,7 +1042,7 @@ def vet(self):

         # Confirm spectrograph is present
         if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys():
-            msgs.error(f"Missing spectrograph in the Parameter block of your .flex file. Add it!")
+            raise PypeItError(f"Missing spectrograph in the Parameter block of your .flex file. Add it!")

         # Done
         msgs.info('.flex file successfully vetted.')
diff --git a/pypeit/io.py b/pypeit/io.py
index 1d230ca5eb..e0e948bd8b 100644
--- a/pypeit/io.py
+++ b/pypeit/io.py
@@ -111,7 +111,7 @@ def rec_to_fits_type(col_element, single_row=False):
     if s < 0:
         s = col_element.dtype.str.find('S')
     if s < 0:
-        msgs.error(f'Unable to parse datatype: {col_element.dtype.str}')
+        raise PypeItError(f'Unable to parse datatype: {col_element.dtype.str}')
     l = int(col_element.dtype.str[s+1:])
#    return '{0}A'.format(l) if n==1 else '{0}A{1}'.format(l*n,l)
@@ -788,16 +788,16 @@ def fits_open(filename, **kwargs):
     # to do this! Is there are more appropriate os.path function that allows
     # for this different type of object?
     if isinstance(filename, (str, Path)) and not Path(filename).absolute().exists():
-        msgs.error(f'{filename} does not exist!')
+        raise PypeItError(f'{filename} does not exist!')
     try:
         return fits.open(filename, **kwargs)
     except OSError as e:
-        msgs.warn(f'Error opening {filename} ({e}). Trying again by setting '
+        msgs.warning(f'Error opening {filename} ({e}). Trying again by setting '
                   'ignore_missing_end=True, assuming the error was a header problem.')
         try:
             return fits.open(filename, ignore_missing_end=True, **kwargs)
         except OSError as e:
-            msgs.error(f'That failed, too! Astropy is unable to open {filename} and reports the '
+            raise PypeItError(f'That failed, too! Astropy is unable to open {filename} and reports the '
                        f'following error: {e}')
@@ -883,7 +883,7 @@ def files_from_extension(raw_path, extension='.fits'):
         else:
             _raw_path, prefix = _raw_path.parent, _raw_path.name
         if not _raw_path.is_dir():
-            msgs.error(f'{_raw_path} does not exist!')
+            raise PypeItError(f'{_raw_path} does not exist!')
         ext = [extension] if isinstance(extension, str) else extension
         files = numpy.concatenate([sorted(_raw_path.glob(f'{prefix}*{e}')) for e in ext])
         return numpy.unique(files).tolist()
@@ -892,7 +892,7 @@ def files_from_extension(raw_path, extension='.fits'):
         files = numpy.concatenate([files_from_extension(p, extension=extension) for p in raw_path])
         return numpy.unique(files).tolist()

-    msgs.error(f"Incorrect type {type(raw_path)} for raw_path; must be str, Path, or list.")
+    raise PypeItError(f"Incorrect type {type(raw_path)} for raw_path; must be str, Path, or list.")
@@ -951,7 +951,7 @@ def load_telluric_grid(filename: str):
     # Check for existence of file parameter
     # TODO: Do we need this check?
     if not isinstance(filename, str) or len(filename) == 0:
-        msgs.error("No file specified for telluric correction. "
+        raise PypeItError("No file specified for telluric correction. "
                    "See https://pypeit.readthedocs.io/en/latest/telluric.html")

     # Get the data path for the filename, whether in the package directory or cache
@@ -961,7 +961,7 @@ def load_telluric_grid(filename: str):
     # Check for existance of file
     # NOTE: With the use of `PypeItDataPath.get_file_path`, this should never fault
     if not file_with_path.is_file():
-        msgs.error(f"File {file_with_path} is not on your disk. "
+        raise PypeItError(f"File {file_with_path} is not on your disk. "
                    "You likely need to download the Telluric files. "
                    "See https://pypeit.readthedocs.io/en/release/installing.html"
                    "#atmospheric-model-grids")
diff --git a/pypeit/manual_extract.py b/pypeit/manual_extract.py
index 3967b5b5b5..a15028c0e9 100644
--- a/pypeit/manual_extract.py
+++ b/pypeit/manual_extract.py
@@ -88,7 +88,7 @@ def by_fitstbl_input(cls, frame: str, inp: str, spectrograph):
         for m_e in m_es:
             loc = parse.parse_image_location(m_e, spectrograph)
             if len(loc) not in [5,6]:
-                msgs.error('Definition of manual extraction aperture does not have the correct '
+                raise PypeItError('Definition of manual extraction aperture does not have the correct '
                            f'number of parameters: {m_e}.')

             # TODO: Why is this spat:spec and not spec:spat like everything else??
diff --git a/pypeit/metadata.py b/pypeit/metadata.py
index 1371c5f803..24c17493da 100644
--- a/pypeit/metadata.py
+++ b/pypeit/metadata.py
@@ -97,7 +97,7 @@ def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,

         if data is None and files is None:
             # Warn that table will be empty
-            msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.'
+            msgs.warning('Both data and files are None in the instantiation of PypeItMetaData.'
                       ' The table will be empty!')

         # Initialize internals
@@ -162,7 +162,7 @@ def _vet_instrument(self, meta_tbl):
         """
         if 'instrument' in meta_tbl.keys():
             if self.spectrograph.header_name is None:
-                msgs.error('CODING ERROR: header_name is not defined for '
+                raise PypeItError('CODING ERROR: header_name is not defined for '
                            f'{self.spectrograph.__class__.__name__}!')
             # Check that there is only one instrument
             # This could fail if one mixes is much older calibs
@@ -172,11 +172,11 @@ def _vet_instrument(self, meta_tbl):
             # An empty table is allowed
             if len(instr_names) > 0:
                 if len(instr_names) != 1:
-                    msgs.warn(f'More than one instrument in your dataset! {instr_names} \n'
+                    msgs.warning(f'More than one instrument in your dataset! {instr_names} \n'
                               'Proceed with great caution...')
                 # Check the name
                 if not instr_names[0].startswith(self.spectrograph.header_name):
-                    msgs.warn('The instrument name in the headers of the raw files does not match the '
+                    msgs.warning('The instrument name in the headers of the raw files does not match the '
                               f'expected one! Found {instr_names[0]}, expected {self.spectrograph.header_name}. '
                               'You may have chosen the wrong PypeIt spectrograph name!')
@@ -218,7 +218,7 @@ def _build(self, files, strict=True, usrdata=None):
                 # TODO: This check should be done elsewhere
                 # Check
                 if _ifile.name != usrdata['filename'][idx].lstrip("# "):
-                    msgs.error('File name list does not match user-provided metadata table. See '
+                    raise PypeItError('File name list does not match user-provided metadata table. See '
                                'usrdata argument of instantiation of PypeItMetaData.')
                 usr_row = usrdata[idx]
@@ -243,7 +243,7 @@ def _build(self, files, strict=True, usrdata=None):
                                               self.par['rdx']['ignore_bad_headers'] or strict))
                 if isinstance(value, str) and '#' in value:
                     value = value.replace('#', '')
-                    msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(
+                    msgs.warning('Removing troublesome # character from {0}. Returning {1}.'.format(
                               meta_key, value))
                 data[meta_key].append(value)
@@ -263,7 +263,7 @@ def _build(self, files, strict=True, usrdata=None):
                    'frames either could not be opened, are empty, or have corrupt headers:\n'
             for file in bad_files:
                 msg += f' {file}\n'
-            msgs.warn(msg)
+            msgs.warning(msg)

         # Return
         return data
@@ -531,7 +531,7 @@ def construct_basename(self, row, obstime=None):
#                Raised if the 'setup' isn't been defined.
#        """
#        if 'setup' not in self.keys():
-#            msgs.error('Cannot provide instrument setup without \'setup\' column; '
+#            raise PypeItError('Cannot provide instrument setup without \'setup\' column; '
#                       'run set_configurations.')
#        dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]
#        dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]
@@ -594,7 +594,7 @@ def get_configuration_names(self, ignore=None, return_index=False, configs=None)
                 Raised if the 'setup' isn't been defined.
         """
         if 'setup' not in self.keys():
-            msgs.error('Cannot get setup names; run set_configurations.')
+            raise PypeItError('Cannot get setup names; run set_configurations.')

         # Unique configurations
         # NOTE: This annoyingly returns Column types, not np.arrays! So need to
@@ -661,7 +661,7 @@ def configuration_generator(start=0):
     @property
     def n_configs(self):
         if self.configs is None:
-            msgs.error('Configurations not defined by PypeItMetaData object. Execute '
+            raise PypeItError('Configurations not defined by PypeItMetaData object. Execute '
                        'unique_configurations first.')
         return len(list(self.configs.keys()))
@@ -723,7 +723,7 @@ def unique_configurations(self, force=False, copy=False, rm_none=False):
             uniq, indx = np.unique(self['setup'], return_index=True)
             ignore = uniq == 'None'
             if np.sum(ignore) > 0:
-                msgs.warn(f'Ignoring {np.sum(ignore)} frames with configuration set to None.')
+                msgs.warning(f'Ignoring {np.sum(ignore)} frames with configuration set to None.')
             self.configs = {}
             for i in range(len(uniq)):
                 if ignore[i]:
@@ -749,7 +749,7 @@ def unique_configurations(self, force=False, copy=False, rm_none=False):
             indx = indx[np.logical_not(np.isin(indx, ignore_indx))]

         if len(indx) == 0:
-            msgs.error('No frames to use to define configurations!')
+            raise PypeItError('No frames to use to define configurations!')

         # Instantiate the configuration generator
         cfg_gen = PypeItMetaData.configuration_generator()
@@ -783,7 +783,7 @@ def unique_configurations(self, force=False, copy=False, rm_none=False):
                     # Get the next setup identifier
                     setup = next(cfg_gen)
                 except StopIteration:
-                    msgs.error('Cannot assign more configurations! Either something went wrong'
+                    raise PypeItError('Cannot assign more configurations! Either something went wrong '
                                'or you are trying to reduce data from more than '
                                f'{PypeItMetaData.maximum_number_of_configurations()} setups!')
                 # Add the configuration
@@ -839,7 +839,7 @@ def set_configurations(self, configs=None, force=False, fill=None):
         _configs = self.unique_configurations() if configs is None else configs
         for k, cfg in _configs.items():
             if len(set(cfg.keys()) - set(self.keys())) > 0:
-                msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))
+                raise PypeItError('Configuration {0} defined using unavailable keywords!'.format(k))

         # Some frame types need to be ignored
         ignore_frames, ignore_indx = self.ignore_frames()
@@ -875,7 +875,7 @@ def set_configurations(self, configs=None, force=False, fill=None):

         # At this point, we need the frame type to continue
         if 'frametype' not in self.keys():
-            msgs.error('To account for ignored frames, types must have been defined; run '
+            raise PypeItError('To account for ignored frames, types must have been defined; run '
                        'get_frame_types.')

         # For each configuration, determine if any of the frames with
@@ -926,7 +926,7 @@ def set_configurations(self, configs=None, force=False, fill=None):
                     # Warn the user that the matching meta values are not
                     # unique for this configuration.
                     if uniq_meta.size != 1:
-                        msgs.warn('When setting the instrument configuration for {0} '.format(ftype)
+                        msgs.warning('When setting the instrument configuration for {0} '.format(ftype)
                                   + 'frames, configuration {0} does not have unique '.format(cfg_key)
                                   + '{0} values.' .format(mkey))
                     # Find the frames of this type that match any of the
@@ -951,7 +951,7 @@ def set_configurations(self, configs=None, force=False, fill=None):
             cfg_gen = self.configuration_generator(start=len(np.unique(self.table['setup'][np.logical_not(not_setup)])))
             nw_setup = next(cfg_gen)
             self.configs[nw_setup] = {}
-            msgs.warn('All files that did not match any setup are grouped into a single configuration.')
+            msgs.warning('All files that did not match any setup are grouped into a single configuration.')
             self.table['setup'][not_setup] = nw_setup

     def clean_configurations(self):
@@ -981,7 +981,7 @@ def clean_configurations(self):
             # Check that the metadata are valid for this column.
             indx = np.isin(self[key], cfg_limits[key])
             if not np.all(indx):
-                msgs.warn('Found frames with invalid {0}.'.format(key))
+                msgs.warning('Found frames with invalid {0}.'.format(key))
             good &= indx

         if np.all(good):
@@ -995,7 +995,7 @@ def clean_configurations(self):
         indx = np.where(np.logical_not(good))[0]
         for i in indx:
             msg += ' {0}\n'.format(self['filename'][i])
-        msgs.warn(msg)
+        msgs.warning(msg)

         # And remove 'em
         self.table = self.table[good]
@@ -1016,7 +1016,7 @@ def find_configuration(self, setup, index=False):
             setup/configuration.
         """
         if 'setup' not in self.keys():
-            msgs.error('Configurations not set; first execute self.unique_configurations.')
+            raise PypeItError('Configurations not set; first execute self.unique_configurations.')

         # NOTE: frames can be associated with multiple setups (namely biases),
         # meaning that we have to split the string by any separating commas.
@@ -1096,7 +1096,7 @@ def _check_calib_groups(self):
             if not is_science[i]:
                 continue
             if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:
-                msgs.error('Science frames can only be assigned to a single calibration group.')
+                raise PypeItError('Science frames can only be assigned to a single calibration group.')

     @property
     def n_calib_groups(self):
@@ -1164,7 +1164,7 @@ def set_calibration_groups(self, global_frames=None, default=False, force=False)
         # The configuration must be present to determine the calibration
         # group
         if 'setup' not in self.keys():
-            msgs.error('CODING ERROR: Must have defined \'setup\' column first; try running '
+            raise PypeItError('CODING ERROR: Must have defined \'setup\' column first; try running '
                        'set_configurations.')
         configs = np.unique(np.concatenate([_setup.split(',') for _setup in self['setup'].data])).tolist()
         if 'None' in configs:
@@ -1193,7 +1193,7 @@ def set_calibration_groups(self, global_frames=None, default=False, force=False)
         # (like biases and darks)
         if global_frames is not None:
             if 'frametype' not in self.keys():
-                msgs.error('To set global frames, types must have been defined; '
+                raise PypeItError('To set global frames, types must have been defined; '
                            'run get_frame_types.')

             calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))
@@ -1224,7 +1224,7 @@ def ignore_frames(self):
         ignmsk = np.zeros(len(self.table), dtype=bool)
         if ignore_frames is not None:
             if 'frametype' not in self.keys():
-                msgs.error('To ignore frames, types must have been defined; run get_frame_types.')
+                raise PypeItError('To ignore frames, types must have been defined; run get_frame_types.')
             list_ignore_frames = list(ignore_frames.keys())
             msgs.info('Unique configurations ignore frames with type: {0}'.format(list_ignore_frames))
             for ftype in list_ignore_frames:
@@ -1263,7 +1263,7 @@ def find_frames(self, ftype, calib_ID=None, index=False):
                 Raised if the `framebit` column is not set in the table.
         """
         if 'framebit' not in self.keys():
-            msgs.error('Frame types are not set. First run get_frame_types.')
+            raise PypeItError('Frame types are not set. First run get_frame_types.')
         if ftype == 'None':
             return self['framebit'] == 0
         # Select frames
@@ -1407,7 +1407,7 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True):
         """
         # Checks
         if 'frametype' in self.keys() or 'framebit' in self.keys():
-            msgs.warn('Removing existing frametype and framebit columns.')
+            msgs.warning('Removing existing frametype and framebit columns.')
         if 'frametype' in self.keys():
             del self.table['frametype']
         if 'framebit' in self.keys():
@@ -1421,16 +1421,16 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True):
         if user is not None:
             if len(user.keys()) != len(self):
                 if len(np.unique(self['filename'].data)) != len(self):
-                    msgs.error('Your pypeit file has duplicate filenames which is not allowed.')
+                    raise PypeItError('Your pypeit file has duplicate filenames which is not allowed.')
                 else:
-                    msgs.error('The user-provided dictionary does not match table length.')
+                    raise PypeItError('The user-provided dictionary does not match table length.')
             msgs.info('Using user-provided frame types.')
             for ifile,ftypes in user.items():
                 indx = self['filename'] == ifile
                 try:
                     type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))
                 except ValueError as err:
-                    msgs.error(f'Improper frame type supplied!{msgs.newline()}'
+                    raise PypeItError(f'Improper frame type supplied!{msgs.newline()}'
                                f'{err}{msgs.newline()}'
                                'Check your PypeIt Reduction File')
             return self.set_frame_types(type_bits, merge=merge)
@@ -1459,8 +1459,8 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True):
             for f in self['filename'][indx]:
                 msgs.info(f)
             if not flag_unknown:
-                msgs.error("Check these files before continuing")
-            msgs.warn("These files are commented out and will be ignored during the reduction.")
+                raise PypeItError("Check these files before continuing")
+            msgs.warning("These files are commented out and will be ignored during the reduction.")
             # Comment out the frames that could not be identified
             # first change the dtype of the filename column to be able to add a #
             self['filename'] = self['filename'].value.astype(f"
diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py
@@ ... @@ def validate(self):
         if self['max_overlap'] is not None and (self['max_overlap'] < 0 or self['max_overlap'] > 1):
-            msgs.error('If defined, max_overlap must be in the range [0,1].')
+            raise PypeItError('If defined, max_overlap must be in the range [0,1].')
         if self['order_outlier'] is not None and self['order_outlier'] < self['order_fitrej']:
-            msgs.warn('Order outlier threshold should not be less than the rejection threshold.')
+            msgs.warning('Order outlier threshold should not be less than the rejection threshold.')


 class WaveTiltsPar(ParSet):
@@ -4478,9 +4478,9 @@ def from_dict(cls, cfg):
     def validate(self):
         if self.data['std_spec1d'] is not None:
             if not self.data['use_std_trace']:
-                msgs.error('If you provide a standard star spectrum for tracing, you must set use_std_trace=True.')
+                raise PypeItError('If you provide a standard star spectrum for tracing, you must set use_std_trace=True.')
             elif not Path(self.data['std_spec1d']).absolute().exists():
-                msgs.error(f'{self.data["std_spec1d"]} does not exist!')
+                raise PypeItError(f'{self.data["std_spec1d"]} does not exist!')


 class SkySubPar(ParSet):
@@ -5242,7 +5242,7 @@ def from_cfg_lines(cls, cfg_lines=None, merge_with=None, evaluate=True):
             if isinstance(merge_with, list):
                 merge_with = (merge_with,)
             if not isinstance(merge_with, tuple):
-                msgs.error('Input merge_with must be a tuple.')
+                raise PypeItError('Input merge_with must be a tuple.')
             # Proceed
             for f in merge_with:
                 cfg.merge(ConfigObj(f))
diff --git a/pypeit/par/util.py b/pypeit/par/util.py
index 884097b9ff..8e4412f638 100644
--- a/pypeit/par/util.py
+++ b/pypeit/par/util.py
@@ -45,7 +45,7 @@ def eval_tuple(inp):
     try:
         basic = eval(joined)
     except:
-        msgs.error(f'Cannot evaluate {joined} into a valid tuple.')
+        raise PypeItError(f'Cannot evaluate {joined} into a valid tuple.')

     # If any element of the basic evaluation is also a tuple, assume the result
     # of the evaluation is a tuple of tuples. This is converted to a list.
diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py
index a81ae05863..b856463417 100644
--- a/pypeit/pypeit.py
+++ b/pypeit/pypeit.py
@@ -250,9 +250,9 @@ def get_std_outfile(self, standard_frames):
         std_outfile = self.par['reduce']['findobj']['std_spec1d']
         if std_outfile is not None:
             if not self.par['reduce']['findobj']['use_std_trace']:
-                msgs.error('If you provide a standard star spectrum for tracing, you must set use_std_trace=True')
+                raise PypeItError('If you provide a standard star spectrum for tracing, you must set use_std_trace=True')
             elif not Path(std_outfile).absolute().exists():
-                msgs.error(f'Provided standard spec1d file does not exist: {std_outfile}')
+                raise PypeItError(f'Provided standard spec1d file does not exist: {std_outfile}')
             return std_outfile

         # TODO: Need to decide how to associate standards with
@@ -267,7 +267,7 @@ def get_std_outfile(self, standard_frames):
         std_outfile = self.spec_output_file(std_frame) \
                         if isinstance(std_frame, (int,np.integer)) else None
         if std_outfile is not None and not std_outfile.is_file():
-            msgs.error(f'Could not find standard file: {std_outfile}')
+            raise PypeItError(f'Could not find standard file: {std_outfile}')
         return std_outfile

     def calib_all(self):
@@ -299,7 +299,7 @@ def calib_all(self):
                 self.caliBrate = self.calib_one(grp_frames, self.det)
                 if not self.caliBrate.success:
-                    msgs.warn(f'Calibrations for detector {self.det} were unsuccessful! The step '
+                    msgs.warning(f'Calibrations for detector {self.det} were unsuccessful! The step '
                               f'that failed was {self.caliBrate.failed_step}. Continuing to next '
                               f'detector.')
@@ -333,7 +333,7 @@ def reduce_all(self):
         # run if there are no science/standard frames and `run_pypeit` is run
         # without -c flag
         if not np.any(is_science) and not np.any(is_standard):
-            msgs.error('No science/standard frames provided. Add them to your PypeIt file '
+            raise PypeItError('No science/standard frames provided. Add them to your PypeIt file '
                        'if this is a standard run! Otherwise run calib_only reduction using -c flag')

         # Frame indices
@@ -404,7 +404,7 @@ def reduce_all(self):
         # for now...
#            # Quicklook mode?
#            if self.par['rdx']['quicklook'] and j > 0:
-#                msgs.warn('PypeIt executed in quicklook mode. Only reducing science frames '
+#                msgs.warning('PypeIt executed in quicklook mode. Only reducing science frames '
#                          'in the first combination group!')
#                break
#
@@ -436,10 +436,10 @@ def reduce_all(self):
                     self.save_exposure(frames[0], sci_spec2d, sci_sobjs, self.basename, history,
                                        skip_write_2d=self.par['scienceframe']['process']['skip_write_2d'])
                 else:
-                    msgs.warn('No spec2d and spec1d saved to file because the '
+                    msgs.warning('No spec2d and spec1d saved to file because the '
                               'calibration/reduction was not successful for all the detectors')
             else:
-                msgs.warn(f'Output file: {self.fitstbl.construct_basename(frames[0])} already '
+                msgs.warning(f'Output file: {self.fitstbl.construct_basename(frames[0])} already '
                           'exists. Set overwrite=True to recreate and overwrite.')

             msgs.info(f'Finished calibration group {calib_ID}')
@@ -565,7 +565,7 @@ def reduce_exposure(self, frames, bg_frames=None, std_outfile=None):
             # run calibration
             self.caliBrate = self.calib_one(frames, self.det)
             if not self.caliBrate.success:
-                msgs.warn(f'Calibrations for detector {self.det} were unsuccessful! The step '
+                msgs.warning(f'Calibrations for detector {self.det} were unsuccessful! The step '
                           f'that failed was {self.caliBrate.failed_step}. Continuing by '
                           f'skipping this detector.')
                 continue
@@ -677,7 +677,7 @@ def get_sci_metadata(self, frame, det):
         elif 'standard' in types:
             objtype_out = 'standard'
         else:
-            msgs.error('get_sci_metadata() should only be run on standard or science frames. '
+            raise PypeItError('get_sci_metadata() should only be run on standard or science frames. '
                        f'Types of this frame are: {types}')
         calib_key = CalibFrame.construct_calib_key(self.fitstbl['setup'][frame],
                                                    self.fitstbl['calib'][frame],
@@ -715,7 +715,7 @@ def calib_one(self, frames, det, stop_at_step:str=None):

         # Check
         if stop_at_step is not None and stop_at_step not in caliBrate.steps:
-            msgs.error(f"Requested stop_at_step={stop_at_step} is not a valid calibration step.\n Allowed steps are: {caliBrate.steps}")
+            raise PypeItError(f"Requested stop_at_step={stop_at_step} is not a valid calibration step.\n Allowed steps are: {caliBrate.steps}")

         # These need to be separate to accomodate COADD2D
         caliBrate.set_config(frames[0], det, self.par['calibrations'])
@@ -921,7 +921,7 @@ def load_skyregions(self, initial_slits=False, scifile=None, frame=None, spat_fl
                                                       basename=io.remove_suffix(scifile))
             regfile = Path(regfile).absolute()
             if not regfile.exists():
-                msgs.error(f'Unable to find SkyRegions file: {regfile} . Create a SkyRegions '
+                raise PypeItError(f'Unable to find SkyRegions file: {regfile} . Create a SkyRegions '
                            'frame using pypeit_skysub_regions, or change the user_regions to '
                            'the percentage format. See documentation.')
             msgs.info(f'Loading SkyRegions file: {regfile}')
@@ -940,9 +940,9 @@ def load_skyregions(self, initial_slits=False, scifile=None, frame=None, spat_fl
         # Get the regions
         status, regions = skysub.read_userregions(skyregtxt, self.caliBrate.slits.nslits, maxslitlength)
         if status == 1:
-            msgs.error("Unknown error in sky regions definition. Please check the value:" + msgs.newline() + skyregtxt)
+            raise PypeItError("Unknown error in sky regions definition. Please check the value:" + msgs.newline() + skyregtxt)
         elif status == 2:
-            msgs.error("Sky regions definition must contain a percentage range, and therefore must contain a ':'")
+            raise PypeItError("Sky regions definition must contain a percentage range, and therefore must contain a ':'")

         # Generate and return image
         return skysub.generate_mask(self.spectrograph.pypeline, regions, self.caliBrate.slits,
                                     slits_left, slits_right, spat_flexure=spat_flexure)
diff --git a/pypeit/pypeitdata.py b/pypeit/pypeitdata.py
index 10db419468..5027dae124 100644
--- a/pypeit/pypeitdata.py
+++ b/pypeit/pypeitdata.py
@@ -97,7 +97,7 @@ class PypeItDataPath:
     def __init__(self, subdirs, remote_host=None):
         if remote_host not in [None, 's3_cloud', 'github']:
-            msgs.error(f'Remote host not recognized: {self.host}')
+            raise PypeItError(f'Remote host not recognized: {remote_host}')
         self.host = remote_host
         self.subdirs = subdirs
         self.data = self.check_isdir(cache.__PYPEIT_DATA__)
@@ -158,7 +158,7 @@ def __truediv__(self, p):
                                    remote_host=self.host)
         if (self.path / p).is_file():
             return self.path / p
-        msgs.error(f'{str(self.path / p)} is not a valid PypeIt data path or is a file '
+        raise PypeItError(f'{str(self.path / p)} is not a valid PypeIt data path or is a file '
                    'that does not exist.', cls='PypeItPathError')

     @staticmethod
@@ -178,7 +178,7 @@ def check_isdir(path:pathlib.Path) -> pathlib.Path:
                 Raised if the path does not exist or is not a directory.
         """
         if not path.is_dir():
-            msgs.error(f"Unable to find {path}. Check your installation.", cls='PypeItPathError')
+            raise PypeItError(f"Unable to find {path}. Check your installation.", cls='PypeItPathError')
         return path

     @staticmethod
@@ -305,7 +305,7 @@ def get_file_path(self, data_file, force_update=False, to_pkg=None, return_forma
         _cached_file = cache.fetch_remote_file(data_file, subdir, remote_host=self.host,
                                                force_update=force_update, return_none=return_none)
         if _cached_file is None:
-            msgs.warn(f'File {data_file} not found in the cache.')
+            msgs.warning(f'File {data_file} not found in the cache.')
             return None

         # If we've made it this far, the file is being pulled from the cache.
diff --git a/pypeit/pypeitsetup.py b/pypeit/pypeitsetup.py index 6511705497..3c78cbe192 100644 --- a/pypeit/pypeitsetup.py +++ b/pypeit/pypeitsetup.py @@ -101,7 +101,7 @@ def __init__(self, file_list, frametype=None, usrdata=None, setups=None, cfg_lin # The provided list of files cannot be None if file_list is None or len(file_list) == 0: - msgs.error('Must provide a list of files to be reduced!') + raise PypeItError('Must provide a list of files to be reduced!') # Save input self.file_list = file_list @@ -116,7 +116,7 @@ def __init__(self, file_list, frametype=None, usrdata=None, setups=None, cfg_lin # Cannot proceed without spectrograph name if _spectrograph_name is None: - msgs.error('Must provide spectrograph name directly or using configuration lines.') + raise PypeItError('Must provide spectrograph name directly or using configuration lines.') # Instantiate the spectrograph self.spectrograph = load_spectrograph(_spectrograph_name) @@ -191,7 +191,7 @@ def from_file_root(cls, root, spectrograph, extension=None): files = spec.find_raw_files(root, extension=extension) nfiles = len(files) if nfiles == 0: - msgs.error(f'Unable to find any raw files for {spec.name} in {root}!') + raise PypeItError(f'Unable to find any raw files for {spec.name} in {root}!') else: msgs.info(f'Found {nfiles} {spec.name} raw files.') return cls.from_rawfiles(files, spectrograph) @@ -252,7 +252,7 @@ def append_user_cfg(self, user_cfg:list=None): def nfiles(self): """The number of files to reduce.""" if self.fitstbl is None: - msgs.warn('No fits files have been read!') + msgs.warning('No fits files have been read!') return 0 if self.fitstbl is None else len(self.fitstbl) def __repr__(self): @@ -365,7 +365,7 @@ class is fully instantiated such that the user can if clean_config: self.fitstbl.clean_configurations() if len(self.fitstbl) == 0: - msgs.error('Cleaning the configurations removed all the files! Rerun ' + raise PypeItError('Cleaning the configurations removed all the files! Rerun ' 'pypeit_setup with the --keep_bad_frames option.') # Determine the type of each frame. diff --git a/pypeit/scattlight.py b/pypeit/scattlight.py index 675da0d07b..6ccd416236 100644 --- a/pypeit/scattlight.py +++ b/pypeit/scattlight.py @@ -102,7 +102,7 @@ def get_model(self, image): """ msgs.info("Generating a scattered light image") if self.scattlight_param is None: - msgs.warn("No scattered light parameters are available") + msgs.warning("No scattered light parameters are available") return np.zeros_like(image) # Return the model of the scattered light return scattlight.scattered_light_model_pad(self.scattlight_param, image) diff --git a/pypeit/scripts/arxiv_solution.py b/pypeit/scripts/arxiv_solution.py index 8370e6d9c5..88d11d4630 100644 --- a/pypeit/scripts/arxiv_solution.py +++ b/pypeit/scripts/arxiv_solution.py @@ -42,9 +42,9 @@ def main(args): # Check that a file has been provided if args.file is None: - msgs.error('You must input a MasterWaveCalib file') + raise PypeItError('You must input a MasterWaveCalib file') elif not os.path.exists(args.file): - msgs.error("The following MasterWaveCalib file does not exist:" + msgs.newline() + args.file) + raise PypeItError("The following MasterWaveCalib file does not exist:" + msgs.newline() + args.file) # Load the wavelength calibration file wv_calib = WaveCalib.from_file(args.file, chk_version=chk_version) @@ -60,7 +60,7 @@ def main(args): thismsg += "There are no good slits - the WaveCalib file is bad." 
else: thismsg += "Try one of the following slits, instead: " + msgs.newline() + ", ".join(gd_slits) - msgs.error(thismsg) + raise PypeItError(thismsg) wave = wv_calib['wv_fits'][args.slit]['wave_soln'].flatten() spec = wv_calib['wv_fits'][args.slit]['spec'].flatten() outname = args.file.replace(".fits", "_arXiv.fits") diff --git a/pypeit/scripts/cache_github_data.py b/pypeit/scripts/cache_github_data.py index b7ec524722..b90a0035f4 100644 --- a/pypeit/scripts/cache_github_data.py +++ b/pypeit/scripts/cache_github_data.py @@ -79,7 +79,7 @@ def main(args): # Access the repo; use a token if one is available if os.getenv('GITHUB_TOKEN') is None: - msgs.warn('GITHUB_TOKEN environmental variable is not defined, meaning script will ' + msgs.warning('GITHUB_TOKEN environmental variable is not defined, meaning script will ' 'not authenticate a GitHub user via an OAuth token. Beware of rate limits!') auth = None else: diff --git a/pypeit/scripts/chk_edges.py b/pypeit/scripts/chk_edges.py index 1c64e3d822..c482d543a0 100644 --- a/pypeit/scripts/chk_edges.py +++ b/pypeit/scripts/chk_edges.py @@ -51,7 +51,7 @@ def main(args): slit_filename = Path(args.slits_file).absolute() if not slit_filename.exists(): # But doesn't exist - msgs.warn(f'{slit_filename} does not exist!') + msgs.warning(f'{slit_filename} does not exist!') # Set the file name to None so that the code will try to find # the default file slit_filename = None @@ -59,7 +59,7 @@ def main(args): slit_filename = slittrace.SlitTraceSet.construct_file_name( edges.traceimg.calib_key, calib_dir=edges.traceimg.calib_dir) if not slit_filename.exists(): - msgs.warn(f'{slit_filename} does not exist!') + msgs.warning(f'{slit_filename} does not exist!') # NOTE: At this point, slit_filename *must* be a Path object slits = slittrace.SlitTraceSet.from_file(slit_filename, chk_version=chk_version) \ diff --git a/pypeit/scripts/chk_flexure.py b/pypeit/scripts/chk_flexure.py index 74faf77b01..d26d609ff9 100644 --- a/pypeit/scripts/chk_flexure.py +++ b/pypeit/scripts/chk_flexure.py @@ -53,13 +53,13 @@ def main(args): allspec2D.flexure_diagnostics(flexure_type=flexure_type) elif 'DMODCLS' in head0.keys() and head0['DMODCLS'].strip() == 'SpecObjs': if flexure_type == 'spat': - msgs.error("Spat flexure not available in the spec1d file, try with a " + raise PypeItError("Spat flexure not available in the spec1d file, try with a " "spec2d file") # load the spec1d file sobjs = specobjs.SpecObjs.from_fitsfile(in_file, chk_version=chk_version) sobjs.flexure_diagnostics() else: - msgs.error("Bad file type input!") + raise PypeItError("Bad file type input!") # space between files for clarity print('') diff --git a/pypeit/scripts/chk_for_calibs.py b/pypeit/scripts/chk_for_calibs.py index fa2fe7245b..0a76ba1d70 100644 --- a/pypeit/scripts/chk_for_calibs.py +++ b/pypeit/scripts/chk_for_calibs.py @@ -124,7 +124,7 @@ def main(args): msgs.info('Setting configuration-specific parameters using {0}'.format( os.path.split(config_specific_file)[1])) else: - msgs.warn('No science or standard frame. Punting..') + msgs.warning('No science or standard frame. Punting..') answers['pass'][i] = False answers['scifiles'][i] = None continue @@ -142,14 +142,14 @@ def main(args): answers['scifiles'][i] \ = ', '.join(ps.fitstbl['filename'][in_cfg & is_science].tolist()) else: - msgs.warn("This setup has no science frames!") + msgs.warning("This setup has no science frames!") answers['scifiles'][i] = '' # Check! 
answers['pass'][i] = calibrations.check_for_calibs(par, ps.fitstbl, raise_error=False, cut_cfg=in_cfg) if not answers['pass'][i]: - msgs.warn("Setup {} did not pass the calibration check!".format(setup)) + msgs.warning("Setup {} did not pass the calibration check!".format(setup)) print('= RESULTS ============================================') # Print diff --git a/pypeit/scripts/chk_noise_1dspec.py b/pypeit/scripts/chk_noise_1dspec.py index 88577e109b..252ed627ec 100644 --- a/pypeit/scripts/chk_noise_1dspec.py +++ b/pypeit/scripts/chk_noise_1dspec.py @@ -254,7 +254,7 @@ def main(args): input_mask &= lbda < args.wavemax if lbda[input_mask].size < 10: - msgs.warn("The spectrum was cut down to <10 pixels. Skipping") + msgs.warning("The spectrum was cut down to <10 pixels. Skipping") continue # determine if plotting the shaded area in the plot that shows the diff --git a/pypeit/scripts/chk_noise_2dspec.py b/pypeit/scripts/chk_noise_2dspec.py index 95e5287ae9..56a7bff5ea 100644 --- a/pypeit/scripts/chk_noise_2dspec.py +++ b/pypeit/scripts/chk_noise_2dspec.py @@ -238,7 +238,7 @@ def main(args): all_maskdef_ids = spec2DObj.slits.maskdef_id all_pypeit_ids = spec2DObj.slits.slitord_id if args.maskdef_id is not None and all_maskdef_ids is None: - msgs.error('This spec2d does not have maskdef_id. Choose a pypeit_id insteed.') + raise PypeItError('This spec2d does not have maskdef_id. Choose a pypeit_id insteed.') # Build the mask input_mask = spec2DObj.bpmmask.mask == 0 @@ -274,7 +274,7 @@ def main(args): # Cut down chi_select = chi_slit * input_mask if np.all(chi_select == 0): - msgs.warn(f"All of the chi values are masked in slit {pypeit_id} of {basename}!") + msgs.warning(f"All of the chi values are masked in slit {pypeit_id} of {basename}!") continue # Flux to show @@ -293,7 +293,7 @@ def main(args): # Wavelengths if spec2DObj.waveimg[input_mask].size == 0: - msgs.warn(f"None of the wavelength values work in slit {pypeit_id} of {basename}!") + msgs.warning(f"None of the wavelength values work in slit {pypeit_id} of {basename}!") continue lbda_1darray = spec2DObj.waveimg[:, mid_spat] diff --git a/pypeit/scripts/chk_plugins.py b/pypeit/scripts/chk_plugins.py index b6e2c6bcb2..0d6c7b063b 100644 --- a/pypeit/scripts/chk_plugins.py +++ b/pypeit/scripts/chk_plugins.py @@ -16,7 +16,7 @@ def main(args): success, report = plugins_available(return_report=True) if not success: - msgs.error(report) + raise PypeItError(report) msgs.info('All required plugins found: {0}'.format(', '.join(required_plugins))) diff --git a/pypeit/scripts/chk_scattlight.py b/pypeit/scripts/chk_scattlight.py index 548b9fd536..26451d2d70 100644 --- a/pypeit/scripts/chk_scattlight.py +++ b/pypeit/scripts/chk_scattlight.py @@ -59,13 +59,13 @@ def main(args): # Load the alternate file if requested display_frame = None # The default is to display the frame used to calculate the scattered light model if args.spec2d is not None: - msgs.error("displaying the spec2d scattered light is not currently supported") + raise PypeItError("displaying the spec2d scattered light is not currently supported") try: # TODO :: the spec2d file may have already had the scattered light removed, so this is not correct. 
This script only works when the scattered light is turned off for the spec2d file spec2D = spec2dobj.Spec2DObj.from_file(args.spec2d, detname, chk_version=chk_version) except PypeItDataModelError: - msgs.warn(f"Error loading spec2d file {args.spec2d} - attempting to load science image from fits") + msgs.warning(f"Error loading spec2d file {args.spec2d} - attempting to load science image from fits") spec2D = None # Now set the frame to be displayed diff --git a/pypeit/scripts/chk_wavecalib.py b/pypeit/scripts/chk_wavecalib.py index 9c8e767e1b..9446ff5d51 100644 --- a/pypeit/scripts/chk_wavecalib.py +++ b/pypeit/scripts/chk_wavecalib.py @@ -44,7 +44,7 @@ def main(args): elif 'PYP_CLS' in head0.keys() and head0['PYP_CLS'].strip() == 'AllSpec2DObj': file_type = 'AllSpec2D' else: - msgs.error("Bad file type input!") + raise PypeItError("Bad file type input!") if file_type == 'WaveCalib': waveCalib = wavecalib.WaveCalib.from_file(in_file, chk_version=chk_version) @@ -66,5 +66,5 @@ def main(args): continue else: # Should not get here unless it can't read either file type - msgs.error("Unrecognized file type. Must be a WaveCalib or spec2d file.") + raise PypeItError("Unrecognized file type. Must be a WaveCalib or spec2d file.") diff --git a/pypeit/scripts/clean_cache.py b/pypeit/scripts/clean_cache.py index 71e30177c1..e117394bf6 100644 --- a/pypeit/scripts/clean_cache.py +++ b/pypeit/scripts/clean_cache.py @@ -47,7 +47,7 @@ def main(args): return if args.pattern is None and not args.clear: - msgs.error('Arguments provided not sufficient to find files for deletion.') + raise PypeItError('Arguments provided not sufficient to find files for deletion.') if args.clear: # Removes the entire cache @@ -77,7 +77,7 @@ def main(args): # For now, we only need the urls. contents = list(contents.keys()) if len(contents) == 0: - msgs.warn('No files to remove.') + msgs.warning('No files to remove.') return # Report diff --git a/pypeit/scripts/coadd_1dspec.py b/pypeit/scripts/coadd_1dspec.py index 5cce61cb00..a3b8ab178d 100644 --- a/pypeit/scripts/coadd_1dspec.py +++ b/pypeit/scripts/coadd_1dspec.py @@ -38,7 +38,7 @@ def build_coadd_file_name(spec1dfiles, spectrograph): try: mjd_list.append(float(fits.getheader(f)['MJD'])) except Exception as e: - msgs.error(f"Failed to read MJD from {f}: {e}") + raise PypeItError(f"Failed to read MJD from {f}: {e}") start_mjd = np.min(mjd_list) end_mjd = np.max(mjd_list) @@ -171,7 +171,7 @@ def main(args): merge_with=(coadd1dFile.cfg_lines,)) # Check that sensfunc column is populated if this is echelle if spectrograph.pypeline == 'Echelle' and coadd1dFile.sensfiles is None: - msgs.error("To coadd echelle spectra, the 'sensfile' column must present in your .coadd1d file") + raise PypeItError("To coadd echelle spectra, the 'sensfile' column must be present in your .coadd1d file") # Write the par to disk print("Writing the parameters to {}".format(args.par_outfile)) diff --git a/pypeit/scripts/coadd_2dspec.py b/pypeit/scripts/coadd_2dspec.py index 8d8bb80cb2..d7ae8bff52 100644 --- a/pypeit/scripts/coadd_2dspec.py +++ b/pypeit/scripts/coadd_2dspec.py @@ -74,16 +74,16 @@ def main(args): # Check some of the parameters # TODO Heliocentric for coadd2d needs to be thought through. Currently turning it off. if par['calibrations']['wavelengths']['refframe'] != 'observed': - msgs.warn('Wavelength reference frame shift (e.g., heliocentric correction) not yet ' + msgs.warning('Wavelength reference frame shift (e.g., heliocentric correction) not yet ' 'fully developed.
Ignoring input and setting "refframe = observed".') par['calibrations']['wavelengths']['refframe'] = 'observed' # TODO Flexure correction for coadd2d needs to be thought through. Currently turning it off. if par['flexure']['spec_method'] != 'skip': - msgs.warn('Spectroscopic flexure correction not yet fully developed. Skipping.') + msgs.warning('Spectroscopic flexure correction not yet fully developed. Skipping.') par['flexure']['spec_method'] = 'skip' # TODO This is currently the default for 2d coadds, but we need a way to toggle it on/off if not par['reduce']['findobj']['skip_skysub']: - msgs.warn('Must skip sky subtraction when finding objects (i.e., sky should have ' + msgs.warning('Must skip sky subtraction when finding objects (i.e., sky should have ' 'been subtracted during primary reduction procedure). Skipping.') par['reduce']['findobj']['skip_skysub'] = True @@ -146,7 +146,7 @@ def main(args): only_dets, only_spat_ids = parse.parse_slitspatnum(par['coadd2d']['only_slits']) if par['coadd2d']['exclude_slits'] is not None: if par['coadd2d']['only_slits'] is not None: - msgs.warn('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. ' + msgs.warning('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. ' 'Using `only_slits` and ignoring `exclude_slits`') else: exclude_dets, exclude_spat_ids = parse.parse_slitspatnum(par['coadd2d']['exclude_slits']) diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index a07d935386..a55f01537a 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -39,7 +39,7 @@ def main(args): # Check that a file has been provided if args.file is None: - msgs.error('You must input a coadd3d file') + raise PypeItError('You must input a coadd3d file') # Read in the relevant information from the .coadd3d file coadd3dfile = inputfiles.Coadd3DFile.from_file(args.file) diff --git a/pypeit/scripts/collate_1d.py b/pypeit/scripts/collate_1d.py index a8d72e2759..5ac8e59a23 100644 --- a/pypeit/scripts/collate_1d.py +++ b/pypeit/scripts/collate_1d.py @@ -189,7 +189,7 @@ def exclude_source_objects(source_objects, exclude_map, par): if sobj.OPT_COUNTS is None and sobj.BOX_COUNTS is None: msg = f'Excluding {sobj.NAME} in {spec1d_file} because of missing both OPT_COUNTS and BOX_COUNTS' - msgs.warn(msg) + msgs.warning(msg) excluded_messages.append(msg) continue @@ -204,7 +204,7 @@ def exclude_source_objects(source_objects, exclude_map, par): msg = f'Excluding {sobj.NAME} in {spec1d_file} because all of OPT_COUNTS was masked out. Consider changing ex_value to "BOX".' if msg is not None: - msgs.warn(msg) + msgs.warning(msg) excluded_messages.append(msg) continue @@ -219,7 +219,7 @@ def exclude_source_objects(source_objects, exclude_map, par): msg = f'Excluding {sobj.NAME} in {spec1d_file} because all of BOX_COUNTS was masked out. Consider changing ex_value to "OPT".' 
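The collate_1d exclusion logic above follows a warn-and-collect pattern: every skipped object is logged immediately and also recorded so a summary can be reported at the end of the run. A simplified, self-contained sketch of that pattern (the names and the plain logging.Logger below are illustrative stand-ins, not PypeIt's actual objects):

import logging

msgs = logging.getLogger('pypeit')  # stand-in for PypeIt's msgs

def filter_sources(sources):
    kept, excluded_messages = [], []
    for name, counts in sources:
        if counts is None:
            msg = f'Excluding {name} because its counts are missing'
            msgs.warning(msg)              # immediate feedback
            excluded_messages.append(msg)  # kept for the end-of-run summary
            continue
        kept.append((name, counts))
    return kept, excluded_messages

kept, report = filter_sources([('SPAT0100', [1.0, 2.0]), ('SPAT0200', None)])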
if msg is not None: - msgs.warn(msg) + msgs.warning(msg) excluded_messages.append(msg) continue @@ -252,8 +252,8 @@ def read_spec1d_files(par, spec1d_files, failure_msgs): good_spec1d_files.append(spec1d_file) except Exception as e: formatted_exception = traceback.format_exc() - msgs.warn(formatted_exception) - msgs.warn(f"Failed to read {spec1d_file}, skipping it.") + msgs.warning(formatted_exception) + msgs.warning(f"Failed to read {spec1d_file}, skipping it.") failure_msgs.append(f"Failed to read {spec1d_file}, skipping it.") failure_msgs.append(formatted_exception) @@ -279,7 +279,7 @@ def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): # Make sure fluxing from archive is supported for this spectrograph if spectrograph.name not in SensFileArchive.supported_spectrographs(): - msgs.error(f"Flux calibrating {spectrograph.name} with an archived sensfunc is not supported.") + raise PypeItError(f"Flux calibrating {spectrograph.name} with an archived sensfunc is not supported.") par['fluxcalib']['extrap_sens'] = True @@ -292,8 +292,8 @@ def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): sens_file = sf_archive.get_archived_sensfile(spec1d_file) except Exception: formatted_exception = traceback.format_exc() - msgs.warn(formatted_exception) - msgs.warn(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") + msgs.warning(formatted_exception) + msgs.warning(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") failed_fluxing_msgs.append(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") failed_fluxing_msgs.append(formatted_exception) continue @@ -307,8 +307,8 @@ def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): except Exception: formatted_exception = traceback.format_exc() - msgs.warn(formatted_exception) - msgs.warn(f"Failed to flux calibrate {spec1d_file}, skipping it.") + msgs.warning(formatted_exception) + msgs.warning(f"Failed to flux calibrate {spec1d_file}, skipping it.") failed_fluxing_msgs.append(f"Failed to flux calibrate {spec1d_file}, skipping it.") failed_fluxing_msgs.append(formatted_exception) continue @@ -476,7 +476,7 @@ def find_spec2d_from_spec1d(spec1d_files): spec2d_file = os.path.join(path, filename.replace('spec1d', 'spec2d', 1)) if not os.path.exists(spec2d_file): - msgs.error(f'Could not find matching spec2d file for {spec1d_file}') + raise PypeItError(f'Could not find matching spec2d file for {spec1d_file}') spec2d_files.append(spec2d_file) @@ -826,8 +826,8 @@ def main(args): successful_source_list.append(source) except Exception: formatted_exception = traceback.format_exc() - msgs.warn(formatted_exception) - msgs.warn(f"Failed to coadd {coaddfile}, skipping") + msgs.warning(formatted_exception) + msgs.warning(f"Failed to coadd {coaddfile}, skipping") failed_source_msgs.append(f"Failed to coadd {coaddfile}:") failed_source_msgs.append(formatted_exception) diff --git a/pypeit/scripts/compile_wvarxiv.py b/pypeit/scripts/compile_wvarxiv.py index db70d50862..c4c6b93384 100644 --- a/pypeit/scripts/compile_wvarxiv.py +++ b/pypeit/scripts/compile_wvarxiv.py @@ -71,13 +71,13 @@ def main(args): # Does a file already exist? if out_path.exists() and not args.append: - msgs.error(f'File {out_path} already exists. Use --append to overwrite the file and add your new solutions to the existing ones.') + raise PypeItError(f'File {out_path} already exists. Use --append to overwrite the file and add your new solutions to the existing ones.') # What if user asks to append solutions? 
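The find_spec2d_from_spec1d() hunk above pairs each spec1d file with its spec2d sibling by a pure name substitution. A hedged, self-contained sketch of that mapping (PypeItError here is a local stand-in for the example):

from pathlib import Path

class PypeItError(Exception):
    """Local stand-in for the example."""

def find_spec2d_from_spec1d(spec1d_files):
    # Same directory, first 'spec1d' in the name replaced by 'spec2d';
    # a missing counterpart is fatal, as in the patched code.
    spec2d_files = []
    for spec1d in map(Path, spec1d_files):
        spec2d = spec1d.with_name(spec1d.name.replace('spec1d', 'spec2d', 1))
        if not spec2d.exists():
            raise PypeItError(f'Could not find matching spec2d file for {spec1d}')
        spec2d_files.append(spec2d)
    return spec2d_files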
elif out_path.exists() and args.append: old_table = Table.read(out_path) old_array_len = len(old_table['wave'][0].data) if old_array_len != array_len: - msgs.error(f'The old file has an array length of {old_array_len} while the new files have an array length of {array_len}. Cannot merge these files.') + raise PypeItError(f'The old file has an array length of {old_array_len} while the new files have an array length of {array_len}. Cannot merge these files.') else: reid_table = join(old_table, reid_table) reid_table.write(out_path, format='fits', overwrite=args.append) diff --git a/pypeit/scripts/extract_datacube.py b/pypeit/scripts/extract_datacube.py index ce52d04119..ca4a849aff 100644 --- a/pypeit/scripts/extract_datacube.py +++ b/pypeit/scripts/extract_datacube.py @@ -48,7 +48,7 @@ def main(args): # Check that a file has been provided if args.file is None: - msgs.error('You must input a spec3d (i.e. PypeIt DataCube) fits file') + raise PypeItError('You must input a spec3d (i.e. PypeIt DataCube) fits file') extcube = DataCube.from_file(args.file) spectrograph = load_spectrograph(extcube.PYP_SPEC) diff --git a/pypeit/scripts/flux_calib.py b/pypeit/scripts/flux_calib.py index 1bd78b65f7..38be5218f4 100644 --- a/pypeit/scripts/flux_calib.py +++ b/pypeit/scripts/flux_calib.py @@ -106,7 +106,7 @@ def main(args): sf_archive = SensFileArchive.get_instance(spectrograph.name) sensfiles = nspec*[sf_archive.get_archived_sensfile(fluxFile.filenames[0])] else: - msgs.error('Invalid format for .flux file.' + msgs.newline() + + raise PypeItError('Invalid format for .flux file.' + msgs.newline() + 'You must specify a single sensfile on the first line of the flux block,' + msgs.newline() + 'or specify a sensfile for every spec1dfile in the flux block,' + msgs.newline() + 'or specify "use_archived_sens = True" to use an archived sensfile.' 
+ msgs.newline() + diff --git a/pypeit/scripts/flux_setup.py b/pypeit/scripts/flux_setup.py index 153bfe9288..f1a25e8b40 100644 --- a/pypeit/scripts/flux_setup.py +++ b/pypeit/scripts/flux_setup.py @@ -99,7 +99,7 @@ def main(args): else: msgs.info('{:} is not a standard PypeIt output, skipping.'.format(ifile)) if len(spec2dfiles) > len(spec1dfiles): - msgs.warn('The following exposures do not have 1D extractions:') + msgs.warning('The following exposures do not have 1D extractions:') for ii in range(len(spec2dfiles)): if (spec2dfiles[ii].parent / spec2dfiles[ii].name.replace("spec2d", "spec1d")).exists(): msgs.info('\t {:}'.format(spec2dfiles[ii])) diff --git a/pypeit/scripts/identify.py b/pypeit/scripts/identify.py index 0260c4b3e8..9c3e8e03b2 100644 --- a/pypeit/scripts/identify.py +++ b/pypeit/scripts/identify.py @@ -86,7 +86,7 @@ def main(args): if args.lamps is None: lamps = par['lamps'] if lamps is None or lamps == ['use_header']: - msgs.error('Cannot determine the lamps; use --lamps argument') + raise PypeItError('Cannot determine the lamps; use --lamps argument') else: lamps = args.lamps.split(",") par['lamps'] = lamps diff --git a/pypeit/scripts/install_extinctfile.py b/pypeit/scripts/install_extinctfile.py index 5bee43a66c..068860d075 100644 --- a/pypeit/scripts/install_extinctfile.py +++ b/pypeit/scripts/install_extinctfile.py @@ -34,7 +34,7 @@ def main(args): # Loop through the files passed for f in files: if not f.is_file(): - msgs.warn(f'{f} is not a file.') + msgs.warning(f'{f} is not a file.') continue # Copy the user-created file to the cache msgs.info(f'Installing {f}') diff --git a/pypeit/scripts/install_linelist.py b/pypeit/scripts/install_linelist.py index f11b0ba077..c074166bcb 100644 --- a/pypeit/scripts/install_linelist.py +++ b/pypeit/scripts/install_linelist.py @@ -33,7 +33,7 @@ def main(args): # Loop through the files passed for f in files: if not f.is_file(): - msgs.warn(f'{f} is not a file.') + msgs.warning(f'{f} is not a file.') continue # Copy the user-created file to the cache msgs.info(f'Installing {f}') diff --git a/pypeit/scripts/install_wvarxiv.py b/pypeit/scripts/install_wvarxiv.py index 77de419032..c57d3c6d18 100644 --- a/pypeit/scripts/install_wvarxiv.py +++ b/pypeit/scripts/install_wvarxiv.py @@ -33,7 +33,7 @@ def main(args): # Loop through the files passed for f in files: if not f.is_file(): - msgs.warn(f'{f} is not a file.') + msgs.warning(f'{f} is not a file.') continue # Copy the user-created file to the cache cache.write_file_to_cache(str(f), f.name, 'arc_lines/reid_arxiv') diff --git a/pypeit/scripts/parse_slits.py b/pypeit/scripts/parse_slits.py index c150032a20..546eaa05b3 100644 --- a/pypeit/scripts/parse_slits.py +++ b/pypeit/scripts/parse_slits.py @@ -80,7 +80,7 @@ def main(args): elif 'PYP_CLS' in head0.keys() and head0['PYP_CLS'].strip() == 'AllSpec2DObj': file_type = 'AllSpec2D' else: - msgs.error("Bad file type input!") + raise PypeItError("Bad file type input!") if file_type == 'Slits': slits = slittrace.SlitTraceSet.from_file(args.input_file, chk_version=chk_version) @@ -96,5 +96,5 @@ def main(args): spec2Dobj = allspec2D[det] print_slits(spec2Dobj.slits) else: - msgs.error("Bad file type input! Must be a Slits calibration frame or a spec2d file.") + raise PypeItError("Bad file type input! 
Must be a Slits calibration frame or a spec2d file.") diff --git a/pypeit/scripts/print_bpm.py b/pypeit/scripts/print_bpm.py index 2d9b2f0937..70d0032f6d 100644 --- a/pypeit/scripts/print_bpm.py +++ b/pypeit/scripts/print_bpm.py @@ -77,7 +77,7 @@ def main(args): file_pypeit_version = fits.getval(args.file, 'VERSPYP', 0) except KeyError: file_pypeit_version = '*unknown*' - msgs.warn(f'Your installed version of PypeIt ({__version__}) cannot be used to parse ' + msgs.warning(f'Your installed version of PypeIt ({__version__}) cannot be used to parse ' f'{args.file}, which was reduced using version {file_pypeit_version}. You ' 'are strongly encouraged to re-reduce your data using this (or, better yet, ' 'the most recent) version of PypeIt. Script will try to parse only the ' diff --git a/pypeit/scripts/ql.py b/pypeit/scripts/ql.py index a9651ce1b3..1d702c32ec 100644 --- a/pypeit/scripts/ql.py +++ b/pypeit/scripts/ql.py @@ -95,7 +95,7 @@ def get_files(raw_files, raw_path): try: files = inputfiles.grab_rawfiles(raw_paths=[raw_path], list_of_files=raw_files) except PypeItError as e: - msgs.error('Unable to parse provided input files. Check --raw_files and ' + raise PypeItError('Unable to parse provided input files. Check --raw_files and ' '--raw_path input.') return files @@ -154,17 +154,17 @@ def quicklook_regroup(fitstbl): # All frames must be of the same target if 'target' in fitstbl.keys() \ and not all(fitstbl['target'][is_type] == fitstbl['target'][is_type][0]): - msgs.error(f'All {frametype} frames must be of the same target.') + raise PypeItError(f'All {frametype} frames must be of the same target.') # Regroup dithered observations so that all images at a unique # offset are combined. if 'bkg_id' in fitstbl.keys() and any(fitstbl['bkg_id'].data[is_type] != -1): if 'dithoff' not in fitstbl.keys(): - msgs.error('CODING ERROR: Metadata does not include dithoff column!') + raise PypeItError('CODING ERROR: Metadata does not include dithoff column!') # Group the unique dither positions dith, inv = np.unique(fitstbl['dithoff'].data[is_type], return_inverse=True) if len(dith) == 1: - msgs.warn('All exposures have the same offset!') + msgs.warning('All exposures have the same offset!') fitstbl['comb_id'][is_type] = comb_strt else: # This creates comb+bkg pairs that match the absolute value of the offset @@ -256,7 +256,7 @@ def generate_sci_pypeitfile(redux_path:str, # Check the directory with the reference calibrations exists if not ref_calib_dir.exists(): - msgs.error(f'Reference calibration directory does not exist: {ref_calib_dir}') + raise PypeItError(f'Reference calibration directory does not exist: {ref_calib_dir}') # Get the setup and calibration group to use for the science frame(s) setup, calib = get_setup_calib(ref_calib_dir) @@ -286,7 +286,7 @@ def generate_sci_pypeitfile(redux_path:str, # already exists, check that it points to the right directory. If not, # raise an error. if calib_dir.exists() and calib_dir.is_symlink() and calib_dir.readlink() != ref_calib_dir: - msgs.error(f'Symlink to calibrations directory ({calib_dir}) already exists and points ' + raise PypeItError(f'Symlink to calibrations directory ({calib_dir}) already exists and points ' f'to {calib_dir.readlink()} instead of {ref_calib_dir}. Re-run quicklook ' f'forcing the existing reductions in {sci_dir} to be removed.') # Create the symlink if it doesn't already exist @@ -309,7 +309,7 @@ def generate_sci_pypeitfile(redux_path:str, if std_spec1d is not None: # Found an existing reduction, so remove the standard frames. 
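In the get_files() hunk above, a caught PypeItError is replaced by a new, user-facing one. Python already chains the original implicitly as __context__, but an explicit `raise ... from err` would record it as __cause__ and keep the low-level failure unambiguous in the traceback. A sketch of that optional refinement, which is not what the patch itself does (all names below are local stand-ins):

class PypeItError(Exception):
    """Local stand-in for the example."""

def grab_rawfiles(raw_paths, list_of_files):
    raise PypeItError('low-level parsing failure')

def get_files(raw_files, raw_path):
    try:
        return grab_rawfiles(raw_paths=[raw_path], list_of_files=raw_files)
    except PypeItError as err:
        # 'from err' preserves the original failure as __cause__.
        raise PypeItError('Unable to parse provided input files. Check '
                          '--raw_files and --raw_path input.') from err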
# NOTE: Should not need to regroup! - msgs.warn(f'Found existing standard star reduction: {std_spec1d}. This will be used ' + msgs.warning(f'Found existing standard star reduction: {std_spec1d}. This will be used ' 'and the standards will not be re-reduced! To force them to be ' 're-reduced, use the --clear_science option.') ps_sci.remove_table_rows(is_std) @@ -345,7 +345,7 @@ def generate_sci_pypeitfile(redux_path:str, CalibFrame.parse_key_dir(str(f), from_filename=True)[0]) keep[i] = _setup == setup and _calib in ['all', calib] if not any(keep): - msgs.error('Could not find valid Slits calibration frame!') + raise PypeItError('Could not find valid Slits calibration frame!') slittrace_files = slittrace_files[keep] # Iterate through each file to find the one with the relevant mask ID. @@ -367,7 +367,7 @@ def generate_sci_pypeitfile(redux_path:str, detnum = [ps_sci.spectrograph.allowed_mosaics[det_id[0]]] if mosaic else det_id[0]+1 break if detname is None: - msgs.error(f'Could not find a SlitTrace file with maskID={maskID}') + raise PypeItError(f'Could not find a SlitTrace file with maskID={maskID}') # Add to config cfg['rdx']['detnum'] = detnum @@ -512,7 +512,7 @@ def match_to_calibs(ps:pypeitsetup.PypeItSetup, calib_dir:str, calibrated_setups matched_configs[setup] = None continue elif len(matched_configs[setup]['setup']) > 1: - msgs.warn('Existing calibrations have degenerate configurations! We recommend you ' + msgs.warning('Existing calibrations have degenerate configurations! We recommend you ' 'clean your calibrations parent directory. For now, using the first match.') matched_configs[setup]['setup'] = matched_configs[setup]['setup'][0] matched_configs[setup]['calib_dir'] = matched_configs[setup]['calib_dir'][0] @@ -586,7 +586,7 @@ def get_setup_calib(calib_dir, calib_grp=None): # Check there are files in the directory calib_files = sorted(_calib_dir.glob('*')) if len(calib_files) == 0: - msgs.error(f'Calibrations directory is empty: {_calib_dir}') + raise PypeItError(f'Calibrations directory is empty: {_calib_dir}') # For each file, try to parse the setup and calibration ID(s) setups = [] @@ -603,7 +603,7 @@ def get_setup_calib(calib_dir, calib_grp=None): # Find the unique setups setups = np.unique(setups) if len(setups) != 1: - msgs.error(f'Calibration files for more than one setup found in {_calib_dir}, ' + raise PypeItError(f'Calibration files for more than one setup found in {_calib_dir}, ' 'according to their file names. Calibration directory should only hold data ' 'for *one* setup.') setup = setups[0] @@ -622,12 +622,12 @@ def get_setup_calib(calib_dir, calib_grp=None): if calib_grp is not None: if str(calib_grp) in unique_calibs: return setup, str(calib_grp) - msgs.error(f'Selected calibration group {calib_grp} is not available in {_calib_dir}. ' + raise PypeItError(f'Selected calibration group {calib_grp} is not available in {_calib_dir}. ' 'Must select a valid group. Directory currently contains the following ' f'calibration groups: {unique_calibs}') # Cannot determine which calibration group to use. - msgs.error(f'Calibrations in {_calib_dir} are part of multiple calibration groups. Unclear ' + raise PypeItError(f'Calibrations in {_calib_dir} are part of multiple calibration groups. Unclear ' 'how to proceed.') @@ -764,7 +764,7 @@ def main(args): # Parse the raw files files = get_files(args.raw_files, args.raw_path) if len(files) == 0: - msgs.error('No files to read! Check --raw_files and --raw_path input.') + raise PypeItError('No files to read! 
Check --raw_files and --raw_path input.') # TODO: Include an option to save the ingested file list as a PypeIt # RawFile that can be edited? @@ -788,7 +788,7 @@ def main(args): unknown_types = [t is None for t in ps.fitstbl['frametype']] if any(unknown_types & np.logical_not(sci_idx)): # TODO: Remove them and keep going instead? - msgs.error('Could not determine frame types for the following files: ' + + raise PypeItError('Could not determine frame types for the following files: ' + ', '.join(ps.fitstbl['filename'][unknown_types & np.logical_not(sci_idx)])) # Include any standards? @@ -829,7 +829,7 @@ def main(args): # Limit to a single setup if len(ps_sci.fitstbl.configs.keys()) > 1: - msgs.error('Your science/standard files come from more than one setup. Try ' + raise PypeItError('Your science/standard files come from more than one setup. Try ' 'either ignoring the standard frames (if any are present and ' 'auto-detected) and/or changing the list of science files.') @@ -848,7 +848,7 @@ def main(args): # TODO: This is now the only place bkg_redux is used... bkg_redux = 'bkg_id' in ps_sci.fitstbl.keys() and any(ps_sci.fitstbl['bkg_id'] != -1) if bkg_redux: - msgs.warn('Dither pattern automatically detected for these observations. Image ' + msgs.warning('Dither pattern automatically detected for these observations. Image ' 'combination and background subtraction sequences automatically set; ' 'confirm the behavior is what you want by checking the auto-generated ' 'pypeit file.') @@ -868,7 +868,7 @@ def main(args): # in generate_sci_pypeitfile, but it's useful to keep the warning # here. if any(ps_sci.fitstbl['calib'] != ps_sci.fitstbl['calib'][0]): - msgs.warn('Automated configuration assigned multiple calibration groups to your ' + msgs.warning('Automated configuration assigned multiple calibration groups to your ' 'science frames. Ignoring! Assigning all frames to the same group.') ps_sci.fitstbl['calib'] = ps_sci.fitstbl['calib'][0] @@ -879,14 +879,14 @@ def main(args): if setup_calib_dir is None: # TODO: Fault here, or keep going to the next step, which is # to try to build the calibrations? - msgs.error('No calibrations exist or could not find appropriate setup match ' + raise PypeItError('No calibrations exist or could not find appropriate setup match ' f'in provided parent directory: {args.parent_calib_dir}') # NOTE: Code above check that there is only one setup in ps_sci setup_calib_dir = setup_calib_dir[ps_sci.fitstbl['setup'][0]]['calib_dir'] msgs.info(f'Attempting to use archived calibrations found in {setup_calib_dir}.') elif not args.calibs_only: - msgs.warn('No science frames found among the files provided. Will only process ' + msgs.warning('No science frames found among the files provided. Will only process ' 'calibration frames. If you have provided science frames, you can specify ' 'which ones they are using the --sci_files option.') @@ -1005,7 +1005,7 @@ def main(args): # time. 
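Immediately below, the quicklook flow requires that exactly one auto-generated .coadd2d file exists. That require-exactly-one glob check, as a generic self-contained sketch (PypeItError is a local stand-in; the helper name is illustrative):

from pathlib import Path

class PypeItError(Exception):
    """Local stand-in for the example."""

def unique_match(directory, pattern):
    # Exactly one match or a fatal error, mirroring the .coadd2d lookup.
    matches = sorted(Path(directory).glob(pattern))
    if len(matches) != 1:
        raise PypeItError(f'Expected exactly one {pattern} file in '
                          f'{directory}; found {len(matches)}.')
    return matches[0]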
coadd_file = sorted(Path(sci_pypeit_file).absolute().parent.glob('*.coadd2d')) if len(coadd_file) != 1: - msgs.error('There should be only one 2D coadd file.') + raise PypeItError('There should be only one 2D coadd file.') coadd_file = coadd_file[0] # Run the coadding diff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py index ae2e6b68ea..8df6f41983 100644 --- a/pypeit/scripts/run_pypeit.py +++ b/pypeit/scripts/run_pypeit.py @@ -87,7 +87,7 @@ def main(args): # Load options from command line splitnm = os.path.splitext(args.pypeit_file) if splitnm[1] != '.pypeit': - msgs.error('Input file must have a .pypeit extension!') + raise PypeItError('Input file must have a .pypeit extension!') logname = splitnm[0] + ".log" # Instantiate the main pipeline reduction object diff --git a/pypeit/scripts/run_to_calibstep.py b/pypeit/scripts/run_to_calibstep.py index cde32042ab..cd67f81a45 100644 --- a/pypeit/scripts/run_to_calibstep.py +++ b/pypeit/scripts/run_to_calibstep.py @@ -55,14 +55,14 @@ def main(args): # Load options from command line _pypeit_file = Path(args.pypeit_file).absolute() if _pypeit_file.suffix != '.pypeit': - msgs.error(f'Input file {_pypeit_file} must have a .pypeit extension!') + raise PypeItError(f'Input file {_pypeit_file} must have a .pypeit extension!') logname = _pypeit_file.parent / f'{_pypeit_file.stem}.log' # Check for the frame or calib_group if args.science_frame is None and args.calib_group is None: - msgs.error('Must provide either a science frame or a calibration group ID') + raise PypeItError('Must provide either a science frame or a calibration group ID') elif args.science_frame is not None and args.calib_group is not None: - msgs.warn("Both science_frame and calib_group ID provided. Will use the science_frame") + msgs.warning("Both science_frame and calib_group ID provided. Will use the science_frame") # Instantiate the main pipeline reduction object pypeIt = pypeit.PypeIt(args.pypeit_file, verbosity=args.verbosity, @@ -80,11 +80,11 @@ def main(args): if args.science_frame is not None: row = np.where(pypeIt.fitstbl['filename'] == args.science_frame)[0] if len(row) != 1: - msgs.error(f"Frame {args.frame} not found or not unique") + raise PypeItError(f"Frame {args.science_frame} not found or not unique") elif args.calib_group is not None: rows = np.where((pypeIt.fitstbl['calib'].data.astype(str) == args.calib_group))[0] if len(rows) == 0: - msgs.error(f"Calibration group {args.calib_group} not found") + raise PypeItError(f"Calibration group {args.calib_group} not found") row = rows[0] row = int(row) diff --git a/pypeit/scripts/sensfunc.py b/pypeit/scripts/sensfunc.py index 4f983f07ae..8712cbfffa 100644 --- a/pypeit/scripts/sensfunc.py +++ b/pypeit/scripts/sensfunc.py @@ -104,7 +104,7 @@ def main(args): # Check parameter inputs if args.algorithm is not None and args.sens_file is not None: - msgs.error("It is not possible to set --algorithm and simultaneously use a .sens " + raise PypeItError("It is not possible to set --algorithm and simultaneously use a .sens " "file via the --sens_file option.
If you are using a .sens file set the " "algorithm there via:\n" "\n" @@ -113,7 +113,7 @@ def main(args): "\n") if args.use_flat and args.sens_file is not None: - msgs.error("It is not possible to set --use_flat and simultaneously use a .sens " + raise PypeItError("It is not possible to set --use_flat and simultaneously use a .sens " "file via the --sens_file option. If you are using a .sens file set the " "use_flat flag in your .sens file using the argument:\n" "\n" @@ -122,7 +122,7 @@ def main(args): "\n") if args.multi is not None and args.sens_file is not None: - msgs.error("It is not possible to set --multi and simultaneously use a .sens file via " + raise PypeItError("It is not possible to set --multi and simultaneously use a .sens file via " "the --sens_file option. If you are using a .sens file set the detectors " "there via:\n" "\n" @@ -131,7 +131,7 @@ def main(args): "\n") if args.extr is not None and args.sens_file is not None: - msgs.error("It is not possible to set --extr and simultaneously use a .sens file via " + raise PypeItError("It is not possible to set --extr and simultaneously use a .sens file via " "the --sens_file option. If you are using a .sens file set the extraction " "method there via:\n" "\n" diff --git a/pypeit/scripts/setup.py b/pypeit/scripts/setup.py index aee0b49c5a..a448586b33 100644 --- a/pypeit/scripts/setup.py +++ b/pypeit/scripts/setup.py @@ -111,7 +111,7 @@ def main(args): user_cfgs = [l.rstrip() for l in user_par_fobj.readlines()] ps.append_user_cfg(user_cfgs) else: - msgs.warn(f"Could not open param_block file {args.param_block_file}. " + msgs.warning(f"Could not open param_block file {args.param_block_file}. " "Not adding any additional user parameters to the .pypeit file.") # Run the setup ps.run(setup_only=True, clean_config=not args.keep_bad_frames) diff --git a/pypeit/scripts/setup_coadd2d.py b/pypeit/scripts/setup_coadd2d.py index 8acc28cabb..b5d55d9ea6 100644 --- a/pypeit/scripts/setup_coadd2d.py +++ b/pypeit/scripts/setup_coadd2d.py @@ -127,13 +127,13 @@ def main(args): msgs_string = 'The following science directories do not exist:' + msgs.newline() for s in np.array(sci_dirs)[np.logical_not(sci_dirs_exist)]: msgs_string += f'{s}' + msgs.newline() - msgs.error(msgs_string) + raise PypeItError(msgs_string) # Find all the spec2d files: spec2d_files = np.concatenate([sorted(sci_dir.glob('spec2d*')) for sci_dir in sci_dirs]).tolist() if len(spec2d_files) == 0: - msgs.error(f'No spec2d files.') + raise PypeItError(f'No spec2d files.') if spec_name is None: with io.fits_open(spec2d_files[0]) as hdu: @@ -150,7 +150,7 @@ def main(args): _objects = [o for o in objects if o in args.obj] # Check some were found if len(_objects) == 0: - msgs.error('Unable to find relevant objects. Unique objects are ' + raise PypeItError('Unable to find relevant objects. Unique objects are ' f'{objects.tolist()}; you requested {args.obj}.') objects = _objects @@ -159,12 +159,12 @@ def main(args): for obj in objects: object_spec2d_files[obj] = [f for f in spec2d_files if obj.strip() in f.name] if len(object_spec2d_files[obj]) == 0: - msgs.warn(f'No spec2d files found for target={obj}.') + msgs.warning(f'No spec2d files found for target={obj}.') del object_spec2d_files[obj] # Check spec2d files exist for the selected objects if len(object_spec2d_files.keys()) == 0: - msgs.error('Unable to match any spec2d files to objects.') + raise PypeItError('Unable to match any spec2d files to objects.') # Add the paths to make sure they match the pypeit file. 
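The four sensfunc.py checks above all have the same shape: an option that duplicates a .sens-file setting may not be combined with --sens_file itself. A condensed sketch of that validation pattern (the toy parser and PypeItError below are stand-ins for illustration; only a subset of the real options is shown):

import argparse

class PypeItError(Exception):
    """Local stand-in for the example."""

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm')
parser.add_argument('--use_flat', action='store_true')
parser.add_argument('--sens_file')
args = parser.parse_args(['--algorithm', 'IR'])

# Each check boils down to: option set AND --sens_file set is an error.
conflicts = {'algorithm': args.algorithm is not None,
             'use_flat': args.use_flat}
for opt, is_set in conflicts.items():
    if is_set and args.sens_file is not None:
        raise PypeItError(f'It is not possible to set --{opt} and '
                          'simultaneously use a .sens file via the '
                          '--sens_file option.')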
# NOTE: cfg does *not* need to include the spectrograph parameter in diff --git a/pypeit/scripts/show_1dspec.py b/pypeit/scripts/show_1dspec.py index 06703fbe6a..fb46ae757f 100644 --- a/pypeit/scripts/show_1dspec.py +++ b/pypeit/scripts/show_1dspec.py @@ -73,11 +73,11 @@ def main(args): # if args.jdaviz: # from pypeit.specutils import Spectrum1D, SpectrumList # if Spectrum1D is None: -# msgs.error('specutils package must be installed.') +# raise PypeItError('specutils package must be installed.') # try: # from jdaviz import Specviz # except ModuleNotFoundError: -# msgs.error('jdaviz package must be installed.') +# raise PypeItError('jdaviz package must be installed.') # # # First try reading it as a list # try: @@ -103,20 +103,20 @@ def main(args): # return # # # If we get here, the file couldn't be parsed -# msgs.error(f'Could not parse input file: {args.file}') +# raise PypeItError(f'Could not parse input file: {args.file}') if args.obj is not None: exten = np.where(sobjs.NAME == args.obj)[0][0] if exten < 0: - msgs.error("Bad input object name: {:s}".format(args.obj)) + raise PypeItError("Bad input object name: {:s}".format(args.obj)) else: exten = args.exten-1 # 1-index in FITS file # Check Extraction if args.extract == 'OPT': if sobjs[exten]['OPT_WAVE'] is None: #not in sobjs[exten]._data.keys(): - msgs.error("Spectrum not extracted with OPT. Try --extract BOX") + raise PypeItError("Spectrum not extracted with OPT. Try --extract BOX") if args.ginga: # show the 1d spectrum in Ginga diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index 8d7b751407..09e3be433e 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -73,7 +73,7 @@ def show_trace(sobjs, det, viewer, ch): display.show_trace(viewer, ch, np.swapaxes(trace_list, 1,0), np.array(trc_name_list), maskdef_extr=np.array(maskdef_extr_list), manual_extr=np.array(manual_extr_list)) else: - msgs.warn('spec1d file found, but no objects were extracted for this detector.') + msgs.warning('spec1d file found, but no objects were extracted for this detector.') class Show2DSpec(scriptbase.ScriptBase): @@ -172,7 +172,6 @@ def main(args): except KeyError: file_pypeit_version = '*unknown*' if chk_version: - msgs_func = msgs.error addendum = 'To allow the script to attempt to read the data anyway, use the ' \ '--try_old command-line option. This will first try to simply ' \ 'ignore the version number. If the datamodels are incompatible ' \ @@ -182,14 +181,19 @@ def main(args): 'script. In either case, BEWARE that the displayed data may be in ' \ 'error!' else: - msgs_func = msgs.warn addendum = 'The datamodels are sufficiently different that the script will now ' \ 'try to parse only the components necessary for use by this ' \ 'script. BEWARE that the displayed data may be in error!' - msgs_func(f'Your installed version of PypeIt ({__version__}) cannot be used to parse ' - f'{args.file}, which was reduced using version {file_pypeit_version}. You ' - 'are strongly encouraged to re-reduce your data using this (or, better yet, ' - 'the most recent) version of PypeIt. ' + addendum) + message = ( + f'Your installed version of PypeIt ({__version__}) cannot be used to parse ' + f'{args.file}, which was reduced using version {file_pypeit_version}. You ' + 'are strongly encouraged to re-reduce your data using this (or, better yet, ' + 'the most recent) version of PypeIt. 
' + addendum ) + if chk_version: + raise PypeItError(message) + else: + msgs.warning(message) spec2DObj = None if spec2DObj is None: @@ -198,11 +202,11 @@ def main(args): names = [h.name for h in hdu] has_det = any([detname in n for n in names]) if not has_det: - msgs.error(f'Provided file has no extensions including {detname}.') + raise PypeItError(f'Provided file has no extensions including {detname}.') for ext in ['SCIIMG', 'SKYMODEL', 'OBJMODEL', 'IVARMODEL']: _ext = f'{detname}-{ext}' if _ext not in names: - msgs.error(f'{args.file} missing extension {_ext}.') + raise PypeItError(f'{args.file} missing extension {_ext}.') sciimg = hdu[f'{detname}-SCIIMG'].data skymodel = hdu[f'{detname}-SKYMODEL'].data @@ -218,7 +222,7 @@ def main(args): _ext = f'{detname}-SLITS' if _ext not in names: - msgs.warn(f'{args.file} missing extension {_ext}; cannot show slit edges.') + msgs.warning(f'{args.file} missing extension {_ext}; cannot show slit edges.') else: slit_columns = hdu[_ext].columns.names slit_spat_id = hdu[_ext].data['spat_id'] if 'spat_id' in slit_columns else None @@ -262,7 +266,7 @@ def main(args): img_gpm = spec2DObj.select_flag(invert=True) if not np.any(img_gpm): - msgs.warn('The full science image is masked!') + msgs.warning('The full science image is masked!') model_gpm = img_gpm.copy() if args.ignore_extract_mask: @@ -284,7 +288,7 @@ def main(args): sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file, chk_version=False) else: sobjs = None - msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) + msgs.newline() + + msgs.warning('Could not find spec1d file: {:s}'.format(spec1d_file) + msgs.newline() + ' No objects were extracted.') # TODO: This may be too restrictive, i.e. ignore BADFLTCALIB?? diff --git a/pypeit/scripts/show_pixflat.py b/pypeit/scripts/show_pixflat.py index 0240837691..ff8267d7f4 100644 --- a/pypeit/scripts/show_pixflat.py +++ b/pypeit/scripts/show_pixflat.py @@ -32,7 +32,7 @@ def main(args): # check if the file exists file_path = dataPaths.pixelflat.get_file_path(args.file, return_none=True) if file_path is None: - msgs.error(f'Provided pixelflat file, {args.file} not found. It is not a direct path, ' + raise PypeItError(f'Provided pixelflat file, {args.file} not found. It is not a direct path, ' f'a cached file, or a file that can be downloaded from a PypeIt repository.') # Load the image @@ -44,11 +44,11 @@ def main(args): in_file = np.isin(args.det, file_dets) # if none of the provided detectors are in the file, raise an error if not np.any(in_file): - msgs.error(f"Provided detector(s) not found in the file. Available detectors are {file_dets}") + raise PypeItError(f"Provided detector(s) not found in the file. Available detectors are {file_dets}") # if some of the provided detectors are not in the file, warn the user elif np.any(np.logical_not(in_file)): det_not_in_file = np.array(args.det)[np.logical_not(in_file)] - msgs.warn(f"Detector(s) {det_not_in_file} not found in the file. Available detectors are {file_dets}") + msgs.warning(f"Detector(s) {det_not_in_file} not found in the file. Available detectors are {file_dets}") # show the image display.connect_to_ginga(raise_err=True, allow_new=True) diff --git a/pypeit/scripts/skysub_regions.py b/pypeit/scripts/skysub_regions.py index 516d24a691..d0e450d97f 100644 --- a/pypeit/scripts/skysub_regions.py +++ b/pypeit/scripts/skysub_regions.py @@ -71,7 +71,7 @@ def main(args): key = EdgeTraceSet.calib_type.upper() if key not in spec2DObj.calibs: # TODO: Until I can figure out a better approach...
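The rewritten version-mismatch handling in show_2dspec.py above funnels a single message to two severities: fatal under strict version checking, a warning when the user asked the script to try parsing anyway. As a generic helper, under the assumption that strictness is passed in as a boolean (the names and stand-in objects below are illustrative only):

import logging

msgs = logging.getLogger('pypeit')  # stand-in for PypeIt's msgs

class PypeItError(Exception):
    """Local stand-in for the example."""

def report_version_mismatch(installed, file_version, chk_version=True):
    # One message, two severities, selected by the strictness flag.
    message = (f'Your installed version of PypeIt ({installed}) cannot be '
               f'used to parse a file reduced with version {file_version}.')
    if chk_version:
        raise PypeItError(message)
    msgs.warning(message)

report_version_mismatch('1.17.0', '1.12.0', chk_version=False)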
- msgs.error(f'EdgeTrace calibration frame not recorded in {args.file}!') + raise PypeItError(f'EdgeTrace calibration frame not recorded in {args.file}!') calib_key, _ = EdgeTraceSet.parse_key_dir(spec2DObj.calibs[key], from_filename=True) # Use the appropriate class to get the "detector" number diff --git a/pypeit/scripts/tellfit.py b/pypeit/scripts/tellfit.py index 8ec24efbee..1d1e5f3096 100644 --- a/pypeit/scripts/tellfit.py +++ b/pypeit/scripts/tellfit.py @@ -114,13 +114,13 @@ def main(args): par['telluric']['telgridfile'] = par['sensfunc']['IR']['telgridfile'] else: par['telluric']['telgridfile'] = 'TellPCA_3000_26000_R10000.fits' - msgs.warn(f"No telluric file given. Using PCA method with {par['telluric']['telgridfile']}.") + msgs.warning(f"No telluric file given. Using PCA method with {par['telluric']['telgridfile']}.") # Checks if par['telluric']['telgridfile'] is None: - msgs.error('A file with the telluric grid must be provided.') + raise PypeItError('A file with the telluric grid must be provided.') elif not os.path.isfile(dataPaths.telgrid.get_file_path(par['telluric']['telgridfile'])): - msgs.error(f"{par['telluric']['telgridfile']} does not exist. Either the file was not" + raise PypeItError(f"{par['telluric']['telgridfile']} does not exist. Either the file was not " "downloaded successfully or the file name is incorrect.") # Write the par to disk @@ -227,6 +227,6 @@ def main(args): chk_version=args.chk_version, ) else: - msgs.error("Object model is not supported yet. Must be 'qso', 'star', or 'poly'.") + raise PypeItError("Object model is not supported yet. Must be 'qso', 'star', or 'poly'.") diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index b766df3671..7195d0d689 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -77,13 +77,13 @@ def main(args): # msgs.set_logfile_and_verbosity('trace_edges', args.verbosity) if args.show: - msgs.warn('"show" option is deprecated. Setting debug = 1.') + msgs.warning('"show" option is deprecated. Setting debug = 1.') args.debug = 1 if args.pypeit_file is not None: pypeit_file = Path(args.pypeit_file).absolute() if not pypeit_file.exists(): - msgs.error(f'File does not exist: {pypeit_file}') + raise PypeItError(f'File does not exist: {pypeit_file}') redux_path = pypeit_file.parent if args.redux_path is None \ else Path(args.redux_path).absolute() @@ -94,7 +94,7 @@ def main(args): # Get the calibration group to use group = np.unique(rdx.fitstbl['calib'])[0] if args.group is None else args.group if group not in np.unique(rdx.fitstbl['calib']): - msgs.error(f'Invalid calibration group: {group}') + raise PypeItError(f'Invalid calibration group: {group}') # Find the rows in the metadata table with trace frames in the # specified calibration group tbl_rows = rdx.fitstbl.find_frames('trace', calib_ID=int(group), index=True) @@ -139,7 +139,7 @@ def main(args): binning = '1,1' if args.binning is None else args.binning trace_file = Path(args.trace_file).absolute() if not trace_file.exists(): - msgs.error(f'File does not exist: {trace_file}') + raise PypeItError(f'File does not exist: {trace_file}') files = [str(trace_file)] redux_path = trace_file.parent if args.redux_path is None \ else Path(args.redux_path).absolute() @@ -208,7 +208,7 @@ def main(args): edges = edgetrace.EdgeTraceSet(traceImage, spec, trace_par, auto=True, debug=args.debug, qa_path=qa_path) if not edges.success: - msgs.warn(f'Edge tracing for detector {det} failed.
Continuing...') + msgs.warning(f'Edge tracing for detector {det} failed. Continuing...') continue msgs.info(f'Tracing for detector {det} finished in { time.perf_counter()-t:.1f} s.') diff --git a/pypeit/scripts/view_fits.py b/pypeit/scripts/view_fits.py index a98d12f273..3f51b1bbca 100644 --- a/pypeit/scripts/view_fits.py +++ b/pypeit/scripts/view_fits.py @@ -64,9 +64,9 @@ def main(args): msgs.reset(verbosity=2) if args.proc and args.exten is not None: - msgs.error('You cannot specify --proc and --exten, since --exten shows the raw image') + raise PypeItError('You cannot specify --proc and --exten, since --exten shows the raw image') if args.exten is not None and args.det == 'mosaic': - msgs.error('You cannot specify --exten and --det mosaic, since --mosaic displays ' + raise PypeItError('You cannot specify --exten and --det mosaic, since --mosaic displays ' 'multiple extensions by definition') if args.exten is not None: @@ -84,12 +84,12 @@ def main(args): mosaic = True _det = spectrograph.default_mosaic if _det is None: - msgs.error(f'{args.spectrograph} does not have a known mosaic') + raise PypeItError(f'{args.spectrograph} does not have a known mosaic') else: try: _det = tuple(int(d) for d in args.det) except: - msgs.error(f'Could not convert detector input to integer.') + raise PypeItError(f'Could not convert detector input to integer.') mosaic = len(_det) > 1 if not mosaic: _det = _det[0] @@ -102,7 +102,7 @@ def main(args): Img = buildimage.buildimage_fromlist(spectrograph, _det, par, [args.file], mosaic=mosaic) except Exception as e: - msgs.error(bad_read_message + raise PypeItError(bad_read_message + f' Original exception -- {type(e).__name__}: {str(e)}') if args.bkg_file is not None: @@ -110,7 +110,7 @@ def main(args): bkgImg = buildimage.buildimage_fromlist(spectrograph, _det, par, [args.bkg_file], mosaic=mosaic) except Exception as e: - msgs.error(bad_read_message + raise PypeItError(bad_read_message + f' Original exception -- {type(e).__name__}: {str(e)}') @@ -122,7 +122,7 @@ def main(args): try: img = spectrograph.get_rawimage(args.file, _det)[1] except Exception as e: - msgs.error(bad_read_message + raise PypeItError(bad_read_message + f' Original exception -- {type(e).__name__}: {str(e)}') display.connect_to_ginga(raise_err=True, allow_new=True) diff --git a/pypeit/sensfilearchive.py b/pypeit/sensfilearchive.py index 9faa81c134..f49f2d2130 100644 --- a/pypeit/sensfilearchive.py +++ b/pypeit/sensfilearchive.py @@ -84,7 +84,7 @@ def get_archived_sensfile(self, fitsfile, symlink_in_pkgdir=False): grating = header['DISPNAME'] if grating not in ["600ZD", "830G", "900ZD", "1200B", "1200G"]: - msgs.error(f"There are no archived SensFuncFiles for keck_deimos grating {grating}.") + raise PypeItError(f"There are no archived SensFuncFiles for keck_deimos grating {grating}.") to_pkg = 'symlink' if symlink_in_pkgdir else None archived_file = dataPaths.sensfunc.get_file_path(f"keck_deimos_{grating}_sensfunc.fits", diff --git a/pypeit/sensfunc.py b/pypeit/sensfunc.py index 692fae6432..68f2d09a7d 100644 --- a/pypeit/sensfunc.py +++ b/pypeit/sensfunc.py @@ -265,7 +265,7 @@ def __init__(self, spec1dfile, sensfile, par, par_fluxcalib=None, debug=False, self.norderdet = utils.spec_atleast_2d(wave_twk, counts_twk, counts_ivar_twk, counts_mask_twk, log10_blaze_function=log10_blaze_function_twk) if self.nspec_in == 0: - msgs.error('1D spectra have 0 length!') + raise PypeItError('1D spectra have 0 length!') # If the user provided RA and DEC use those instead of what is in meta star_ra = 
self.meta_spec['RA'] if self.par['star_ra'] is None else self.par['star_ra'] @@ -283,10 +283,10 @@ def __init__(self, spec1dfile, sensfile, par, par_fluxcalib=None, debug=False, overlap = (self.wave_cnts[:,0] <= self.std_dict['wave'].value.max()) & \ (self.wave_cnts[:,0] >= self.std_dict['wave'].value.min()) if np.sum(overlap) == 0: - msgs.error(f'No wavelength overlap between the archival and observed standard star spectrum. ' + raise PypeItError(f'No wavelength overlap between the archival and observed standard star spectrum. ' 'This is not the right standard star for your observations.') elif np.sum(overlap)/self.nspec_in < 0.8: - msgs.warn(f'Only {np.sum(overlap)/self.nspec_in:.1%} of the observed wavelength range is covered by the ' + msgs.warning(f'Only {np.sum(overlap)/self.nspec_in:.1%} of the observed wavelength range is covered by the ' f'archival standard star. This may not be the right standard star for your observations. ') def unpack_std(self): @@ -326,7 +326,7 @@ def unpack_std(self): multi_spec_det=self.par['multi_spec_det']) if sobjs_std is None: - msgs.error(f'There is a problem with your standard star spec1d file: {self.spec1df}') + raise PypeItError(f'There is a problem with your standard star spec1d file: {self.spec1df}') # Unpack standard wave, counts, counts_ivar, counts_mask, log10_blaze_function, meta_spec, header \ @@ -335,15 +335,15 @@ def unpack_std(self): elif hdul[1].header.get('DMODCLS') == 'OneSpec': spec = OneSpec.from_file(self.spec1df, chk_version=self.chk_version) if spec.head0['PYPELINE'] == 'Echelle': - msgs.error('Standard star 1D spectrum from OneSpec class cannot be used for Echelle data.') + raise PypeItError('Standard star 1D spectrum from OneSpec class cannot be used for Echelle data.') if spec.fluxed: - msgs.error('Standard star 1D spectrum from OneSpec class is already fluxed ' + raise PypeItError('Standard star 1D spectrum from OneSpec class is already fluxed ' 'and cannot be used to generate the sensitivity function.') if self.par['use_flat']: - msgs.error('"use_flat" set to True, but standard star 1D spectrum from OneSpec class ' + raise PypeItError('"use_flat" set to True, but standard star 1D spectrum from OneSpec class ' 'does not contain the flat spectrum. The blaze function cannot be estimated.') if spec.ext_mode != self.par['extr']: - msgs.warn(f'Standard star 1D spectrum from OneSpec class was obtained using the {spec.ext_mode} ' + msgs.warning(f'Standard star 1D spectrum from OneSpec class was obtained using the {spec.ext_mode} ' f'extraction, while the requested extraction is {self.par["extr"]}. ' f'The available {spec.ext_mode} extraction will be used instead.') self.extr = spec.ext_mode @@ -358,7 +358,7 @@ def unpack_std(self): sobj[f'{self.extr}_MASK'] |= counts_mask sobjs_std = specobjs.SpecObjs(specobjs=np.array([sobj]), header=spec.head0) else: - msgs.error('Unrecognized class for the 1D spectrum file. Cannot read in the standard') + raise PypeItError('Unrecognized class for the 1D spectrum file. Cannot read in the standard') return wave, counts, counts_ivar, counts_mask, log10_blaze_function, meta_spec, header, sobjs_std @@ -396,7 +396,7 @@ def _bundle(self): # TODO: I added this neurotic check, just to make sure... if self.wave_splice is None or self.zeropoint_splice is None \ or self.throughput_splice is None: - msgs.error('CODING ERROR: Assumed if splice_multi_det is True, then the *_splice ' + raise PypeItError('CODING ERROR: Assumed if splice_multi_det is True, then the *_splice ' 'arrays have all been defined. 
Found a case where this is not true!') # Loop through this list of dictionaries for _d in d: @@ -895,7 +895,7 @@ def sensfunc_weights(cls, sensfile, waves, ech_order_vec=None, debug=False, extr if waves.ndim == 2: nspec, norder = waves.shape if ech_order_vec is not None and ech_order_vec.size != norder: - msgs.warn('The number of orders in the wave grid does not match the ' + msgs.warning('The number of orders in the wave grid does not match the ' 'number of orders in the unpacked sobjs. Echelle order vector not used.') ech_order_vec = None nexp = 1 @@ -908,14 +908,14 @@ def sensfunc_weights(cls, sensfile, waves, ech_order_vec=None, debug=False, extr norder, nexp = 1, 1 waves_stack = np.reshape(waves, (nspec, 1, 1)) else: - msgs.error('Unrecognized dimensionality for waves') + raise PypeItError('Unrecognized dimensionality for waves') weights_stack = np.ones_like(waves_stack) if norder != sens.zeropoint.shape[1] and ech_order_vec is None: - msgs.error('The number of orders in {:} does not agree with your data. Wrong sensfile?'.format(sensfile)) + raise PypeItError('The number of orders in {:} does not agree with your data. Wrong sensfile?'.format(sensfile)) elif norder != sens.zeropoint.shape[1] and ech_order_vec is not None: - msgs.warn('The number of orders in {:} does not match the number of orders in the data. ' + msgs.warning('The number of orders in {:} does not match the number of orders in the data. ' 'Using only the matching orders.'.format(sensfile)) # array of order to loop through diff --git a/pypeit/setup_gui/controller.py b/pypeit/setup_gui/controller.py index 4fa60bf74e..df7fa60679 100644 --- a/pypeit/setup_gui/controller.py +++ b/pypeit/setup_gui/controller.py @@ -195,7 +195,7 @@ def postRun(self, canceled, exc_info): if exc_info[0] is not None: traceback_string = "".join(traceback.format_exception(*exc_info)) - msgs.warn(f"Failed to {self.name.lower()}:\n" + traceback_string) + msgs.warning(f"Failed to {self.name.lower()}:\n" + traceback_string) display_error(self._main_window, f"Failed to {self.name.lower()} {exc_info[0]}: {exc_info[1]}") self._model.reset() elif canceled: @@ -432,7 +432,7 @@ def view_file(self, n=None): display.connect_to_ginga(raise_err=True, allow_new=True) except Exception as e: display_error(self._main_controller.main_window, f"Could not start ginga to view FITS files: {e}") - msgs.warn(f"Failed to connect to ginga:\n" + traceback.format_exc()) + msgs.warning(f"Failed to connect to ginga:\n" + traceback.format_exc()) # Display each file in its own ginga tab @@ -448,13 +448,13 @@ def view_file(self, n=None): img = self._model.spectrograph.get_rawimage(str(file), n)[1] except Exception as e: display_error(self._main_controller.main_window, f"Failed to read image {file.name}: {e}") - msgs.warn(f"Failed get raw image:\n" + traceback.format_exc()) + msgs.warning(f"Failed to get raw image:\n" + traceback.format_exc()) try: display.show_image(img, chname = f"{file.name} {det_name}") except Exception as e: display_error(self._main_controller.main_window, f"Failed to send image {file.name} to ginga: {e}") - msgs.warn(f"Failed send image to ginga:\n" + traceback.format_exc()) + msgs.warning(f"Failed to send image to ginga:\n" + traceback.format_exc()) def view_header(self): """ Display the header of one or more selected files in the metadata.
@@ -474,7 +474,7 @@ def view_header(self):
                 hdu.header.totextfile(header_string_buffer)
             except Exception as e:
                 display_error(self._main_controller.main_window, f"Failed to read header from file {file.name} in {file.parent}: {e}")
-                msgs.warn(f"Failed to read header from {file}:\n" + traceback.format_exc())
+                msgs.warning(f"Failed to read header from {file}:\n" + traceback.format_exc())
                 return
         header_string_buffer.seek(0)
         window = TextViewerWindow(title=f"{file.name} Header", width=80, height=50,start_at_top=True, filename=file.parent / (file.name+".txt"), text_stream=header_string_buffer)
@@ -525,7 +525,7 @@ def paste_metadata_rows(self):
             self._model.pasteFrom(clipboard)
         except Exception as e:
             traceback_string = "".join(traceback.format_exc())
-            msgs.warn(f"Failed to paste metadata rows:\n" + traceback_string)
+            msgs.warning(f"Failed to paste metadata rows:\n" + traceback_string)
             display_error(self._main_controller.main_window, f"Could not paste rows to this PypeIt file: {e}")
 
 
@@ -745,7 +745,7 @@ def save_one(self):
             # Shouldn't really happen, it would mean the save tab button was enabled
             # when it shouldn't be. We'll handle this case and log it to prevent a crash
             # just in case though.
-            msgs.warn(f"Attempt to save a tab that is *not* a PypeItFileView!")
+            msgs.warning(f"Attempt to save a tab that is *not* a PypeItFileView!")
 
     def _save_file(self, file_model : PypeItFileModel, prompt_for_all : bool=False) -> DialogResponses:
 
diff --git a/pypeit/setup_gui/dialog_helpers.py b/pypeit/setup_gui/dialog_helpers.py
index efed49699f..3cfefb5fe5 100644
--- a/pypeit/setup_gui/dialog_helpers.py
+++ b/pypeit/setup_gui/dialog_helpers.py
@@ -231,7 +231,7 @@ def display_error(parent : QWidget, message: str) -> None:
         parent: The parent widget of the pop-up dialog
         message: The message to display.
     """
-    msgs.warn(message) # Make sure the message also goes to the logs
+    msgs.warning(message) # Make sure the message also goes to the logs
     QMessageBox.warning(parent, "PypeIt Setup Error", message, QMessageBox.Ok)
 
 def prompt_to_save(parent : QWidget) -> DialogResponses:
diff --git a/pypeit/setup_gui/model.py b/pypeit/setup_gui/model.py
index 86d46546ae..ccd5fb5d5e 100644
--- a/pypeit/setup_gui/model.py
+++ b/pypeit/setup_gui/model.py
@@ -365,7 +365,7 @@ def setData(self, index, value, role=Qt.EditRole):
             try:
                 self.metadata[colname][index.row()] = value
             except ValueError as e:
-                msgs.warn(f"Failed to set {colname} row {index.row()} to '{value}'. ValueError: {e}")
+                msgs.warning(f"Failed to set {colname} row {index.row()} to '{value}'. ValueError: {e}")
             self.dataChanged.emit(index,index,[Qt.DisplayRole, Qt.EditRole])
             return True
 
@@ -1075,8 +1075,8 @@ def save(self):
             pf.write(self.filename)
 
         except Exception as e:
-            msgs.warn(f"Failed saving setup {self.name_stem} to {self.save_location}.")
-            msgs.warn(traceback.format_exc())
+            msgs.warning(f"Failed saving setup {self.name_stem} to {self.save_location}.")
+            msgs.warning(traceback.format_exc())
             # Raise an exception that will look nice when displayed to the GUI
             raise RuntimeError(f"Failed saving setup {self.name_stem} to {self.save_location}.\nException: {e}")
         self.state = ModelState.UNCHANGED
diff --git a/pypeit/setup_gui/view.py b/pypeit/setup_gui/view.py
index 5dbfabd6c4..2ae4f69b65 100644
--- a/pypeit/setup_gui/view.py
+++ b/pypeit/setup_gui/view.py
@@ -1388,7 +1388,7 @@ def closeTab(self, tab_name):
         try:
             index = self._tabNames.index(tab_name)
         except ValueError :
-            msgs.warn(f"Failed to find tab named {tab_name} in list.")
+            msgs.warning(f"Failed to find tab named {tab_name} in list.")
             return
         tab = self.widget(index)
         if tab.closeable:
@@ -1408,7 +1408,7 @@ def updateTabText(self, tab_name, tab_state):
         try:
             index = self._tabNames.index(tab_name)
         except ValueError :
-            msgs.warn(f"Failed to find tab named {tab_name} in list.")
+            msgs.warning(f"Failed to find tab named {tab_name} in list.")
             return
 
         tab = self.widget(index)
@@ -1593,7 +1593,7 @@ def _helpButton(self):
         if result:
             msgs.info("Opened PypeIT docs.")
         else:
-            msgs.warn("Failed to open PypeIt docs at 'https://pypeit.readthedocs.io/en/latest/'")
+            msgs.warning("Failed to open PypeIt docs at 'https://pypeit.readthedocs.io/en/latest/'")
 
     def _create_button_box(self):
         """Create the box with action buttons.
diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py
index ada5c686e3..b271a5a937 100644
--- a/pypeit/slittrace.py
+++ b/pypeit/slittrace.py
@@ -247,7 +247,7 @@ def _validate(self):
         # If the echelle order is provided, check that the number of
         # orders matches the number of provided "slits"
         if self.ech_order is not None and len(self.ech_order) != self.nslits:
-            msgs.error('Number of provided echelle orders does not match the number of '
+            raise PypeItError('Number of provided echelle orders does not match the number of '
                        'order traces.')
 
         # Make sure mask, specmin, and specmax are at least 1D arrays.
@@ -357,7 +357,7 @@ def from_hdu(cls, hdu, chk_version=True, **kwargs):
         hdr = hdu[parsed_hdus[0]].header if isinstance(hdu, fits.HDUList) else hdu.header
         hdr_bitmask = BitMask.from_header(hdr)
         if chk_version and hdr_bitmask.bits != self.bitmask.bits:
-            msgs.error('The bitmask in this fits file appear to be out of date! Recreate this '
+            raise PypeItError('The bitmask in this fits file appears to be out of date! Recreate this '
                        'file by re-running the relevant script or set chk_version=False.',
                        cls='PypeItBitMaskError')
 
@@ -405,7 +405,7 @@ def slitord_id(self):
             return self.spat_id
         if self.pypeline == 'Echelle':
             return self.ech_order
-        msgs.error(f'Unrecognized Pypeline {self.pypeline}')
+        raise PypeItError(f'Unrecognized Pypeline {self.pypeline}')
 
     @property
     def slitord_txt(self):
@@ -421,7 +421,7 @@ def slitord_txt(self):
             return 'slit'
         if self.pypeline == 'Echelle':
             return 'order'
-        msgs.error(f'Unrecognized Pypeline {self.pypeline}')
+        raise PypeItError(f'Unrecognized Pypeline {self.pypeline}')
 
     def spatid_to_zero(self, spat_id):
         """
@@ -451,7 +451,7 @@ def slitord_to_zero(self, slitord):
             return np.where(self.spat_id == slitord)[0][0]
         if self.pypeline == 'Echelle':
             return np.where(self.ech_order == slitord)[0][0]
-        msgs.error('Unrecognized Pypeline {:}'.format(self.pypeline))
+        raise PypeItError('Unrecognized Pypeline {:}'.format(self.pypeline))
 
     def get_slitlengths(self, initial=False, median=False):
         """
@@ -527,7 +527,7 @@ def get_radec_image(self, wcs, alignSplines, tilts, slit_compute=None, slice_off
         elif isinstance(slit_compute, (int, list)):
             slit_compute = np.atleast_1d(slit_compute)
         else:
-            msgs.error('Unrecognized type for slit_compute')
+            raise PypeItError('Unrecognized type for slit_compute')
         # Prepare the print out
         substring = '' if slice_offset is None else f' with slice_offset={slice_offset:.3f}'
@@ -536,7 +536,7 @@ def get_radec_image(self, wcs, alignSplines, tilts, slit_compute=None, slice_off
         if slice_offset is None:
             slice_offset = 0.0
         if slice_offset < -0.5 or slice_offset > 0.5:
-            msgs.error(f"Slice offset must be between -0.5 and 0.5. slice_offset={slice_offset}")
+            raise PypeItError(f"Slice offset must be between -0.5 and 0.5. slice_offset={slice_offset}")
         # Initialise the output
         raimg = np.zeros((self.nspec, self.nspat))
         decimg = np.zeros((self.nspec, self.nspat))
@@ -549,7 +549,7 @@ def get_radec_image(self, wcs, alignSplines, tilts, slit_compute=None, slice_off
             onslit = (slitid_img_init == spatid)
             onslit_init = np.where(onslit)
             if self.mask[slit_idx] != 0:
-                msgs.error(f'Slit {spatid} ({slit_idx+1}/{self.spat_id.size}) is masked. Cannot '
+                raise PypeItError(f'Slit {spatid} ({slit_idx+1}/{self.spat_id.size}) is masked. Cannot '
                            'generate RA/DEC image.')
             # Retrieve the pixel offset from the central trace
             evalpos = alignSplines.transform(slit_idx, onslit_init[1], onslit_init[0])
@@ -661,13 +661,13 @@ def slit_img(self, pad=None, slitidx=None, initial=False, flexure=None, exclude_
         """
         #
         if slitidx is not None and exclude_flag is not None:
-            msgs.error("Cannot pass in both slitidx and exclude_flag!")
+            raise PypeItError("Cannot pass in both slitidx and exclude_flag!")
         # Check the input
         if pad is None:
             pad = self.pad
         _pad = pad if isinstance(pad, tuple) else (pad,pad)
         if len(_pad) != 2:
-            msgs.error('Padding for both left and right edges should be provided as a 2-tuple!')
+            raise PypeItError('Padding for both left and right edges should be provided as a 2-tuple!')
 
         # Pixel coordinates
         spat = np.arange(self.nspat)
@@ -749,14 +749,14 @@ def spatial_coordinate_image(self, slitidx=None, full=False, slitid_img=None,
         # Slit indices to include
         _slitidx = np.arange(self.nslits) if slitidx is None else np.atleast_1d(slitidx).ravel()
         if full and len(_slitidx) > 1:
-            msgs.error('For a full image with the slit coordinates, must select a single slit.')
+            raise PypeItError('For a full image with the slit coordinates, must select a single slit.')
 
         # Generate the slit ID image if it wasn't provided
         if not full:
             if slitid_img is None:
                 slitid_img = self.slit_img(pad=pad, slitidx=_slitidx, initial=initial)
             if slitid_img.shape != (self.nspec,self.nspat):
-                msgs.error('Provided slit ID image does not have the correct shape!')
+                raise PypeItError('Provided slit ID image does not have the correct shape!')
 
         # Choose the slit edges to use
         left, right, _ = self.select_edges(initial=initial, flexure=flexure_shift)
@@ -771,7 +771,7 @@ def spatial_coordinate_image(self, slitidx=None, full=False, slitid_img=None,
         if np.any(indx[:,_slitidx]):
             bad_slits = np.where(np.any(indx, axis=0))[0]
             # TODO: Shouldn't this fault?
-            msgs.warn('Slits {0} have negative (or 0) slit width!'.format(bad_slits))
+            msgs.warning('Slits {0} have negative (or 0) slit width!'.format(bad_slits))
 
         # Output image
         coo_img = np.zeros((self.nspec,self.nspat), dtype=float)
@@ -835,7 +835,7 @@ def slit_spat_pos(left, right, nspat):
             spatial coordinates.
         """
         if left.shape != right.shape:
-            msgs.error('Left and right traces must have the same shape.')
+            raise PypeItError('Left and right traces must have the same shape.')
         nspec = left.shape[0]
         return (left[nspec//2,:] + right[nspec//2,:])/2/nspat
 
@@ -862,15 +862,15 @@ def mask_add_missing_obj(self, sobjs, spat_flexure, fwhm, boxcar_rad):
         msgs.info('Add undetected objects at the expected location from slitmask design.')
 
         if fwhm is None:
-            msgs.error('A FWHM for the optimal extraction must be provided. See `find_fwhm` in '
+            raise PypeItError('A FWHM for the optimal extraction must be provided. See `find_fwhm` in '
                       '`FindObjPar`.')
 
         if self.maskdef_objpos is None:
-            msgs.error('An array with the object positions expected from slitmask design is '
+            raise PypeItError('An array with the object positions expected from slitmask design is '
                        'missing.')
 
         if self.maskdef_offset is None:
-            msgs.error('A value for the slitmask offset must be provided.')
+            raise PypeItError('A value for the slitmask offset must be provided.')
 
         # Restrict to objects on this detector
         if sobjs.nobj > 0:
@@ -907,7 +907,7 @@ def mask_add_missing_obj(self, sobjs, spat_flexure, fwhm, boxcar_rad):
                 # If we keep what follows, probably should add some tolerance to be off the edge
                 # Otherwise things break in skysub
                 if (SPAT_PIXPOS > right_tweak[specmid, islit]) or (SPAT_PIXPOS < left_tweak[specmid, islit]):
-                    msgs.warn("Targeted object is off the detector")
+                    msgs.warning("Targeted object is off the detector")
                     continue
 
                 # Generate a new specobj
@@ -976,7 +976,7 @@ def mask_add_missing_obj(self, sobjs, spat_flexure, fwhm, boxcar_rad):
         # Vette
         for sobj in sobjs:
             if not sobj.ready_for_extraction():
-                msgs.error("Bad SpecObj.  Can't proceed")
+                raise PypeItError("Bad SpecObj.  Can't proceed")
 
         # Return
         return sobjs
@@ -1002,11 +1002,11 @@ def assign_maskinfo(self, sobjs, plate_scale, spat_flexure, TOLER=1.):
         """
         if self.maskdef_objpos is None:
-            msgs.error('An array of object positions predicted by the slitmask design must be provided.')
+            raise PypeItError('An array of object positions predicted by the slitmask design must be provided.')
         if self.maskdef_slitcen is None:
-            msgs.error('An array of slit centers predicted by the slitmask design must be provided.')
+            raise PypeItError('An array of slit centers predicted by the slitmask design must be provided.')
         if self.maskdef_offset is None:
-            msgs.error('A value for the slitmask offset must be provided.')
+            raise PypeItError('A value for the slitmask offset must be provided.')
 
         # Unpack -- Remove this once we have a DataModel
         obj_maskdef_id = self.maskdef_designtab['MASKDEF_ID'].data
@@ -1021,10 +1021,10 @@ def assign_maskinfo(self, sobjs, plate_scale, spat_flexure, TOLER=1.):
             on_det = (sobjs.DET == self.detname) & (sobjs.OBJID > 0)  # use only positive detections
             cut_sobjs = sobjs[on_det]
             if cut_sobjs.nobj == 0:
-                msgs.warn('NO detected objects.')
+                msgs.warning('NO detected objects.')
                 return sobjs
         else:
-            msgs.warn('NO detected objects.')
+            msgs.warning('NO detected objects.')
             return sobjs
 
         msgs.info('Assign slitmask design info to detected objects. '
@@ -1253,9 +1253,9 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig
         """
         if self.maskdef_objpos is None:
-            msgs.error('An array of object positions predicted by the slitmask design must be provided.')
+            raise PypeItError('An array of object positions predicted by the slitmask design must be provided.')
         if self.maskdef_slitcen is None:
-            msgs.error('An array of slit centers predicted by the slitmask design must be provided.')
+            raise PypeItError('An array of slit centers predicted by the slitmask design must be provided.')
 
         # If slitmask offset provided by the user, just save it and return
         if slitmask_off is not None:
@@ -1275,12 +1275,12 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig
             on_det = (sobjs.DET == self.detname) & (sobjs.OBJID > 0)  # use only positive detections
             cut_sobjs = sobjs[on_det]
             if cut_sobjs.nobj == 0:
-                msgs.warn('NO detected objects. Slitmask offset cannot be estimated in '
+                msgs.warning('NO detected objects. Slitmask offset cannot be estimated in '
                           f'{self.detname}.')
                 self.maskdef_offset = 0.0
                 return
         else:
-            msgs.warn('NO detected objects. Slitmask offset cannot be estimated in '
+            msgs.warning('NO detected objects. Slitmask offset cannot be estimated in '
                       f'{self.detname}.')
             self.maskdef_offset = 0.0
             return
@@ -1387,11 +1387,11 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig
                          f'{round(self.maskdef_offset, 2)} pixels ('
                          f'{round(self.maskdef_offset*platescale, 2)} arcsec)')
             else:
-                msgs.warn(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. '
+                msgs.warning(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. '
                           f'Slitmask offset cannot be estimated in {self.detname}.')
                 self.maskdef_offset = 0.0
         else:
-            msgs.warn(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. '
+            msgs.warning(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. '
                       f'Slitmask offset cannot be estimated in {self.detname}.')
             self.maskdef_offset = 0.0
 
@@ -1445,7 +1445,7 @@ def get_maskdef_extract_fwhm(self, sobjs, platescale, fwhm_parset, find_fwhm):
             msgs.info('Using median FWHM = {:.3f}" from detected objects.'.format(fwhm*platescale))
         if fwhm is None:
             fwhm = find_fwhm
-            msgs.warn('The median FWHM cannot be determined because no objects were detected. '
+            msgs.warning('The median FWHM cannot be determined because no objects were detected. '
                       'Using `find_fwhm` = {:.3f}". if the user wants to provide a value '
                       'set parameter `missing_objs_fwhm` in `SlitMaskPar`'.format(fwhm*platescale))
 
@@ -1483,7 +1483,7 @@ def user_mask(self, det, user_slits):
                     self.mask[msk] = self.bitmask.turn_on(self.mask[msk], 'USERIGNORE')
         else:
-            msgs.error('Not ready for this method: {:s}'.format(
+            raise PypeItError('Not ready for this method: {:s}'.format(
                        user_slits['method']))
 
     def mask_flats(self, flatImages):
@@ -1537,7 +1537,7 @@ def merge_user_slit(slitspatnum, maskIDs):
         return None
     #
     if slitspatnum is not None and maskIDs is not None:
-        msgs.error("These should not both have been set")
+        raise PypeItError("These should not both have been set")
     # MaskIDs
     user_slit_dict = {}
     if maskIDs is not None:
@@ -1615,7 +1615,7 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors):
     calib_slits = np.array(calib_slits)
 
     if list_detectors is None:
-        msgs.warn('No average slitmask offset computed')
+        msgs.warning('No average slitmask offset computed')
         return calib_slits
 
     # unpack list_detectors
@@ -1634,10 +1634,10 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors):
 
     if slitmask_offsets.size == 0:
         # If all detectors have maskdef_offset=0 give a warning
-        msgs.warn('No slitmask offset could be measured. Assumed to be zero. ')
-        msgs.warn('RA, DEC, OBJNAME assignment and forced extraction of undetected objects MAY BE WRONG! '
+        msgs.warning('No slitmask offset could be measured. Assumed to be zero. ')
+        msgs.warning('RA, DEC, OBJNAME assignment and forced extraction of undetected objects MAY BE WRONG! '
                      'Especially for dithered observations!')
-        msgs.warn('To provide a value set `slitmask_offset` in `SlitMaskPar`')
+        msgs.warning('To provide a value set `slitmask_offset` in `SlitMaskPar`')
         return calib_slits
 
diff --git a/pypeit/spec2dobj.py b/pypeit/spec2dobj.py
index 5e693e44c0..3095718414 100644
--- a/pypeit/spec2dobj.py
+++ b/pypeit/spec2dobj.py
@@ -132,7 +132,7 @@ def from_file(cls, ifile, detname, chk_version=True):
         # Check detname is valid
         detnames = np.unique([h.name.split('-')[0] for h in hdu[1:]])
         if detname not in detnames:
-            msgs.error(f'Your --det={detname} is not available. \n Choose from: {detnames}')
+            raise PypeItError(f'Your --det={detname} is not available. \n Choose from: {detnames}')
         return cls.from_hdu(hdu, detname, chk_version=chk_version)
 
     @classmethod
@@ -159,7 +159,7 @@ def from_hdu(cls, hdu, detname, chk_version=True):
 
         if len(ext) == 0:
             # No relevant extensions!
-            msgs.error(f'{detname} not available in any extension of the input HDUList.')
+            raise PypeItError(f'{detname} not available in any extension of the input HDUList.')
 
         mask_ext = f'{detname}-BPMMASK'
         has_mask = mask_ext in ext
@@ -203,7 +203,7 @@ def _validate(self):
         # Check the detector/mosaic identifier has been provided (note this is a
         # property method)
         if self.detname is None:
-            msgs.error('Detector/Mosaic string identifier must be set at instantiation.')
+            raise PypeItError('Detector/Mosaic string identifier must be set at instantiation.')
 
     def _bundle(self):
         """
@@ -313,9 +313,9 @@ def update_slits(self, spec2DObj):
         """
         # Quick checks
         if spec2DObj.detname != self.detname:
-            msgs.error("Objects are not even the same detector!!")
+            raise PypeItError("Objects are not even the same detector!!")
         if not np.array_equal(spec2DObj.slits.spat_id, spec2DObj.slits.spat_id):
-            msgs.error("SPAT_IDs are not in sync!")
+            raise PypeItError("SPAT_IDs are not in sync!")
 
         # Find the good ones on the input object
         # bpm = spec2DObj.slits.mask.astype(bool)
@@ -524,7 +524,7 @@ def __setitem__(self, item, value):
             if not isinstance(value, Spec2DObj):
                 raise KeyError('Any item not assigned to the meta dictionary must be a Spec2DObj.')
             if value.detname is not None and value.detname != item:
-                msgs.warn(f'Mismatch between keyword used to define the Spec2DObj item ({item}) '
+                msgs.warning(f'Mismatch between keyword used to define the Spec2DObj item ({item}) '
                           f'and the name of the detector/mosaic ({value.detname}).')
         self.__dict__[item] = value
 
@@ -637,7 +637,7 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None,
         if _outfile.exists():
             # Clobber?
             if not overwrite:
-                msgs.warn(f'File {_outfile} exits.  Use -o to overwrite.')
+                msgs.warning(f'File {_outfile} exists.  Use -o to overwrite.')
                 return
             # Load up the original
             _allspecobj = AllSpec2DObj.from_fits(_outfile)
@@ -658,7 +658,7 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None,
                 if det in dets:
                     # Check version
                     if self[det].version != _allspecobj[det].version:
-                        msgs.error("Original spec2D object has a different version.  Too risky to continue.  Rerun both")
+                        raise PypeItError("Original spec2D object has a different version.  Too risky to continue.  Rerun both")
                     # Generate the slit "mask"
                     slitmask = _allspecobj[det].slits.slit_img(
                                     flexure=_allspecobj[det].sci_spat_flexure)
@@ -690,10 +690,10 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None,
             try:
                 prihdu.header[self.hdr_prefix+key.upper()] = self['meta'][key]
             except:
-                msgs.warn(f'Cannot add meta entry {key} to primary header!')
+                msgs.warning(f'Cannot add meta entry {key} to primary header!')
                 continue
             if key.lower() != key:
-                msgs.warn('Keywords in the meta dictionary are always read back in as lower case. '
+                msgs.warning('Keywords in the meta dictionary are always read back in as lower case. '
                           f'Subsequent reads of {_outfile} will have converted {key} to '
                           f'{key.lower()}!')
 
@@ -744,7 +744,7 @@ def flexure_diagnostics(self, flexure_type='spat'):
         """
         if flexure_type not in ['spat', 'spec']:
-            msgs.error(f'flexure_type must be spat or spec, not {flexure_type}')
+            raise PypeItError(f'flexure_type must be spat or spec, not {flexure_type}')
         return_flex = {}
         # Loop on Detectors
         for det in self.detectors:
diff --git a/pypeit/specobj.py b/pypeit/specobj.py
index a53d518ba3..2a7fc0226b 100644
--- a/pypeit/specobj.py
+++ b/pypeit/specobj.py
@@ -272,9 +272,9 @@ def from_arrays(cls, PYPELINE:str, wave:np.ndarray, counts:np.ndarray, ivar:np.n
         # Check the type of the flat field if it's not None
         if flat is not None:
             if not isinstance(flat, np.ndarray):
-                msgs.error('Flat must be a numpy array')
+                raise PypeItError('Flat must be a numpy array')
             if flat.shape != counts.shape:
-                msgs.error('Flat and counts must have the same shape')
+                raise PypeItError('Flat and counts must have the same shape')
         # Add in arrays
         for item, attr in zip([wave, counts, ivar, flat], ['_WAVE', '_COUNTS', '_COUNTS_IVAR', '_FLAT']):
             # Check if any of the arrays are None. If so, skip
@@ -291,7 +291,7 @@ def _validate(self):
         """
         pypelines = ['MultiSlit', 'SlicerIFU', 'Echelle']
         if self.PYPELINE not in pypelines:
-            msgs.error(f'{self.PYPELINE} is not a known pipeline procedure. Options are: '
+            raise PypeItError(f'{self.PYPELINE} is not a known pipeline procedure. Options are: '
                        f"{', '.join(pypelines)}")
 
     def _bundle(self, **kwargs):
@@ -324,7 +324,7 @@ def to_hdu(self, **kwargs):
             set to True.
""" if 'force_to_bintbl' in kwargs and not kwargs['force_to_bintbl']: - msgs.warn(f'Writing a {self.__class__.__name__} always requires force_to_bintbl=True') + msgs.warning(f'Writing a {self.__class__.__name__} always requires force_to_bintbl=True') del kwargs['force_to_bintbl'] return super().to_hdu(force_to_bintbl=True, **kwargs) @@ -337,7 +337,7 @@ def slit_order(self): elif self.PYPELINE == 'SlicerIFU': return self.SLITID else: - msgs.error("Bad PYPELINE") + raise PypeItError("Bad PYPELINE") @property @@ -349,7 +349,7 @@ def slit_orderindx(self): elif self.PYPELINE == 'SlicerIFU': return self.SLITID else: - msgs.error("Bad PYPELINE") + raise PypeItError("Bad PYPELINE") @property def mnx_wave(self): @@ -430,7 +430,7 @@ def get_spectrograph(self): """ # some checks first if self.spectrograph is None and self.PYP_SPEC is None: - msgs.error("PYP_SPEC must be set to access the spectrograph") + raise PypeItError("PYP_SPEC must be set to access the spectrograph") # get it if self.spectrograph is None: self.spectrograph = load_spectrograph(self.PYP_SPEC) @@ -493,7 +493,7 @@ def set_name(self): name += f'-{self.DET}' self.NAME = name else: - msgs.error(f'{self.PYPELINE} is not an understood pipeline.') + raise PypeItError(f'{self.PYPELINE} is not an understood pipeline.') def copy(self): """ @@ -547,7 +547,7 @@ def update_flex_shift(self, shift, flex_type='local'): elif flex_type == 'local': self.FLEX_SHIFT_LOCAL = shift else: - msgs.error("Spectral flexure type must be 'global' or 'local' only") + raise PypeItError("Spectral flexure type must be 'global' or 'local' only") # Now update the total flexure self.FLEX_SHIFT_TOTAL += shift @@ -656,7 +656,7 @@ def to_arrays(self, extraction='OPT', fluxed=True): swave = extraction+'_WAVE' smask = extraction+'_MASK' if self[swave] is None: - msgs.error("This object has not been extracted with extract={}.".format(extraction)) + raise PypeItError("This object has not been extracted with extract={}.".format(extraction)) # Fluxed? if fluxed: sflux = extraction+'_FLAM' @@ -711,8 +711,8 @@ def ready_for_extraction(self): passed = True for key in required: if self[key] is None: - msgs.warn("Item {} is missing from SpecObj. Failing vette".format(key)) - msgs.warn('{}'.format(self)) + msgs.warning("Item {} is missing from SpecObj. Failing vette".format(key)) + msgs.warning('{}'.format(self)) passed = False # return passed @@ -835,7 +835,7 @@ def best_ext_match(self, extract=None, fluxed=True): # If not set, prefer the optimal extraction over the boxcar one. _extract = 'OPT' if extract is None else extract if _extract not in ['OPT', 'BOX']: - msgs.error(f'Extraction type ({_extract}) not understood; must be OPT or BOX.') + raise PypeItError(f'Extraction type ({_extract}) not understood; must be OPT or BOX.') if _extract == 'OPT': if self.has_opt_ext(fluxed=fluxed): return 'OPT', fluxed @@ -851,5 +851,5 @@ def best_ext_match(self, extract=None, fluxed=True): if self.has_box_ext(fluxed=False): return 'BOX', False # If we make it here, we've got a problem! - msgs.error('Unable to find a relevant set of data!') + raise PypeItError('Unable to find a relevant set of data!') diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 9e30739ec6..4409c1f80a 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -87,7 +87,7 @@ def from_fitsfile(cls, fits_file, det=None, chk_version=True): # Catch common error of trying to read a OneSpec file if 'DMODCLS' in hdul[1].header and hdul[1].header['DMODCLS'] == 'OneSpec': - msgs.error('This is a OneSpec file. 
You are treating it like a SpecObjs file.') + raise PypeItError('This is a OneSpec file. You are treating it like a SpecObjs file.') # Load the calibration association into the instance attribute `calibs` if 'CLBS_DIR' in slf.header: @@ -104,11 +104,11 @@ def from_fitsfile(cls, fits_file, det=None, chk_version=True): if 'DETECTOR' not in hdu.name: continue if 'DMODCLS' not in hdu.header: - msgs.error('HDUs with DETECTOR in the name must have DMODCLS in their header.') + raise PypeItError('HDUs with DETECTOR in the name must have DMODCLS in their header.') try: dmodcls = eval(hdu.header['DMODCLS']) except: - msgs.error(f"Unknown detector type datamodel class: {hdu.header['DMODCLS']}") + raise PypeItError(f"Unknown detector type datamodel class: {hdu.header['DMODCLS']}") # NOTE: This requires that any "detector" datamodel class has a # from_hdu method, and the name of the HDU must have a known format # (e.g., 'DET01-DETECTOR'). @@ -246,10 +246,10 @@ def unpack_object(self, ret_flam=False, log10blaze=False, min_blaze_value=1e-3, msg = f"{extract_type} extracted flux is not available for all slits/orders. " \ f"Consider trying the {other} extraction." if not remove_missing: - msgs.error(msg) + raise PypeItError(msg) else: msg += f"{msgs.newline()}-- The missing data will be removed --" - msgs.warn(msg) + msgs.warning(msg) # Remove missing data r_indx = np.where(none_flux)[0] self.remove_sobj(r_indx) @@ -257,7 +257,7 @@ def unpack_object(self, ret_flam=False, log10blaze=False, min_blaze_value=1e-3, if extract_blaze: none_blaze = [f is None for f in getattr(self, blaze_key)] if np.any(none_blaze): - msgs.error(f"{extract_type} extracted blaze is not available for all slits/orders. " + raise PypeItError(f"{extract_type} extracted blaze is not available for all slits/orders. " f"Consider trying the {other} extraction, or NOT using the flat.") # @@ -357,7 +357,7 @@ def get_std(self, multi_spec_det=None): this_det = self.DET == idet if not np.any(this_det): unique_det = np.unique(self.DET) - msgs.error(f'No matches for {idet} in spec1d file. Unique options found' + raise PypeItError(f'No matches for {idet} in spec1d file. Unique options found' f"are {', '.join(unique_det)}. Check usage of multi_spec_det.") istd = SNR[this_det].argmax() sobjs_std.add_sobj(self[this_det][istd]) @@ -395,7 +395,7 @@ def get_std(self, multi_spec_det=None): sobjs_std.header = self.header return sobjs_std else: - msgs.error('Unknown pypeline') + raise PypeItError('Unknown pypeline') def append_neg(self, sobjs_neg): """ @@ -406,7 +406,7 @@ def append_neg(self, sobjs_neg): """ if sobjs_neg.nobj == 0: - msgs.warn("No negative objects found...") + msgs.warning("No negative objects found...") return # Assign the sign and the objids sobjs_neg.sign = -1.0 @@ -418,7 +418,7 @@ def append_neg(self, sobjs_neg): elif sobjs_neg[0].PYPELINE == 'SlicerIFU': sobjs_neg.OBJID = -sobjs_neg.OBJID else: - msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) + raise PypeItError("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) self.add_sobj(sobjs_neg) # Sort objects according to their spatial location. 
Necessary for the extraction to properly work @@ -439,7 +439,7 @@ def purge_neg(self): elif self[0].PYPELINE == 'SlicerIFU': index = self.OBJID < 0 else: - msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) + raise PypeItError("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) self.remove_sobj(index) @@ -457,7 +457,7 @@ def make_neg_pos(self): elif self[0].PYPELINE == 'SlicerIFU': index = self.OBJID < 0 else: - msgs.error("Should not get here") + raise PypeItError("Should not get here") try: self[index].OPT_COUNTS *= -1 except (TypeError,ValueError): @@ -483,7 +483,7 @@ def slitorder_indices(self, slitorder): elif self[0].PYPELINE == 'SlicerIFU': indx = self.SLITID == slitorder else: - msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) + raise PypeItError("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) return indx @@ -507,7 +507,7 @@ def name_indices(self, name): elif self[0].PYPELINE == 'SlicerIFU': indx = self.NAME == name else: - msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) + raise PypeItError("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) return indx @@ -537,7 +537,7 @@ def slitorder_uniq_id_indices(self, uniq_id, order=None): elif self[0].PYPELINE == 'SlicerIFU': indx = self.SPAT_PIXPOS_ID == uniq_id else: - msgs.error("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) + raise PypeItError("The '{0:s}' PYPELINE is not defined".format(self[0].PYPELINE)) return indx @@ -567,9 +567,9 @@ def add_sobj(self, sobj): self.specobjs = np.append(self.specobjs, isobj) return if not isinstance(sobj, (np.ndarray, list)): - msgs.error(f'Unable to add {type(sobj)} objects to SpecObjs') + raise PypeItError(f'Unable to add {type(sobj)} objects to SpecObjs') if any([not isinstance(s, specobj.SpecObj) for s in sobj]): - msgs.error('List or arrays of objects to add must all be of type SpecObj.') + raise PypeItError('List or arrays of objects to add must all be of type SpecObj.') self.specobjs = np.append(self.specobjs, sobj) def remove_sobj(self, index): @@ -659,7 +659,7 @@ def apply_flux_calib(self, par, spectrograph, sens, tell=False): extrap_sens=par['extrap_sens'], airmass=float(self.header['AIRMASS'])) else: - msgs.error('This should not happen, there is a problem with your sensitivity function.') + raise PypeItError('This should not happen, there is a problem with your sensitivity function.') elif spectrograph.pypeline == 'Echelle': @@ -686,10 +686,10 @@ def apply_flux_calib(self, par, spectrograph, sens, tell=False): msgs.info('Unable to flux calibrate order = {:} as it is not in your sensitivity function. ' 'Something is probably wrong with your sensitivity function.'.format(sci_obj.ECH_ORDER)) else: - msgs.error('This should not happen') + raise PypeItError('This should not happen') else: - msgs.error('Unrecognized pypeline: {0}'.format(spectrograph.pypeline)) + raise PypeItError('Unrecognized pypeline: {0}'.format(spectrograph.pypeline)) def copy(self): @@ -799,7 +799,7 @@ def write_to_fits(self, subheader, outfile, overwrite=True, update_det=None, If True, run in debug mode. """ if os.path.isfile(outfile) and not overwrite: - msgs.warn(f'{outfile} exits. Set overwrite=True to overwrite it.') + msgs.warning(f'{outfile} exits. 
Set overwrite=True to overwrite it.') return # If the file exists and update_det (and slit_spat_num) is provided, use the existing header @@ -873,7 +873,7 @@ def write_to_fits(self, subheader, outfile, overwrite=True, update_det=None, #exit() shdul = sobj.to_hdu() if len(shdul) not in [1, 2]: - msgs.error('CODING ERROR: SpecObj datamodel changed. to_hdu should return 1 or 2 ' + raise PypeItError('CODING ERROR: SpecObj datamodel changed. to_hdu should return 1 or 2 ' 'HDUs. If returned, the 2nd one should be the detector/mosaic.') if len(shdul) == 2: detector_hdus[sobj['DET']] = shdul[1] @@ -882,7 +882,7 @@ def write_to_fits(self, subheader, outfile, overwrite=True, update_det=None, shdu = shdul if len(shdu) != 1 or not isinstance(shdu[0], fits.hdu.table.BinTableHDU): - msgs.error('CODING ERROR: SpecObj datamodel changed.') + raise PypeItError('CODING ERROR: SpecObj datamodel changed.') # Name shdu[0].name = sobj.NAME @@ -1135,7 +1135,7 @@ def get_std_trace(detname, std_outfile, chk_version=True): elif 'SlicerIFU' in pypeline: std_tab = None else: - msgs.error('Unrecognized pypeline') + raise PypeItError('Unrecognized pypeline') else: std_tab = None diff --git a/pypeit/spectrographs/aat_uhrf.py b/pypeit/spectrographs/aat_uhrf.py index ea71c1ed10..ad313b93cd 100644 --- a/pypeit/spectrographs/aat_uhrf.py +++ b/pypeit/spectrographs/aat_uhrf.py @@ -173,7 +173,7 @@ def compound_meta(self, headarr, meta_key): zendist = 0.5*(headarr[0]['ZDSTART']+headarr[0]['ZDEND']) # Return the airmass based on the zenith distance return 1./np.cos(np.deg2rad(zendist)) - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -227,7 +227,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_specific_par(self, scifile, inp_par=None): @@ -250,7 +250,7 @@ def config_specific_par(self, scifile, inp_par=None): par = super().config_specific_par(scifile, inp_par=inp_par) if par['calibrations']['wavelengths']['reid_arxiv'] is None: - msgs.warn("Wavelength setup not supported!" + msgs.newline() + msgs.newline() + + msgs.warning("Wavelength setup not supported!" 
                      "Please perform your own wavelength calibration, and provide the path+filename using:" + msgs.newline() +
                      msgs.pypeitpar_text(['calibrations', 'wavelengths', 'reid_arxiv = ']))
        # Return
diff --git a/pypeit/spectrographs/apf_levy.py b/pypeit/spectrographs/apf_levy.py
index 6243f7af6f..01da0b8065 100644
--- a/pypeit/spectrographs/apf_levy.py
+++ b/pypeit/spectrographs/apf_levy.py
@@ -187,12 +187,12 @@ def compound_meta(self, headarr, meta_key):
            elif "Pinhole" in decker_str:
                return 'Pinhole'
            else:
-                msgs.error(f"Unrecognized decker {decker_str}")
+                raise PypeItError(f"Unrecognized decker {decker_str}")
 
        if meta_key == 'binning':
            return f"{headarr[0]['RBIN']+1},{headarr[0]['CBIN']+1}"
 
-        msgs.error("Not ready for this compound meta")
+        raise PypeItError("Not ready for this compound meta")
 
    def configuration_keys(self):
        """
@@ -328,7 +328,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
        if ftype in ['pinhole']:
            return good_exp & (fitstbl['idname'] == 'NarrowFlat') & (fitstbl['decker'] == 'Pinhole')
 
-        msgs.warn(f'Cannot determine if frames are of type {ftype}.')
+        msgs.warning(f'Cannot determine if frames are of type {ftype}.')
        return np.zeros(len(fitstbl), dtype=bool)
 
    def is_science(self, fitstbl):
@@ -426,7 +426,7 @@ def get_rawimage(self, raw_file, det, sec_includes_binning=True):
        """
        # Check for file; allow for extra .gz, etc. suffix
        if not os.path.isfile(raw_file):
-            msgs.error(f'{raw_file} not found!')
+            raise PypeItError(f'{raw_file} not found!')
        hdu = io.fits_open(raw_file)
        head0 = hdu[0].header
 
diff --git a/pypeit/spectrographs/bok_bc.py b/pypeit/spectrographs/bok_bc.py
index b8e8748c28..bbfcbc195a 100644
--- a/pypeit/spectrographs/bok_bc.py
+++ b/pypeit/spectrographs/bok_bc.py
@@ -75,7 +75,7 @@ def compound_meta(self, headarr, meta_key):
            elif 'CCDSUM' in headarr[0]:  # For really old files
                binspatial, binspec = headarr[0]['CCDSUM'].split()
            else:
-                msgs.error("Could not find a header keyword for the binning")
+                raise PypeItError("Could not find a header keyword for the binning")
            return parse.binning2string(binspatial, binspec)
        elif meta_key == 'mjd':
            """
@@ -95,7 +95,7 @@ def compound_meta(self, headarr, meta_key):
                return headarr[0]['COMPLAMP']
            else:
                return 'off'
-        msgs.error("Not ready for this compound meta")
+        raise PypeItError("Not ready for this compound meta")
 
    def configuration_keys(self):
        """
@@ -310,7 +310,7 @@ def bpm(self, filename, det, shape=None, msbias=None):
            bpm_img[:, -1] = 1
 
        else:
-            msgs.error(f"Invalid detector number, {det}, for Bok B&C (only one detector).")
+            raise PypeItError(f"Invalid detector number, {det}, for Bok B&C (only one detector).")
 
        return bpm_img
 
@@ -386,7 +386,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            return np.zeros(len(fitstbl), dtype=bool)
        if ftype in ['arc', 'tilt']:
            return good_exp & (fitstbl['lampstat01'] != 'off')
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
diff --git a/pypeit/spectrographs/gemini_flamingos.py b/pypeit/spectrographs/gemini_flamingos.py
index 9e9d459404..d1481a4ce1 100644
--- a/pypeit/spectrographs/gemini_flamingos.py
+++ b/pypeit/spectrographs/gemini_flamingos.py
@@ -206,7 +206,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            return good_exp & (fitstbl['idname'] == 'OBJECT')
        if ftype in ['arc', 'tilt']:
            return good_exp & (fitstbl['idname'] == 'OBJECT')
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
 
@@ -337,6 +337,6 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            return good_exp & (fitstbl['idname'] == 'Science')
        if ftype in ['arc', 'tilt']:
            return good_exp & (fitstbl['idname'] == 'Arc')
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
diff --git a/pypeit/spectrographs/gemini_gmos.py b/pypeit/spectrographs/gemini_gmos.py
index f6e6c72868..af84467c0b 100644
--- a/pypeit/spectrographs/gemini_gmos.py
+++ b/pypeit/spectrographs/gemini_gmos.py
@@ -175,14 +175,14 @@ def compound_meta(self, headarr, meta_key):
            # binning in the spec2d file
            binning = headarr[0].get('BINNING')
            if binning is None:
-                msgs.error('Binning not found')
+                raise PypeItError('Binning not found')
            return binning
        elif meta_key == 'mjd':
            obsepoch = headarr[0].get('OBSEPOCH')
            if obsepoch is not None:
                return time.Time(obsepoch, format='jyear').mjd
            else:
-                msgs.warn('OBSEPOCH header keyword not found. Using today as the date.')
+                msgs.warning('OBSEPOCH header keyword not found. Using today as the date.')
                return time.Time.now().mjd
 
    def config_independent_frames(self):
@@ -283,7 +283,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
        if ftype == 'bias':
            return good_exp & (fitstbl['target'] == 'Bias')#& (fitstbl['idname'] == 'BIAS')
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    @classmethod
@@ -427,7 +427,7 @@ def get_rawimage(self, raw_file, det):
        # Number of amplifiers is hard-coded as follows
        numamp = (len(hdu) - 1) // self.ndet
        if numamp != detectors[0].numamplifiers:
-            msgs.error(f'Unexpected number of amplifiers for {self.name} based on number of '
+            raise PypeItError(f'Unexpected number of amplifiers for {self.name} based on number of '
                       f'extensions in {raw_file}.')
 
        # First read over the header info to determine the size of the output array...
@@ -524,7 +524,7 @@ def get_mosaic_par(self, mosaic, hdu=None, msc_ord=0):
        detectors = np.array([self.get_detector_par(det, hdu=hdu) for det in mosaic])
        # Binning *must* be consistent for all detectors
        if any(d.binning != detectors[0].binning for d in detectors[1:]):
-            msgs.error('Binning is somehow inconsistent between detectors in the mosaic!')
+            raise PypeItError('Binning is somehow inconsistent between detectors in the mosaic!')
 
        # Collect the offsets and rotations for *all unbinned* detectors in the
        # full instrument, ordered by the number of the detector. Detector
@@ -699,7 +699,7 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None,
            one contains the indices to order the slits from left to right in the PypeIt orientation
        """
        if not isinstance(filename, list):
-            msgs.error('The mask design file input should be a comma separated list of two files')
+            raise PypeItError('The mask design file input should be a comma separated list of two files')
 
        # Parse
        maskfile = filename[0]
diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py
index b30343dba7..d296f72642 100644
--- a/pypeit/spectrographs/gemini_gnirs.py
+++ b/pypeit/spectrographs/gemini_gnirs.py
@@ -144,37 +144,37 @@ def compound_meta(self, headarr, meta_key):
            try:
                return Time(headarr[0]['DATE-OBS'] + "T" + headarr[0]['TIME-OBS'])
            except KeyError:
-                msgs.warn("Time of observation is not in header")
+                msgs.warning("Time of observation is not in header")
                return 0.0
        elif meta_key == 'pressure':
            try:
                return headarr[0]['PRESSUR2']/100.0  # Must be in astropy.units.mbar
            except KeyError:
-                msgs.warn("Pressure is not in header - The default pressure (611 mbar) will be assumed")
+                msgs.warning("Pressure is not in header - The default pressure (611 mbar) will be assumed")
                return 611.0
        elif meta_key == 'temperature':
            try:
                return headarr[0]['TAMBIENT']  # Must be in astropy.units.deg_C
            except KeyError:
-                msgs.warn("Temperature is not in header - The default temperature (1.5 deg C) will be assumed")
+                msgs.warning("Temperature is not in header - The default temperature (1.5 deg C) will be assumed")
                return 1.5  # van Kooten & Izett, arXiv:2208.11794
        elif meta_key == 'humidity':
            try:
                # Humidity expressed as a percentage, not a fraction
                return headarr[0]['HUMIDITY']
            except KeyError:
-                msgs.warn("Humidity is not in header - The default relative humidity (20 %) will be assumed")
+                msgs.warning("Humidity is not in header - The default relative humidity (20 %) will be assumed")
                return 20.0  # van Kooten & Izett, arXiv:2208.11794
        elif meta_key == 'parangle':
            try:
                # Humidity expressed as a percentage, not a fraction
-                msgs.warn("Parallactic angle is not available for GNIRS - DAR correction may be incorrect")
+                msgs.warning("Parallactic angle is not available for GNIRS - DAR correction may be incorrect")
                return headarr[0]['PARANGLE']  # Must be expressed in radians
            except KeyError:
-                msgs.warn("Parallactic angle is not in header - The default parallactic angle (0 degrees) will be assumed")
+                msgs.warning("Parallactic angle is not in header - The default parallactic angle (0 degrees) will be assumed")
                return 0.0
        else:
-            msgs.error("Not ready for this compound meta")
+            raise PypeItError("Not ready for this compound meta")
 
    def configuration_keys(self):
        """
@@ -260,7 +260,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            elif '10/mmLBSX' in fitstbl['dispname'][0]:
                return good_exp & (fitstbl['idname'] == 'ARC')
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    @classmethod
@@ -397,7 +397,7 @@ def config_specific_par(self, scifile, inp_par=None):
            # TODO :: Need to fill this in
            pass
        else:
-            msgs.error(f'Unrecognized GNIRS dispname: {self.dispname}')
+            raise PypeItError(f'Unrecognized GNIRS dispname: {self.dispname}')
 
        return par
 
@@ -505,7 +505,7 @@ def config_specific_par(self, scifile, inp_par=None):
            # Tilts
            par['calibrations']['tilts']['tracethresh'] = [10, 10, 10, 10]
        else:
-            msgs.error('Unrecognized GNIRS dispname')
+            raise PypeItError('Unrecognized GNIRS dispname')
 
        return par
 
@@ -541,7 +541,7 @@ def norders(self):
        elif '32/mm' in self.dispname:
            return 6
        else:
-            msgs.error('Unrecognized disperser')
+            raise PypeItError('Unrecognized disperser')
 
    @property
    def order_spat_pos(self):
@@ -558,7 +558,7 @@ def order_spat_pos(self):
            ##New data
            return np.array([0.2955097 , 0.37635756, 0.44952223, 0.51935601, 0.59489503, 0.70210309])
        else:
-            msgs.error('Unrecognized disperser')
+            raise PypeItError('Unrecognized disperser')
 
    @property
    def orders(self):
@@ -571,7 +571,7 @@ def orders(self):
        elif '32/mm' in self.dispname:
            return np.arange(8,2,-1,dtype=int)
        else:
-            msgs.error('Unrecognized disperser')
+            raise PypeItError('Unrecognized disperser')
 
    @property
    def spec_min_max(self):
@@ -591,7 +591,7 @@ def spec_min_max(self):
            spec_min = np.asarray([512, 280, 0, 0, 0, 0])
            return np.vstack((spec_min, spec_max))
        else:
-            msgs.error('Unrecognized disperser')
+            raise PypeItError('Unrecognized disperser')
 
 
 class GNIRSIFUSpectrograph(GeminiGNIRSSpectrograph):
@@ -726,7 +726,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        slscl = self.get_meta_value([hdr], 'slitwid')
        if spatial_scale is not None:
            if pxscl > spatial_scale / 3600.0:
-                msgs.warn("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0))
+                msgs.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0))
            # Update the pixel scale
            pxscl = spatial_scale / 3600.0  # 3600 is to convert arcsec to degrees
 
@@ -741,7 +741,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
 
        # Get rotator position
-        msgs.warn("CURRENTLY A HACK --- NEED TO FIGURE OUT RPOS and RREF FOR HRIFU FROM HEADER INFO")
+        msgs.warning("CURRENTLY A HACK --- NEED TO FIGURE OUT RPOS and RREF FOR HRIFU FROM HEADER INFO")
        if 'ROTPOSN' in hdr:
            rpos = hdr['ROTPOSN']
        else:
@@ -775,7 +775,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        crpix2 = slitlength / 2.
        crpix3 = 1.
        # Get the offset
-        msgs.warn("HACK FOR HRIFU --- Need to obtain offset from header?")
+        msgs.warning("HACK FOR HRIFU --- Need to obtain offset from header?")
        off1 = 0.
        off2 = 0.
        off1 /= binspec
diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py
index 97d8bf0375..a64585c845 100644
--- a/pypeit/spectrographs/gtc_osiris.py
+++ b/pypeit/spectrographs/gtc_osiris.py
@@ -187,21 +187,21 @@ def compound_meta(self, headarr, meta_key):
            try:
                return headarr[0]['PRESSURE']  # Must be in astropy.units.mbar
            except KeyError:
-                msgs.warn("Pressure is not in header")
+                msgs.warning("Pressure is not in header")
                msgs.info("The default pressure will be assumed: 611 mbar")
                return 611.0
        elif meta_key == 'temperature':
            try:
                return headarr[0]['TAMBIENT']  # Must be in astropy.units.deg_C
            except KeyError:
-                msgs.warn("Temperature is not in header")
+                msgs.warning("Temperature is not in header")
                msgs.info("The default temperature will be assumed: 1.5 deg C")
                return 1.5
        elif meta_key == 'humidity':
            try:
                return headarr[0]['HUMIDITY']
            except KeyError:
-                msgs.warn("Humidity is not in header")
+                msgs.warning("Humidity is not in header")
                msgs.info("The default relative humidity will be assumed: 20 %")
                return 20.0
        elif meta_key == 'parangle':
@@ -209,19 +209,19 @@ def compound_meta(self, headarr, meta_key):
                msgs.work("Parallactic angle is not available for MAAT - DAR correction may be incorrect")
                return headarr[0]['PARANG']  # Must be expressed in radians
            except KeyError:
-                msgs.error("Parallactic angle is not in header")
+                raise PypeItError("Parallactic angle is not in header")
        elif meta_key == 'obstime':
            return Time(headarr[0]['DATE-END'])
        elif meta_key == 'gain':
            return headarr[0]['GAIN']
        elif meta_key == 'slitwid':
            if self.name == "gtc_maat":
-                msgs.warn("HACK FOR MAAT SIMS --- NEED TO GET SLICER SCALE FROM HEADER, IDEALLY")
+                msgs.warning("HACK FOR MAAT SIMS --- NEED TO GET SLICER SCALE FROM HEADER, IDEALLY")
                return 0.305 / 3600.0
            elif self.name == "gtc_osiris_plus":
                return headarr[0]['SLITW']/3600.0  # Convert slit width from arcseconds to degrees
            else:
-                msgs.error("Could not determine slit width from header information")
+                raise PypeItError("Could not determine slit width from header information")
 
    def configuration_keys(self):
        """
@@ -290,7 +290,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
        if ftype == 'bias':
            return good_exp & (np.char.lower(fitstbl['target']) == 'bias')
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    def config_independent_frames(self):
@@ -398,7 +398,7 @@ def config_specific_par(self, scifile, inp_par=None):
                par['sensfunc']['algorithm'] = 'IR'
                par['sensfunc']['IR']['telgridfile'] = "TellPCA_3000_26000_R10000.fits"
            else:
-                msgs.warn('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...')
+                msgs.warning('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...')
                par['calibrations']['wavelengths']['method'] = 'holy-grail'
 
        # Return
        return par
 
@@ -439,7 +439,7 @@ def bpm(self, filename, det, shape=None, msbias=None):
 
        head0 = fits.getheader(filename, ext=0)
        binning = self.get_meta_value([head0], 'binning')
-        msgs.warn("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning))
+        msgs.warning("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning))
        # Construct a list of the bad columns
        bc = []
        # TODO :: Add BPM
@@ -645,7 +645,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        slscl = self.get_meta_value([hdr], 'slitwid')
        if spatial_scale is not None:
            if pxscl > spatial_scale / 3600.0:
-                msgs.warn("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0))
+                msgs.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0))
            # Update the pixel scale
            pxscl = spatial_scale / 3600.0  # 3600 is to convert arcsec to degrees
 
@@ -660,7 +660,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        coord = SkyCoord(raval, decval, unit=(units.deg, units.deg))
 
        # Get rotator position
-        msgs.warn("HACK FOR MAAT SIMS --- NEED TO FIGURE OUT RPOS and RREF FOR MAAT FROM HEADER INFO")
+        msgs.warning("HACK FOR MAAT SIMS --- NEED TO FIGURE OUT RPOS and RREF FOR MAAT FROM HEADER INFO")
        if 'ROTPOSN' in hdr:
            rpos = hdr['ROTPOSN']
        else:
@@ -694,7 +694,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        crpix2 = slitlength / 2.
        crpix3 = 1.
        # Get the offset
-        msgs.warn("HACK FOR MAAT SIMS --- Need to obtain offset from header?")
+        msgs.warning("HACK FOR MAAT SIMS --- Need to obtain offset from header?")
        off1 = 0.
        off2 = 0.
        off1 /= binspec
@@ -1000,7 +1000,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
        if ftype == 'bias':
            return good_exp & (fitstbl['target'] == 'BIAS')
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    def config_independent_frames(self):
@@ -1091,7 +1091,7 @@ def config_specific_par(self, scifile, inp_par=None):
                par['sensfunc']['algorithm'] = 'IR'
                par['sensfunc']['IR']['telgridfile'] = "TellPCA_3000_26000_R10000.fits"
            else:
-                msgs.warn('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...')
+                msgs.warning('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...')
                par['calibrations']['wavelengths']['method'] = 'holy-grail'
 
        # Return
        return par
 
@@ -1140,14 +1140,14 @@ def bpm(self, filename, det, shape=None, msbias=None):
        elif det == 2:
            if binning == '1 1':
                # The BPM is based on 2x2 binning data, so the 2x2 numbers are just multiplied by two
-                msgs.warn("BPM is likely over-estimated for 1x1 binning")
+                msgs.warning("BPM is likely over-estimated for 1x1 binning")
                bc = [[220, 222, 3892, 4100],
                      [952, 954, 2304, 4100]]
            elif binning == '2 2':
                bc = [[110, 111, 1946, 2050],
                      [476, 477, 1154, 2050]]
        else:
-            msgs.warn("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning))
+            msgs.warning("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning))
            bc = []
 
        # Apply these bad columns to the mask
diff --git a/pypeit/spectrographs/jwst_nirspec.py b/pypeit/spectrographs/jwst_nirspec.py
index 80a8abb2f3..0a539acc54 100644
--- a/pypeit/spectrographs/jwst_nirspec.py
+++ b/pypeit/spectrographs/jwst_nirspec.py
@@ -236,7 +236,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
        if ftype == 'science':
            return np.ones(len(fitstbl), dtype=bool)
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
diff --git a/pypeit/spectrographs/keck_deimos.py b/pypeit/spectrographs/keck_deimos.py
index 52896a66dc..ff371e7d8f 100644
--- a/pypeit/spectrographs/keck_deimos.py
+++ b/pypeit/spectrographs/keck_deimos.py
@@ -212,7 +212,7 @@ def get_detector_par(self, det, hdu=None):
        if hdu is not None:
            amp = self.get_meta_value(self.get_headarr(hdu), 'amp')
            if amp == 'DUAL:A+B':
-                msgs.error('PypeIt can only reduce images with AMPMODE == SINGLE:B or AMPMODE == SINGLE:A.')
+                raise PypeItError('PypeIt can only reduce images with AMPMODE == SINGLE:B or AMPMODE == SINGLE:A.')
            amp_folder = "ampA" if amp == 'SINGLE:A' else "ampB"
            # raw frame date in mjd
            date = time.Time(self.get_meta_value(self.get_headarr(hdu), 'mjd'), format='mjd').value
@@ -508,14 +508,14 @@ def compound_meta(self, headarr, meta_key):
            elif headarr[0]['GRATEPOS'] == 4:
                return headarr[0]['G4TLTWAV']
            else:
-                msgs.warn('This is probably a problem. Non-standard DEIMOS GRATEPOS={0}.'.format(headarr[0]['GRATEPOS']))
+                msgs.warning('This is probably a problem. Non-standard DEIMOS GRATEPOS={0}.'.format(headarr[0]['GRATEPOS']))
        elif meta_key == 'mjd':
            if headarr[0].get('MJD-OBS', None) is not None:
                return headarr[0]['MJD-OBS']
            else:
                return time.Time('{}T{}'.format(headarr[0]['DATE-OBS'], headarr[0]['UTC'])).mjd
        else:
-            msgs.error("Not ready for this compound meta")
+            raise PypeItError("Not ready for this compound meta")
 
    def configuration_keys(self):
        """
@@ -678,7 +678,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            return good_exp & (fitstbl['idname'] == 'Line') & (fitstbl['hatch'] == 'closed') \
                        & (fitstbl['lampstat01'] != 'Off')
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    def get_rawimage(self, raw_file, det):
@@ -739,9 +739,9 @@ def get_rawimage(self, raw_file, det):
        detectors = [self.get_detector_par(det, hdu=hdu)] if nimg == 1 else mosaic.detectors
 
        if hdu[0].header['AMPMODE'] not in ['SINGLE:B', 'SINGLE:A']:
-            msgs.error('PypeIt can only reduce images with AMPMODE == SINGLE:B or AMPMODE == SINGLE:A.')
+            raise PypeItError('PypeIt can only reduce images with AMPMODE == SINGLE:B or AMPMODE == SINGLE:A.')
        if hdu[0].header['MOSMODE'] != 'Spectral':
-            msgs.error('PypeIt can only reduce images with MOSMODE == Spectral.')
+            raise PypeItError('PypeIt can only reduce images with MOSMODE == Spectral.')
 
        # Get post, pre-pix values
        postpix = hdu[0].header['POSTPIX']
@@ -751,7 +751,7 @@ def get_rawimage(self, raw_file, det):
        # get the x and y binning factors...
        binning = hdu[0].header['BINNING']
        if binning != '1,1':
-            msgs.error("This binning for DEIMOS might not work.  But it might..")
+            raise PypeItError("This binning for DEIMOS might not work.  But it might..")
 
        # get the chips to read in
        # DP: I don't know if this needs to still exist. I believe det is never None
@@ -836,7 +836,7 @@ def get_mosaic_par(self, mosaic, hdu=None, msc_ord=5):
        detectors = np.array([self.get_detector_par(det, hdu=hdu) for det in mosaic])
        # Binning *must* be consistent for all detectors
        if any(d.binning != detectors[0].binning for d in detectors[1:]):
-            msgs.error('Binning is somehow inconsistent between detectors in the mosaic!')
+            raise PypeItError('Binning is somehow inconsistent between detectors in the mosaic!')
 
        # Collect the offsets and rotations for *all unbinned* detectors in the
        # full instrument, ordered by the number of the detector. Detector
@@ -1208,7 +1208,7 @@ def get_amapbmap(self, filename):
            self.bmap = fits.getdata(dataPaths.static_calibs.get_file_path(
                            f'keck_deimos/bmap.s{slider}.2003mar04.fits'))
        else:
-            msgs.error(f'No amap/bmap available for slider {slider}. Set `use_maskdesign = False`')
+            raise PypeItError(f'No amap/bmap available for slider {slider}. Set `use_maskdesign = False`')
            #TODO: Figure out which amap and bmap to use for slider 2
 
        return self.amap, self.bmap
@@ -1370,13 +1370,13 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None,
            self.get_amapbmap(filename)
 
        if self.amap is None and self.bmap is None:
-            msgs.error('Must select amap and bmap; provide a file or use get_amapbmap()')
+            raise PypeItError('Must select amap and bmap; provide a file or use get_amapbmap()')
 
        if self.slitmask is None:
-            msgs.error('Unable to read slitmask design info. Provide a file.')
+            raise PypeItError('Unable to read slitmask design info. Provide a file.')
 
        if ccdnum is None:
-            msgs.error('A detector number must be provided')
+            raise PypeItError('A detector number must be provided')
 
        # parse ccdnum
        nimg, _ccdnum = self.validate_det(ccdnum)
@@ -1564,7 +1564,7 @@ def spec1d_match_spectra(self, sobjs):
            if np.sum(mtc) == 1:
                irobj = int(ridx[mtc])
                if not np.isclose(sobj.DEC, sobjs[irobj].DEC):
-                    msgs.error('DEC does not match RA!')
+                    raise PypeItError('DEC does not match RA!')
                bmt.append(ibobj)
                rmt.append(irobj)
                # START ARRAY
@@ -1577,7 +1577,7 @@ def spec1d_match_spectra(self, sobjs):
                #    obj['objra'],obj['objdec'],obj['objname'],obj['maskdef_id'],obj['slit']))
                #n=n+1
            elif np.sum(mtc)>1:
-                msgs.error("Multiple RA matches?!  No good..")
+                raise PypeItError("Multiple RA matches?!  No good..")
 
        # TODO - confirm with Marla this block is NG
        '''
@@ -1750,7 +1750,7 @@ def __init__(self):
 #    if isinstance(inp, str):
 #        fil = glob.glob(inp + '*')
 #        if len(fil) != 1:
-#            msgs.error('Found {0} files matching {1}'.format(len(fil), inp + '*'))
+#            raise PypeItError('Found {0} files matching {1}'.format(len(fil), inp + '*'))
 #        # Read
 #        try:
 #            msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
@@ -1778,7 +1778,7 @@ def __init__(self):
 #    # get the x and y binning factors...
 #    binning = head0['BINNING']
 #    if binning != '1,1':
-#        msgs.error("This binning for DEIMOS might not work.  But it might..")
+#        raise PypeItError("This binning for DEIMOS might not work.  But it might..")
 #
 #    xbin, ybin = [int(ibin) for ibin in binning.split(',')]
 #
diff --git a/pypeit/spectrographs/keck_esi.py b/pypeit/spectrographs/keck_esi.py
index bdd7333e4a..f4509fb60a 100644
--- a/pypeit/spectrographs/keck_esi.py
+++ b/pypeit/spectrographs/keck_esi.py
@@ -216,7 +216,7 @@ def compound_meta(self, headarr, meta_key):
                return mjd_time.mjd
            except Exception as e:
                # A problem parsing the MJD, we'll try DATE-OBS and UT
-                msgs.warn("Problem parsing MJD-OBS, trying DATE-OBS and UT instead.")
+                msgs.warning("Problem parsing MJD-OBS, trying DATE-OBS and UT instead.")
                pass
            return Time('{}T{}'.format(headarr[0]['DATE-OBS'], headarr[0]['UT'])).mjd
        elif meta_key == 'dispname':
@@ -226,7 +226,7 @@ def compound_meta(self, headarr, meta_key):
                dname = 'UNKNWN'
            return dname
        else:
-            msgs.error("Not ready for this compound meta")
+            raise PypeItError("Not ready for this compound meta")
 
    def configuration_keys(self):
        """
@@ -281,7 +281,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            return good_exp & (fitstbl['idname'] == 'Object')
        if ftype == 'standard':
            return good_exp & (fitstbl['idname'] == 'Object')
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    def bpm(self, filename, det, shape=None, msbias=None):
@@ -463,7 +463,7 @@ def get_rawimage(self, raw_file, det, spectrim=None):
        """
        # Check for file; allow for extra .gz, etc. suffix
        if not os.path.isfile(raw_file):
-            msgs.error(f'{raw_file} not found!')
+            raise PypeItError(f'{raw_file} not found!')
        hdu = io.fits_open(raw_file)
        head0 = hdu[0].header
 
diff --git a/pypeit/spectrographs/keck_hires.py b/pypeit/spectrographs/keck_hires.py
index 9dabab1721..454aa5e8e0 100644
--- a/pypeit/spectrographs/keck_hires.py
+++ b/pypeit/spectrographs/keck_hires.py
@@ -314,7 +314,7 @@ def compound_meta(self, headarr, meta_key):
                return 'IntFlat'
 
        else:
-            msgs.error("Not ready for this compound meta")
+            raise PypeItError("Not ready for this compound meta")
 
    def configuration_keys(self):
        """
@@ -430,7 +430,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            # Arc and tilt frames are typed together
            return good_exp & (fitstbl['idname'] == 'Line')
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    def vet_assigned_ftypes(self, type_bits, fitstbl):
@@ -512,11 +512,11 @@ def parse_raw_files(self, fitstbl, det=1, ftype=None):
        if ftype == 'slitless_pixflat':
            # Check for the required info
            if len(fitstbl) == 0:
-                msgs.warn('Fitstbl provided is emtpy. No parsing done.')
+                msgs.warning('Fitstbl provided is empty. No parsing done.')
                # return empty array
                return np.array([], dtype=int)
            elif det is None:
-                msgs.warn('Detector number must be provided to parse slitless_pixflat frames. No parsing done.')
+                msgs.warning('Detector number must be provided to parse slitless_pixflat frames. No parsing done.')
                # return index array of length of fitstbl
                return np.arange(len(fitstbl))
 
@@ -537,7 +537,7 @@ def parse_raw_files(self, fitstbl, det=1, ftype=None):
                # red detector
                return np.where(np.int32(fitstbl['xdangle'].value) == -5)[0]
            else:
-                msgs.warn('The provided list of slitless_pixflat frames does not have exactly 3 unique XDANGLE values. '
+                msgs.warning('The provided list of slitless_pixflat frames does not have exactly 3 unique XDANGLE values. '
                          'Pypeit cannot determine which slitless_pixflat frame corresponds to the requested detector. '
                          'All frames will be used.')
                return np.arange(len(fitstbl))
@@ -584,7 +584,7 @@ def get_rawimage(self, raw_file, det, spectrim=20):
 
        # Check for file; allow for extra .gz, etc. suffix
        if not os.path.isfile(raw_file):
-            msgs.error(f'{raw_file} not found!')
+            raise PypeItError(f'{raw_file} not found!')
        hdu = io.fits_open(raw_file)
        head0 = hdu[0].header
 
@@ -603,7 +603,7 @@ def get_rawimage(self, raw_file, det, spectrim=20):
        binning = self.get_meta_value(self.get_headarr(hdu), 'binning')
 #        # TODO: JFH I think this works fine
 #        if binning != '3,1':
-#            msgs.warn("This binning for HIRES might not work. But it might..")
+#            msgs.warning("This binning for HIRES might not work. But it might..")
 
        # We are flipping this because HIRES stores the binning oppostire of the (binspec, binspat) pypeit convention.
        binspatial, binspec = parse.parse_binning(head0['BINNING'])
@@ -699,7 +699,7 @@ def get_mosaic_par(self, mosaic, hdu=None, msc_ord=0):
        detectors = np.array([self.get_detector_par(det, hdu=hdu) for det in mosaic])
        # Binning *must* be consistent for all detectors
        if any(d.binning != detectors[0].binning for d in detectors[1:]):
-            msgs.error('Binning is somehow inconsistent between detectors in the mosaic!')
+            raise PypeItError('Binning is somehow inconsistent between detectors in the mosaic!')
 
        # Collect the offsets and rotations for *all unbinned* detectors in the
        # full instrument, ordered by the number of the detector. Detector
@@ -816,7 +816,7 @@ def get_detector_par(self, det, hdu=None):
            detector_dict2['gain'] = np.atleast_1d([0.86])
            detector_dict3['gain'] = np.atleast_1d([0.84])
        else:
-            msgs.error("Bad CCDGAIN mode for HIRES")
+            raise PypeItError("Bad CCDGAIN mode for HIRES")
 
        # Instantiate
        detector_dicts = [detector_dict1, detector_dict2, detector_dict3]
diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py
index cf9dbb28b9..370d2cb601 100644
--- a/pypeit/spectrographs/keck_kcwi.py
+++ b/pypeit/spectrographs/keck_kcwi.py
@@ -149,7 +149,7 @@ def config_specific_par(self, scifile, inp_par=None):
        elif self.get_meta_value(headarr, 'dispname') == 'RH3':
            par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcrm_RH3.fits'
        else:
-            msgs.warn("Full template solution is unavailable")
+            msgs.warning("Full template solution is unavailable")
            msgs.info("Adopting holy-grail algorithm - Check the wavelength solution!")
            par['calibrations']['wavelengths']['method'] = 'holy-grail'
        # FWHM
@@ -217,20 +217,20 @@ def compound_meta(self, headarr, meta_key):
            try:
                hdrstr = 'TARGRA' if meta_key == 'ra' else 'TARGDEC'
            except KeyError:
-                msgs.error(f'Cannot determine the {meta_key} from the header')
+                raise PypeItError(f'Cannot determine the {meta_key} from the header')
            return headarr[0][hdrstr]
        elif meta_key == 'pressure':
            try:
                return headarr[0]['WXPRESS']  # Must be in astropy.units.mbar
            except KeyError:
-                msgs.warn("Pressure is not in header")
+                msgs.warning("Pressure is not in header")
                msgs.info("The default pressure will be assumed: 611 mbar")
                return 611.0
        elif meta_key == 'temperature':
            try:
                return headarr[0]['WXOUTTMP']  # Must be in astropy.units.deg_C
            except KeyError:
-                msgs.warn("Temperature is not in header")
+                msgs.warning("Temperature is not in header")
                msgs.info("The default temperature will be assumed: 1.5 deg C")
                return 1.5  # van Kooten & Izett, arXiv:2208.11794
        elif meta_key == 'humidity':
@@ -238,7 +238,7 @@ def compound_meta(self, headarr, meta_key):
                # Humidity expressed as a percentage, not a fraction
                return headarr[0]['WXOUTHUM']
            except KeyError:
-                msgs.warn("Humidity is not in header")
+                msgs.warning("Humidity is not in header")
                msgs.info("The default relative humidity will be assumed: 20 %")
                return 20.0  # van Kooten & Izett, arXiv:2208.11794
        elif meta_key == 'parangle':
@@ -246,7 +246,7 @@ def compound_meta(self, headarr, meta_key):
                # Parallactic angle expressed in radians
                return headarr[0]['PARANG'] * np.pi / 180.0
            except KeyError:
-                msgs.error("Parallactic angle is not in header")
+                raise PypeItError("Parallactic angle is not in header")
        elif meta_key == 'obstime':
            return Time(headarr[0]['DATE-END'])
        elif meta_key == 'posang':
@@ -264,7 +264,7 @@ def compound_meta(self, headarr, meta_key):
            skypa = rpos + rref  # IFU position angle (degrees)
            return skypa
        else:
-            msgs.error("Not ready for this compound meta")
+            raise PypeItError("Not ready for this compound meta")
 
 
    @classmethod
@@ -402,7 +402,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            is_align = good_exp & (fitstbl['idname'] == 'CONTBARS') \
                            & (fitstbl['calpos'] == 'Mirror') & self.lamps(fitstbl, 'cont')
            if np.any(is_align & np.logical_not(self.lamps(fitstbl, 'cont_noarc'))):
-                msgs.warn('Alignment frames have both the continuum and arc lamps on (although '
+                msgs.warning('Alignment frames have both the continuum and arc lamps on (although '
                          'arc-lamp shutter might be closed)!')
            return is_align
        if ftype == 'arc':
@@ -422,7 +422,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None):
            # Don't type pinhole frames
            return np.zeros(len(fitstbl), dtype=bool)
 
-        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
+        msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
 
    def lamps(self, fitstbl, status):
@@ -632,7 +632,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        slscl = self.get_meta_value([hdr], 'slitwid')
        if spatial_scale is not None:
            if pxscl > spatial_scale / 3600.0:
-                msgs.warn("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0))
+                msgs.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0))
            # Update the pixel scale
            pxscl = spatial_scale / 3600.0  # 3600 is to convert arcsec to degrees
 
@@ -674,7 +674,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None):
        #     off1 = 0.05
        #     off2 = 5.6
        # else:
-        #     msgs.warn("Unknown IFU number: {0:d}".format(ifunum))
+        #     msgs.warning("Unknown IFU number: {0:d}".format(ifunum))
        off1 = 0.
        off2 = 0.
        off1 /= binspec
@@ -809,7 +809,7 @@ def bpm(self, filename, det, shape=None, msbias=None):
 
        # Check if the bad columns haven't been set
        if bc is None:
-            msgs.warn("KCRM bad pixel mask is not available for ampmode={0:s} binning={1:s}".format(ampmode, binning))
+            msgs.warning("KCRM bad pixel mask is not available for ampmode={0:s} binning={1:s}".format(ampmode, binning))
            bc = []
 
        # Apply these bad columns to the mask
@@ -857,7 +857,7 @@ def get_detector_par(self, det, hdu=None):
            gainarr = None
            ronarr = None
 #            dsecarr = None
-#            msgs.error("A required keyword argument (hdu) was not supplied")
+#            raise PypeItError("A required keyword argument (hdu) was not supplied")
        else:
            # Some properties of the image
            binning = self.compound_meta(self.get_headarr(hdu), "binning")
@@ -1137,7 +1137,7 @@ def scattered_light_archive(self, binning, dispname):
                           -0.004790394657721825, 0.0032481886185675036,  # Polynomial terms (coefficients of "spat" and "spat*spec")
                           0.07823077510724392, -0.0644638013233617, 0.01819438897935518])  # Polynomial terms (coefficients of spec**index)
        else:
-            msgs.warn(f"Initial scattered light model parameters have not been setup for grating {dispname}")
+            msgs.warning(f"Initial scattered light model parameters have not been setup for grating {dispname}")
            x0 = np.array([54.843502304988725 / specbin, 71.36603219575882 / spatbin,  # Gaussian kernel widths
                           166.5990017834228 / specbin, 164.45188033168876 / spatbin,  # Lorentzian kernel widths
                           -5.759623374637964 / specbin, 5.01392929142184 / spatbin,  # pixel offsets
@@ -1250,7 +1250,7 @@ def get_detector_par(self, det, hdu=None):
            gainarr = None
            ronarr = None
 #            dsecarr = None
-#            msgs.error("A required keyword argument (hdu) was not supplied")
+#            raise PypeItError("A required keyword argument (hdu) was not supplied")
        else:
            # Some properties of the image
            binning = self.compound_meta(self.get_headarr(hdu), "binning")
@@ -1303,7 +1303,7 @@ def get_amplifiers(self, numamps):
        elif numamps == 4:
            return [0, 1, 2, 3]
        else:
-            msgs.error("PypeIt only supports 2 or 4 amplifier readout of KCRM data")
+            raise PypeItError("PypeIt only supports 2 or 4 amplifier readout of KCRM data")
 
    def init_meta(self):
        """
diff --git a/pypeit/spectrographs/keck_lris.py b/pypeit/spectrographs/keck_lris.py
index 9252aeeddb..2117b6564e 100644
--- a/pypeit/spectrographs/keck_lris.py
+++ b/pypeit/spectrographs/keck_lris.py
@@ -48,9 +48,9 @@ def check_spectrograph(self, filename):
 
        instrume = 
self.get_meta_value(filename, 'instrument') if 'keck_lris_red' in self.name and instrume != 'LRIS': - msgs.error('This is not the correct spectrograph. You may want to use keck_lris_blue instead.') + raise PypeItError('This is not the correct spectrograph. You may want to use keck_lris_blue instead.') elif 'keck_lris_blue' in self.name and instrume == 'LRIS': - msgs.error('This is not the correct spectrograph. You may want to use keck_lris_red instead.') + raise PypeItError('This is not the correct spectrograph. You may want to use keck_lris_red instead.') @classmethod def default_pypeit_par(cls): @@ -136,7 +136,7 @@ def config_specific_par(self, scifile, inp_par=None): # This is a little risky as a user could put long into their maskname # But they would then need to over-ride in their PypeIt file if scifile is None: - msgs.error("You have not included a standard or science file in your PypeIt file to determine the configuration") + raise PypeItError("You have not included a standard or science file in your PypeIt file to determine the configuration") if 'long' in self.get_meta_value(scifile, 'decker'): par['calibrations']['slitedges']['sync_predict'] = 'nearest' # This might only be required for det=2, but we'll see.. @@ -209,13 +209,13 @@ def compound_meta(self, headarr, meta_key): # LRIS sometime misses RA and/or Dec in the header. When this happens, set them to 0 if meta_key == 'ra': if headarr[0].get('RA') is None: - msgs.warn('Keyword RA not found in header. Setting to 0') + msgs.warning('Keyword RA not found in header. Setting to 0') return '00:00:00.00' else: return headarr[0]['RA'] elif meta_key == 'dec': if headarr[0].get('DEC') is None: - msgs.warn('Keyword DEC not found in header. Setting to 0') + msgs.warning('Keyword DEC not found in header. Setting to 0') return '+00:00:00.0' else: return headarr[0]['DEC'] @@ -284,7 +284,7 @@ def compound_meta(self, headarr, meta_key): else: return 'off' else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -400,7 +400,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arcs') & (fitstbl['hatch'] == 'closed') & no_img - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def vet_assigned_ftypes(self, type_bits, fitstbl): @@ -488,7 +488,7 @@ def lamps(self, fitstbl, status): elif status == 'internal': return np.array([lamp in ['Halogen', '2H'] for lamp in fitstbl['lampstat01']]) else: - msgs.error(f"Bad status option! {status}") + raise PypeItError(f"Bad status option! {status}") raise ValueError('No implementation for status = {0}'.format(status)) @@ -746,10 +746,10 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, if filename is not None: self.get_slitmask(filename) else: - msgs.error('The name of a science file should be provided for the mask info') + raise PypeItError('The name of a science file should be provided for the mask info') if self.slitmask is None: - msgs.error('Unable to read slitmask design info. Provide a file.') + raise PypeItError('Unable to read slitmask design info. 
Provide a file.') platescale = self.get_detector_par(det=1)['platescale'] @@ -803,13 +803,13 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, xstart = 2073//bin_spat good = centers < max_spat # No chip gap else: - msgs.error(f'Not ready to use slitmasks for {self.name}. Develop it!') + raise PypeItError(f'Not ready to use slitmasks for {self.name}. Develop it!') else: if self.name in ['keck_lris_red', 'keck_lris_blue']: good = centers >= 0. xstart = -48//bin_spat else: - msgs.error(f'Not ready to use slitmasks for {self.name}. Develop it!') + raise PypeItError(f'Not ready to use slitmasks for {self.name}. Develop it!') left_edges += xstart right_edges += xstart left_edges[~good] = -1 @@ -915,7 +915,7 @@ def get_detector_par(self, det, hdu=None): elif namps == 4: pass else: - msgs.error("Did not see this namps coming..") + raise PypeItError("Did not see this namps coming..") # Return return detector @@ -936,9 +936,9 @@ def check_spectrograph(self, filename): # last day of keck_lris_blue_orig date_orig = time.Time('2009-04-30', format='iso') if _dateobs <= date_orig and self.name in ['keck_lris_blue']: - msgs.error('This is not the correct spectrograph. Use keck_lris_blue_orig instead.') + raise PypeItError('This is not the correct spectrograph. Use keck_lris_blue_orig instead.') elif _dateobs > date_orig and self.name in ['keck_lris_blue_orig']: - msgs.error('This is not the correct spectrograph. Use keck_lris_blue instead.') + raise PypeItError('This is not the correct spectrograph. Use keck_lris_blue instead.') @classmethod def default_pypeit_par(cls): @@ -1153,7 +1153,7 @@ def get_rawimage(self, raw_file, det): image, hdul, elaptime, rawdatasec_img, oscansec_img = get_orig_rawimage(raw_file) # Cut down if np.max(rawdatasec_img) != 4: - msgs.error("Deal with not 2 AMP mode!!") + raise PypeItError("Deal with not 2 AMP mode!!") if det == 1: bad_amp = rawdatasec_img > 2 rawdatasec_img[bad_amp] = 0 @@ -1169,7 +1169,7 @@ def get_rawimage(self, raw_file, det): good_amp = timage > 2 timage[good_amp] -= 2 else: - msgs.error("Should not be here in keck_lris!") + raise PypeItError("Should not be here in keck_lris!") # Detector detector_par = self.get_detector_par(det-1, hdu=hdul) @@ -1282,16 +1282,16 @@ def get_detector_par(self, det, hdu=None): if date < t2020_1: pass elif date < t2020_2: # This is for the June 30 2020 run - msgs.warn("We are using LRISr gain/RN values based on WMKO estimates.") + msgs.warning("We are using LRISr gain/RN values based on WMKO estimates.") detector_dict1['gain'] = np.atleast_1d([37.6]) detector_dict2['gain'] = np.atleast_1d([1.26]) detector_dict1['ronoise'] = np.atleast_1d([99.]) detector_dict2['ronoise'] = np.atleast_1d([5.2]) elif date >= t2021_upgrade: # Note: We are unlikely to trip this. Other things probably failed first - msgs.error("This is the new detector. Use keck_lris_red_mark4") + raise PypeItError("This is the new detector. 
Use keck_lris_red_mark4") else: # This is the 2020 July 29 run - msgs.warn("We are using LRISr gain/RN values based on WMKO estimates.") + msgs.warning("We are using LRISr gain/RN values based on WMKO estimates.") detector_dict1['gain'] = np.atleast_1d([1.45]) detector_dict2['gain'] = np.atleast_1d([1.25]) detector_dict1['ronoise'] = np.atleast_1d([4.47]) @@ -1321,7 +1321,7 @@ def get_detector_par(self, det, hdu=None): elif namps == 4: pass else: - msgs.error("Did not see this namps coming..") + raise PypeItError("Did not see this namps coming..") # Return return detector @@ -1343,11 +1343,11 @@ def check_spectrograph(self, filename): # last day of keck_lris_red_orig date_orig = time.Time('2009-05-02', format='iso') if _dateobs <= date_orig and self.name in ['keck_lris_red_mark4', 'keck_lris_red']: - msgs.error('This is not the correct spectrograph. Use keck_lris_red_orig instead.') + raise PypeItError('This is not the correct spectrograph. Use keck_lris_red_orig instead.') elif _dateobs >= date_mark4 and self.name in ['keck_lris_red_orig', 'keck_lris_red']: - msgs.error('This is not the correct spectrograph. Use keck_lris_red_mark4 instead.') + raise PypeItError('This is not the correct spectrograph. Use keck_lris_red_mark4 instead.') elif date_orig < _dateobs < date_mark4 and self.name in ['keck_lris_red_orig', 'keck_lris_red_mark4']: - msgs.error('This is not the correct spectrograph. Use keck_lris_red instead.') + raise PypeItError('This is not the correct spectrograph. Use keck_lris_red instead.') @classmethod def default_pypeit_par(cls): @@ -1632,7 +1632,7 @@ def get_detector_par(self, det, hdu=None): date = time.Time(hdu[0].header['MJD'], format='mjd') if date < t2021_upgrade: - msgs.error("This is not the Mark4 detector. Use a different keck_lris_red spectrograph") + raise PypeItError("This is not the Mark4 detector. Use a different keck_lris_red spectrograph") # Deal with the intermediate headers if date < t_gdhead: @@ -1668,7 +1668,7 @@ def get_detector_par(self, det, hdu=None): 1.67*1.0052]) # U2 detector_dict1['ronoise'] = np.atleast_1d([3.64, 3.45, 3.65, 3.52]) else: - msgs.error("Did not see this namps coming..") + raise PypeItError("Did not see this namps coming..") detector_dict1['datasec'] = [] detector_dict1['oscansec'] = [] @@ -1873,7 +1873,7 @@ def get_detector_par(self, det, hdu=None): # Deal with number of amps if hdu is not None and hdu[0].header['NUMAMPS'] != 2: - msgs.error("Did not see this namps coming..") + raise PypeItError("Did not see this namps coming..") # Return return detector @@ -2011,10 +2011,10 @@ def lris_read_amp(inp, ext): #data = temp[xdata1-1:xdata2-1,*] #data = temp[xdata1:xdata2+1, :] if (xdata1-1) != precol: - msgs.error("Something wrong in LRIS datasec or precol") + raise PypeItError("Something wrong in LRIS datasec or precol") xshape = 1024 // xbin * (4//n_ext) # Allow for single amp if (xshape+precol+postpix) != temp.shape[0]: - msgs.warn("Unexpected size for LRIS detector. We expect you did some windowing...") + msgs.warning("Unexpected size for LRIS detector. 
We expect you did some windowing...") xshape = temp.shape[0] - precol - postpix data = temp[precol:precol+xshape,:] postdata = temp[nxt-postpix:nxt, :] diff --git a/pypeit/spectrographs/keck_mosfire.py b/pypeit/spectrographs/keck_mosfire.py index 1c79fe7f01..9f31ae096b 100644 --- a/pypeit/spectrographs/keck_mosfire.py +++ b/pypeit/spectrographs/keck_mosfire.py @@ -346,7 +346,7 @@ def compound_meta(self, headarr, meta_key): elif PWSTATA7 == 1 or PWSTATA8 == 1: return 'arclamp' else: - msgs.warn('Header keyword FLATSPEC, PWSTATA7, or PWSTATA8 may not exist') + msgs.warning('Header keyword FLATSPEC, PWSTATA7, or PWSTATA8 may not exist') return 'unknown' if meta_key == 'lampstat01': if headarr[0].get('PWSTATA7') == 1 or headarr[0].get('PWSTATA8') == 1: @@ -367,7 +367,7 @@ def compound_meta(self, headarr, meta_key): else: return 0.0 else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -716,7 +716,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_arc = fitstbl['idname'] == 'arclamp' is_obj = (fitstbl['lampstat01'] == 'off') & (fitstbl['idname'] == 'object') & ('long2pos_specphot' not in fitstbl['decker']) return good_exp & (is_arc | is_obj) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) # TODO: Is this supposed to be deprecated in favor of get_comb_group? @@ -924,7 +924,7 @@ def get_slitmask(self, filename): if (numslits.sum() != self._CSUnumslits()) and ('LONGSLIT' not in self.get_meta_value(filename, 'decker')) \ and ('long2pos' not in self.get_meta_value(filename, 'decker')): - msgs.error('The number of allocated CSU slits does not match the number of possible slits. ' + raise PypeItError('The number of allocated CSU slits does not match the number of possible slits. ' 'Slitmask design matching not possible. Turn parameter `use_maskdesign` off') targ_dist_center = np.array(ssl['Target_to_center_of_slit_distance'], dtype=float) @@ -1021,10 +1021,10 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, if filename is not None: self.get_slitmask(filename) else: - msgs.error('The name of a science file should be provided') + raise PypeItError('The name of a science file should be provided') if self.slitmask is None: - msgs.error('Unable to read slitmask design info. Provide a file.') + raise PypeItError('Unable to read slitmask design info. 
Provide a file.') platescale = self.get_detector_par(det=1)['platescale'] slit_gap = self._slit_gap(platescale) diff --git a/pypeit/spectrographs/keck_nires.py b/pypeit/spectrographs/keck_nires.py index b410a10d25..9144ff6d8a 100644 --- a/pypeit/spectrographs/keck_nires.py +++ b/pypeit/spectrographs/keck_nires.py @@ -204,7 +204,7 @@ def compound_meta(self, headarr, meta_key): else: return None else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ diff --git a/pypeit/spectrographs/keck_nirspec.py b/pypeit/spectrographs/keck_nirspec.py index 568046d652..c461edd477 100644 --- a/pypeit/spectrographs/keck_nirspec.py +++ b/pypeit/spectrographs/keck_nirspec.py @@ -301,7 +301,7 @@ def config_specific_par(self, scifile, inp_par=None): # wavelength calibration supported_filters = ['NIRSPEC-1', 'NIRSPEC-3', 'NIRSPEC-5', 'NIRSPEC-7', 'Kband-new', 'KL'] if (self.filter1 not in supported_filters) and (self.filter2 not in supported_filters): - msgs.warn(f'Filter {self.filter1} or {self.filter2} may not be supported!!') + msgs.warning(f'Filter {self.filter1} or {self.filter2} may not be supported!!') if self.filter1 == 'Kband-new' or self.filter2 == 'NIRSPEC-7': par['calibrations']['wavelengths']['n_final'] = 3 @@ -498,7 +498,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == 'Out') good_exp[is_obj] = fitstbl['exptime'].data[is_obj] > 60.0 return good_exp & (is_arc | is_obj) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -717,7 +717,7 @@ def config_specific_par(self, scifile, inp_par=None): # wavelength calibration supported_filters = ['NIRSPEC-1', 'NIRSPEC-3', 'NIRSPEC-5', 'NIRSPEC-7', 'KL'] if (self.filter1 not in supported_filters) and (self.filter2 not in supported_filters): - msgs.warn(f'Filter {self.filter1} or {self.filter2} may not be supported!!') + msgs.warning(f'Filter {self.filter1} or {self.filter2} may not be supported!!') if self.filter2 == 'NIRSPEC-7': par['calibrations']['wavelengths']['n_final'] = 3 @@ -936,7 +936,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == '0') good_exp[is_obj] = fitstbl['exptime'].data[is_obj] > 60.0 return good_exp & (is_arc | is_obj) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -1086,7 +1086,7 @@ def get_rawimage(self, raw_file, det): #if raw_img[i].ndim != 2: # raw_img[i] = np.squeeze(raw_img[i]) if raw_img[i].ndim != 2: - msgs.error(f"Raw images must be 2D; check extension {detectors[i]['dataext']} " + raise PypeItError(f"Raw images must be 2D; check extension {detectors[i]['dataext']} " f"of {raw_file}.") for section in ['datasec', 'oscansec']: @@ -1128,7 +1128,7 @@ def get_rawimage(self, raw_file, det): return detectors[0], raw_img[0], hdu, exptime, rawdatasec_img[0], oscansec_img[0] if any([img.shape != raw_img[0].shape for img in raw_img[1:]]): - msgs.error('All raw images in a mosaic must have the same shape.') + raise PypeItError('All raw images in a mosaic must have the same shape.') # Return all images for mosaic return mosaic, np.array(raw_img), hdu, exptime, np.array(rawdatasec_img), 
np.array(oscansec_img) @@ -1366,7 +1366,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == 0) \ & (fitstbl['idname'] == 'object') return good_exp & (is_arc | is_obj) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): diff --git a/pypeit/spectrographs/lbt_luci.py b/pypeit/spectrographs/lbt_luci.py index 084f8e1800..3a5a0d765b 100644 --- a/pypeit/spectrographs/lbt_luci.py +++ b/pypeit/spectrographs/lbt_luci.py @@ -152,7 +152,7 @@ def compound_meta(self, headarr, meta_key): return 'dark' else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -245,7 +245,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return (good_exp & ((fitstbl['idname'] == 'object') | (fitstbl['idname'] == 'arc'))) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) # Detector information from official LBT LUCI website @@ -302,7 +302,7 @@ def get_detector_par(self, det, hdu=None): elif readmode == 'MER': ronoise = np.atleast_1d(5.1) else: - msgs.error("Read mode not recognized (options: LIR, MER)") + raise PypeItError("Read mode not recognized (options: LIR, MER)") camera = self.get_meta_value(self.get_headarr(hdu), 'camera') if camera == 'N1.8 Camera': @@ -312,7 +312,7 @@ def get_detector_par(self, det, hdu=None): elif camera == 'N30 Camera': # currently untested but should work in principle platescale = 0.0150 else: - msgs.error("Camera not recognized (options: N1.8, N3.75, N30)") + raise PypeItError("Camera not recognized (options: N1.8, N3.75, N30)") # Detector 1 detector_dict = dict( @@ -542,7 +542,7 @@ def get_detector_par(self, det, hdu=None): elif readmode == 'MER': ronoise = np.atleast_1d(4.5) else: - msgs.error("Read mode not recognized (options: LIR, MER)") + raise PypeItError("Read mode not recognized (options: LIR, MER)") raise ValueError() camera = self.get_meta_value(self.get_headarr(hdu), 'camera') @@ -553,7 +553,7 @@ def get_detector_par(self, det, hdu=None): elif camera == 'N30 Camera': # currently untested but should work in principle platescale = 0.0150 else: - msgs.error("Camera not recognized (options: N1.8, N3.75, N30)") + raise PypeItError("Camera not recognized (options: N1.8, N3.75, N30)") # Detector 1 detector_dict = dict( diff --git a/pypeit/spectrographs/lbt_mods.py b/pypeit/spectrographs/lbt_mods.py index d0aade80a4..714e04a49c 100644 --- a/pypeit/spectrographs/lbt_mods.py +++ b/pypeit/spectrographs/lbt_mods.py @@ -103,7 +103,7 @@ def compound_meta(self, headarr, meta_key): binspatial, binspec = parse.parse_binning(np.array([headarr[0]['CCDXBIN'], headarr[0]['CCDYBIN']])) binning = parse.binning2string(binspatial, binspec) return binning - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -182,7 +182,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'COMP') & (fitstbl['dispname'] != 'Flat') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return 
np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/ldt_deveny.py b/pypeit/spectrographs/ldt_deveny.py index a71f057637..bd99b7a0d5 100644 --- a/pypeit/spectrographs/ldt_deveny.py +++ b/pypeit/spectrographs/ldt_deveny.py @@ -179,9 +179,9 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: "600/6750":"DV7", "831/8000":"DV8", "1200/5000":"DV9", "2160/5000":"DV10", "UNKNOWN":"DVxx"} if (grating_kwd := headarr[0]['GRATING']) not in gratings: - msgs.error(f"Grating value {grating_kwd} not recognized.") + raise PypeItError(f"Grating value {grating_kwd} not recognized.") if grating_kwd == "UNKNOWN": - msgs.warn(f"Grating not selected in the LOUI; {msgs.newline()}" + msgs.warning(f"Grating not selected in the LOUI; {msgs.newline()}" "Fix the header keyword GRATING before proceeding.") return f"{gratings[grating_kwd]} ({grating_kwd})" @@ -201,7 +201,7 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: # Extract lines/mm, catch 'UNKNOWN' grating if (grating_kwd := headarr[0]["GRATING"]) == "UNKNOWN": lpmm = np.inf - msgs.warn(f"Grating angle not selected in the LOUI; {msgs.newline()}" + msgs.warning(f"Grating angle not selected in the LOUI; {msgs.newline()}" "Fix the header keyword GRANGLE before proceeding.") else: lpmm = float(grating_kwd.split("/")[0]) @@ -229,7 +229,7 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: else headarr[0]["OBJNAME"].strip() ) - msgs.error(f"Not ready for compound meta {meta_key} for LDT/DeVeny") + raise PypeItError(f"Not ready for compound meta {meta_key} for LDT/DeVeny") def configuration_keys(self): """ @@ -413,7 +413,7 @@ def check_frame_type(self, ftype:str, fitstbl:Table, exprng=None): if ftype in ['pinhole', 'align', 'sky', 'lampoffflats', 'scattlight']: # DeVeny doesn't have any of these types of frames return np.zeros(len(fitstbl), dtype=bool) - msgs.warn(f"Cannot determine if frames are of type {ftype}") + msgs.warning(f"Cannot determine if frames are of type {ftype}") return np.zeros(len(fitstbl), dtype=bool) def pypeit_file_keys(self): @@ -614,7 +614,7 @@ def calc_pattern_freq(self, frame, rawdatasec_img, oscansec_img, hdu): patt_freqs : :obj:`list` List of pattern frequencies. 
""" - msgs.error(f"Pattern noise removal is not yet implemented for spectrograph {self.name}") + raise PypeItError(f"Pattern noise removal is not yet implemented for spectrograph {self.name}") return [] def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table, diff --git a/pypeit/spectrographs/magellan_fire.py b/pypeit/spectrographs/magellan_fire.py index b6b27f8aa3..c8e83da8a7 100644 --- a/pypeit/spectrographs/magellan_fire.py +++ b/pypeit/spectrographs/magellan_fire.py @@ -240,7 +240,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Science') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @property @@ -459,6 +459,6 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Arc') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/magellan_mage.py b/pypeit/spectrographs/magellan_mage.py index c7916399d8..6b81620996 100644 --- a/pypeit/spectrographs/magellan_mage.py +++ b/pypeit/spectrographs/magellan_mage.py @@ -192,7 +192,7 @@ def compound_meta(self, headarr, meta_key): ttime = Time(time, format='isot') return ttime.mjd else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ diff --git a/pypeit/spectrographs/mdm_modspec.py b/pypeit/spectrographs/mdm_modspec.py index 25f0f78077..d0654ee6e2 100644 --- a/pypeit/spectrographs/mdm_modspec.py +++ b/pypeit/spectrographs/mdm_modspec.py @@ -73,7 +73,7 @@ def get_detector_par(self, det, hdu=None): binning = self.compound_meta(self.get_headarr(hdu), 'binning') if binning != '1,1': - msgs.error("Not ready for any binning except 1x1; contact the developers") + raise PypeItError("Not ready for any binning except 1x1; contact the developers") # Detector 1 continued detector_dict = dict( @@ -219,7 +219,7 @@ def compound_meta(self, headarr, meta_key): if meta_key == 'cenwave': return 5100.0 else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -285,7 +285,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['illumflat', 'trace']: # Twilight Flats return good_exp & (fitstbl['idname'] == 'Flat') & (fitstbl['mirror'] == 'OUT') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) \ No newline at end of file diff --git a/pypeit/spectrographs/mdm_osmos.py b/pypeit/spectrographs/mdm_osmos.py index 14fb845f11..14f81e2519 100644 --- a/pypeit/spectrographs/mdm_osmos.py +++ b/pypeit/spectrographs/mdm_osmos.py @@ -147,7 +147,7 @@ def compound_meta(self, headarr, meta_key): binspec = headarr[0]['CCDYBIN'] return parse.binning2string(binspec, binspatial) else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -231,7 +231,7 @@ def check_frame_type(self, 
ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & np.array([ilamp in ['Ar','Xe'] for ilamp in fitstbl['lampstat01']]) & (fitstbl['idname'] == 'COMP') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -324,7 +324,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & (fitstbl['idname'] == 'COMP') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod diff --git a/pypeit/spectrographs/mmt_binospec.py b/pypeit/spectrographs/mmt_binospec.py index 90e178b658..c6fdd78522 100644 --- a/pypeit/spectrographs/mmt_binospec.py +++ b/pypeit/spectrographs/mmt_binospec.py @@ -346,7 +346,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['pixelflat', 'trace', 'illumflat']: return good_exp & (fitstbl['lampstat01'] == 'off') & (fitstbl['lampstat02'] == 'deployed') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/mmt_bluechannel.py b/pypeit/spectrographs/mmt_bluechannel.py index 156e8c1f7c..6ecbb51c1c 100644 --- a/pypeit/spectrographs/mmt_bluechannel.py +++ b/pypeit/spectrographs/mmt_bluechannel.py @@ -176,7 +176,7 @@ def compound_meta(self, headarr, meta_key): else: return 'off' - msgs.error(f"Not ready for compound meta, {meta_key}, for MMT Blue Channel.") + raise PypeItError(f"Not ready for compound meta, {meta_key}, for MMT Blue Channel.") def configuration_keys(self): """ @@ -353,7 +353,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpm_img[-1, :] = 1 else: - msgs.error(f"Invalid detector number, {det}, for MMT Blue Channel (only one detector).") + raise PypeItError(f"Invalid detector number, {det}, for MMT Blue Channel (only one detector).") return bpm_img @@ -459,7 +459,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # i think the bright lamp, BC, is the only one ever used for this. imagetyp should always be set to flat. 
return good_exp & (fitstbl['lampstat01'] == 'off') & (fitstbl['target'] == 'skyflat') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/mmt_mmirs.py b/pypeit/spectrographs/mmt_mmirs.py index 4425543c29..a04e9ed4d3 100644 --- a/pypeit/spectrographs/mmt_mmirs.py +++ b/pypeit/spectrographs/mmt_mmirs.py @@ -76,7 +76,7 @@ def compound_meta(self, headarr, meta_key): time = headarr[1]['DATE-OBS'] ttime = Time(time, format='isot') return ttime.mjd - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def raw_header_cards(self): """ @@ -271,7 +271,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'object') if ftype == 'dark': return good_exp & (fitstbl['idname'] == 'dark') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/not_alfosc.py b/pypeit/spectrographs/not_alfosc.py index 42589cc3e4..a822acd53b 100644 --- a/pypeit/spectrographs/not_alfosc.py +++ b/pypeit/spectrographs/not_alfosc.py @@ -195,7 +195,7 @@ def compound_meta(self, headarr, meta_key): elif meta_key == 'ra': objra = headarr[0]['OBJRA'] # Given in hours, not deg return objra*15. - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -266,7 +266,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & (fitstbl['idname'] == 'WAVE,LAMP') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_specific_par(self, scifile, inp_par=None): @@ -316,7 +316,7 @@ def config_specific_par(self, scifile, inp_par=None): elif self.get_meta_value(scifile, 'dispname') == 'Grism_#20': par['calibrations']['wavelengths']['reid_arxiv'] = 'not_alfosc_grism20.fits' else: - msgs.warn('not_alfosc.py: YOU NEED TO ADD IN THE WAVELENGTH SOLUTION FOR THIS GRISM') + msgs.warning('not_alfosc.py: YOU NEED TO ADD IN THE WAVELENGTH SOLUTION FOR THIS GRISM') # Return return par diff --git a/pypeit/spectrographs/ntt_efosc2.py b/pypeit/spectrographs/ntt_efosc2.py index 8af1a1742e..ace9a1637d 100644 --- a/pypeit/spectrographs/ntt_efosc2.py +++ b/pypeit/spectrographs/ntt_efosc2.py @@ -99,7 +99,7 @@ def compound_meta(self, headarr, meta_key): oscan_x+1*xbin, max_x-1*xbin) # Actually two overscan regions, here I only dealing with the region on x-axis return oscansec else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def config_independent_frames(self): """ @@ -351,7 +351,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & ((fitstbl['target'] == 'WAVE')) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git 
a/pypeit/spectrographs/opticalmodel.py b/pypeit/spectrographs/opticalmodel.py index 2b94f0e421..4f319aafb0 100644 --- a/pypeit/spectrographs/opticalmodel.py +++ b/pypeit/spectrographs/opticalmodel.py @@ -137,7 +137,7 @@ def reflect(self, r, nslits, wave=None, order=1): """ if wave is None and self.central_wave is None: - msgs.error('Must define a wavelength for the calculation.') + raise PypeItError('Must define a wavelength for the calculation.') if wave is None: msgs.info('Using central wavelength for calculation.') _wave = numpy.array([self.central_wave]) if wave is None else numpy.atleast_1d(wave) diff --git a/pypeit/spectrographs/p200_dbsp.py b/pypeit/spectrographs/p200_dbsp.py index 18f8632401..ffb6025f3a 100644 --- a/pypeit/spectrographs/p200_dbsp.py +++ b/pypeit/spectrographs/p200_dbsp.py @@ -80,7 +80,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): try: return Angle(headarr[0]['ANGLE'].lower()).deg except Exception as e: - msgs.warn("Could not read dispangle from header:" + msgs.newline() + str(headarr[0]['ANGLE'])) + msgs.warning("Could not read dispangle from header:" + msgs.newline() + str(headarr[0]['ANGLE'])) raise e else: return None @@ -163,7 +163,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] != '0000000') & (fitstbl['idname'] == 'cal') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): @@ -214,7 +214,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): if meta_key == 'binning': binspatial, binspec = headarr[0]['CCDSUM'].split(' ') return parse.binning2string(binspec, binspatial) - msgs.error(f"Not ready for this compound meta: {meta_key}") + raise PypeItError(f"Not ready for this compound meta: {meta_key}") def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): """ @@ -380,7 +380,7 @@ def config_specific_par(self, scifile, inp_par=None): # blue wavelength coverage with a 1200 lines/mm grating is about 1550 A diff = np.abs(best_wv - cen_wv_AA) if diff > 775: - msgs.warn("Closest matching archived wavelength solutions" + msgs.warning("Closest matching archived wavelength solutions" f"differs in central wavelength by {diff:4.0f} A. The" "wavelength solution may be unreliable. 
If wavelength" "calibration fails, try using the holy grail method by" @@ -390,7 +390,7 @@ def config_specific_par(self, scifile, inp_par=None): "\t\tmethod = holy-grail") par['calibrations']['wavelengths']['reid_arxiv'] = reids[best_wv] except KeyError: - msgs.warn("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") + msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") else: if grating == '600/4000' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_blue_600_4000_d55.fits' @@ -399,7 +399,7 @@ def config_specific_par(self, scifile, inp_par=None): elif grating == '300/3990' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_blue_300_3990_d55.fits' else: - msgs.warn("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") + msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") return par @@ -438,7 +438,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): binspec, binspatial = headarr[0]['CCDSUM'].split(' ') return parse.binning2string(binspec, binspatial) else: - msgs.error(f"Not ready for this compound meta: {meta_key}") + raise PypeItError(f"Not ready for this compound meta: {meta_key}") def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): """ @@ -610,7 +610,7 @@ def config_specific_par(self, scifile, inp_par=None): # red wavelength coverage with a 1200 lines/mm grating is about 1600 A diff = np.abs(best_wv - cen_wv_AA) if diff > 800: - msgs.warn("Closest matching archived wavelength solutions" + msgs.warning("Closest matching archived wavelength solutions" f"differs in central wavelength by {diff:4.0f} A. The" "wavelength solution may be unreliable. 
If wavelength" "calibration fails, try using the holy grail method by" @@ -620,14 +620,14 @@ def config_specific_par(self, scifile, inp_par=None): "\t\tmethod = holy-grail") par['calibrations']['wavelengths']['reid_arxiv'] = reids[best_wv] except KeyError: - msgs.warn("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") + msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") else: if grating == '316/7500' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_red_316_7500_d55.fits' elif grating == '600/10000' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_red_600_10000_d55.fits' else: - msgs.warn("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") + msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") return par diff --git a/pypeit/spectrographs/p200_ngps.py b/pypeit/spectrographs/p200_ngps.py index 67820ab3fa..6d9390c3f1 100644 --- a/pypeit/spectrographs/p200_ngps.py +++ b/pypeit/spectrographs/p200_ngps.py @@ -146,7 +146,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'THAR') # Temporary fix, do not use FEAR arcs - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -218,7 +218,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): elif meta_key == 'dichroic': return None else: - msgs.error(f"Not ready for this compound meta: {meta_key}") + raise PypeItError(f"Not ready for this compound meta: {meta_key}") def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): @@ -387,7 +387,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): elif meta_key == 'dichroic': return None else: - msgs.error("Not ready for this compound meta: ", meta_key) + raise PypeItError("Not ready for this compound meta: ", meta_key) def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): diff --git a/pypeit/spectrographs/p200_tspec.py b/pypeit/spectrographs/p200_tspec.py index 641dc2d60e..16b2f23eb4 100644 --- a/pypeit/spectrographs/p200_tspec.py +++ b/pypeit/spectrographs/p200_tspec.py @@ -71,7 +71,7 @@ def compound_meta(self, headarr, meta_key): ttime = Time(time, format='isot') return ttime.mjd else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ diff --git a/pypeit/spectrographs/shane_kast.py b/pypeit/spectrographs/shane_kast.py index 5d3ebb1a2a..cfbd1dbef3 100644 --- a/pypeit/spectrographs/shane_kast.py +++ b/pypeit/spectrographs/shane_kast.py @@ -101,7 +101,7 @@ def compound_meta(self, headarr, meta_key): time = headarr[0]['DATE'] ttime = Time(time, format='isot') return ttime.mjd - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -153,7 +153,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arcs')# & (fitstbl['target'] == 'Arcs') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ 
-298,7 +298,7 @@ def config_specific_par(self, scifile, inp_par=None): elif self.get_meta_value(scifile, 'dispname') == '830/3460': # NOT YET TESTED par['calibrations']['wavelengths']['reid_arxiv'] = 'shane_kast_blue_830.fits' else: - msgs.error("NEED TO ADD YOUR GRISM HERE!") + raise PypeItError("NEED TO ADD YOUR GRISM HERE!") # Return return par @@ -421,7 +421,7 @@ def get_detector_par(self, det, hdu=None): # Allow for reading only Amp 2! if x1_1 < 3: - msgs.warn("Only Amp 2 data was written. Ignoring Amp 1") + msgs.warning("Only Amp 2 data was written. Ignoring Amp 1") detector_dict['numamplifiers'] = 1 detector_dict['gain'] = np.atleast_1d(detector_dict['gain'][0]) detector_dict['ronoise'] = np.atleast_1d(detector_dict['ronoise'][0]) diff --git a/pypeit/spectrographs/soar_goodman.py b/pypeit/spectrographs/soar_goodman.py index eda9049174..2d4bc93b5c 100644 --- a/pypeit/spectrographs/soar_goodman.py +++ b/pypeit/spectrographs/soar_goodman.py @@ -80,7 +80,7 @@ def compound_meta(self, headarr, meta_key): ttime = Time(headarr[1]['DATE-OBS'], format='isot') return ttime.mjd else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -204,7 +204,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arc') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -283,7 +283,7 @@ def get_detector_par(self, det, hdu=None): osec = f"[:,1:{int(col0*2)-2}:]" detector_dict['oscansec'] = np.atleast_1d(osec) else: - msgs.error("Ask the developers to add your binning. Or add it yourself.") + raise PypeItError("Ask the developers to add your binning. Or add it yourself.") # Return return detector_container.DetectorContainer(**detector_dict) diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index 15f5cef41b..3940c03ded 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -165,7 +165,7 @@ def __init__(self): self.init_meta() self.validate_metadata() if self.pypeline == 'Echelle' and self.ech_fixed_format is None: - msgs.error('ech_fixed_format must be set for echelle spectrographs') + raise PypeItError('ech_fixed_format must be set for echelle spectrographs') # TODO: Is there a better way to do this? 
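# Illustrative sketch, not part of the diff: the constructor contract enforced
# by the hunk above. With this change, an Echelle subclass that leaves
# ech_fixed_format unset now fails fast at instantiation with a PypeItError
# instead of exiting through msgs.error(). The class below is hypothetical.
from pypeit.pypmsgs import PypeItError
from pypeit.spectrographs.spectrograph import Spectrograph

class MyEchelleSpectrograph(Spectrograph):
    # Schematic only; a real subclass must also define its detectors and meta.
    pypeline = 'Echelle'
    ech_fixed_format = True   # leaving this as None raises PypeItError in __init__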
# Validate the instance by checking that the class has defined the @@ -306,7 +306,7 @@ def find_raw_files(cls, root, extension=None): _ext = [extension] if isinstance(extension, str) else extension _ext = [e for e in _ext if e in cls.allowed_extensions] if len(_ext) == 0: - msgs.error(f'{extension} is not or does not include allowed extensions for ' + raise PypeItError(f'{extension} is not or does not include allowed extensions for ' f'{cls.name}; choose from {cls.allowed_extensions}.') return io.files_from_extension(root, extension=_ext) @@ -326,12 +326,12 @@ def _check_extensions(self, filename): try: tsthdr = fits.getheader(_filename, ext=0) except IOError: - msgs.error("Cannot open the file: {0}".format(_filename)) + raise PypeItError("Cannot open the file: {0}".format(_filename)) if 'PIPELINE' in tsthdr and tsthdr['PIPELINE'] == 'PYPEIT': return # Perform the extensions check if not any([_filename.name.endswith(ext) for ext in self.allowed_extensions]): - msgs.error(f'The input file ({_filename.name}) does not have a recognized ' + raise PypeItError(f'The input file ({_filename.name}) does not have a recognized ' f'extension. The allowed extensions for ' f'{self.name} include {",".join(self.allowed_extensions)}.') @@ -437,7 +437,7 @@ def subheader_for_spec(self, row_fitstbl, raw_header, extra_header_cards=None, subheader[key] = (row_fitstbl[key], core_meta[key]['comment']) except KeyError: if not allow_missing: - msgs.error(f"Core Meta Key: {key} not present in your fitstbl/Header") + raise PypeItError(f"Core Meta Key: {key} not present in your fitstbl/Header") # Configuration Keys -- In addition to Core Meta, # other Config-Specific values; optional for key in self.configuration_keys(): @@ -446,7 +446,7 @@ def subheader_for_spec(self, row_fitstbl, raw_header, extra_header_cards=None, subheader[key] = row_fitstbl[key] except KeyError: # If configuration_key is not in row_fitstbl, warn but move on - msgs.warn(f"Configuration Key: {key} not present in your fitstbl/Header") + msgs.warning(f"Configuration Key: {key} not present in your fitstbl/Header") # Add a few more for key in ['filename']: # For fluxing subheader[key] = row_fitstbl[key] @@ -553,7 +553,7 @@ def empty_bpm(self, filename, det, shape=None): # Shape must be defined at this point. if _shape is None: - msgs.error('Must specify shape if filename is None.') + raise PypeItError('Must specify shape if filename is None.') # Generate # TODO: Why isn't this a boolean array? @@ -580,7 +580,7 @@ def bpm_frombias(self, msbias, bpm_img, thresh=10.): """ # Check that the bias has the correct shape if msbias.image.shape != bpm_img.shape: - msgs.error(f'Shape mismatch between processed bias {msbias.image.shape} and expected ' + raise PypeItError(f'Shape mismatch between processed bias {msbias.image.shape} and expected ' f'BPM {bpm_img.shape}.') # Setup nimg = 1 if bpm_img.ndim == 2 else bpm_img.shape[0] @@ -673,7 +673,7 @@ def list_detectors(self, mosaic=False): axis. """ if mosaic and len(self.allowed_mosaics) == 0: - msgs.error(f'Spectrograph {self.name} does not have any defined detector mosaics.') + raise PypeItError(f'Spectrograph {self.name} does not have any defined detector mosaics.') dets = self.allowed_mosaics if mosaic else range(1,self.ndet+1) return np.array([self.get_det_name(det) for det in dets]) @@ -707,7 +707,7 @@ def get_lamps(self, fitstbl): This method is not defined for all spectrographs. This base-class method raises an exception. 
""" - msgs.error('This spectrograph does not support the use of lamps list from header. ' + raise PypeItError('This spectrograph does not support the use of lamps list from header. ' 'provide a list of lamps using the parameter `lamps` in WavelengthSolutionPar') def get_slitmask(self, filename): @@ -726,7 +726,7 @@ def mask_to_pixel_coordinates(self, x=None, y=None, wave=None, order=1, filename method raises an exception. This may be because ``use_maskdesign`` has been set to True for a spectrograph that does not support it. """ - msgs.error('This spectrograph does not support the use of mask design. ' + raise PypeItError('This spectrograph does not support the use of mask design. ' 'Set `use_maskdesign=False`') def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, @@ -746,7 +746,7 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, debug (:obj:`bool`, optional): Debug ccdnum (:obj:`int`, optional): detector number """ - msgs.error('This spectrograph does not support the use of mask design. ' + raise PypeItError('This spectrograph does not support the use of mask design. ' 'Set `use_maskdesign=False`') def configuration_keys(self): @@ -794,10 +794,10 @@ def same_configuration(self, configs, check_keys=True): # Check that the relevant keys are in the first configuration for key in cfg_meta: if key not in configs[cfg_id[0]].keys(): - msgs.error(f'Configuration {cfg_id[0]} missing required key, {key}. Cannot ' + raise PypeItError(f'Configuration {cfg_id[0]} missing required key, {key}. Cannot ' 'determine if configurations are the same!') if key not in self.meta.keys(): - msgs.error(f'CODING ERROR: {key} is a configuration key but not defined in ' + raise PypeItError(f'CODING ERROR: {key} is a configuration key but not defined in ' f'the metadata dictionary for {self.__class__.__name__}!') else: cfg_meta = configs[cfg_id[0]].keys() @@ -807,7 +807,7 @@ def same_configuration(self, configs, check_keys=True): matched = [] for key in cfg_meta: if key not in configs[_cfg_id].keys(): - msgs.error(f'Configuration {_cfg_id} missing required key, {key}. Cannot ' + raise PypeItError(f'Configuration {_cfg_id} missing required key, {key}. Cannot ' 'determine if configurations are the same!') # TODO: Instead check if 'rtol' exists and is not None? if isinstance(configs[cfg_id[0]][key], (float, np.floating)) \ @@ -1037,12 +1037,12 @@ def get_det_name(self, det): if isinstance(det, tuple): # The "detector" is a mosaic. if det not in self.allowed_mosaics: - msgs.error(f'{det} is not an allowed mosaic for {self.name}.') + raise PypeItError(f'{det} is not an allowed mosaic for {self.name}.') return Mosaic.get_name(self.allowed_mosaics.index(det)+1) # Single detector if det <= 0 or det > self.ndet: - msgs.error(f'{det} is not a valid detector for {self.name}.') + raise PypeItError(f'{det} is not a valid detector for {self.name}.') return DetectorContainer.get_name(det) def get_det_id(self, det): @@ -1062,12 +1062,12 @@ def get_det_id(self, det): if isinstance(det, tuple): # The "detector" is a mosaic. 
if det not in self.allowed_mosaics: - msgs.error(f'{det} is not an allowed mosaic for {self.name}.') + raise PypeItError(f'{det} is not an allowed mosaic for {self.name}.') return self.allowed_mosaics.index(det)+1 # Single detector if det <= 0 or det > self.ndet: - msgs.error(f'{det} is not a valid detector for {self.name}.') + raise PypeItError(f'{det} is not a valid detector for {self.name}.') return det def select_detectors(self, subset=None): @@ -1127,7 +1127,7 @@ def select_detectors(self, subset=None): allowed = np.arange(1, self.ndet+1).tolist() + self.allowed_mosaics if any([s not in allowed for s in _subset]): - msgs.error('Selected detectors or detector mosaics contain invalid values.') + raise PypeItError('Selected detectors or detector mosaics contain invalid values.') # Require the list contains unique items # DP: I had to modify this, because list(set(_subset)) was changing the order of the detectors @@ -1167,10 +1167,10 @@ def validate_det(self, det): """ if isinstance(det, tuple): if det not in self.allowed_mosaics: - msgs.error(f'Selected detectors {det} are not an allowed mosaic for {self.name}.') + raise PypeItError(f'Selected detectors {det} are not an allowed mosaic for {self.name}.') return len(det), det if not isinstance(det, (int, np.integer)): - msgs.error(f'Provided det must have type tuple or integer, not {type(det)}.') + raise PypeItError(f'Provided det must have type tuple or integer, not {type(det)}.') return 1, (det,) def get_rawimage(self, raw_file, det, sec_includes_binning=False): @@ -1279,7 +1279,7 @@ def get_rawimage(self, raw_file, det, sec_includes_binning=False): if raw_img[i].ndim != 2: raw_img[i] = np.squeeze(raw_img[i]) if raw_img[i].ndim != 2: - msgs.error(f"Raw images must be 2D; check extension {detectors[i]['dataext']} " + raise PypeItError(f"Raw images must be 2D; check extension {detectors[i]['dataext']} " f"of {raw_file}.") for section in ['datasec', 'oscansec']: @@ -1311,7 +1311,7 @@ def get_rawimage(self, raw_file, det, sec_includes_binning=False): return detectors[0], raw_img[0], hdu, exptime, rawdatasec_img[0], oscansec_img[0] if any([img.shape != raw_img[0].shape for img in raw_img[1:]]): - msgs.error('All raw images in a mosaic must have the same shape.') + raise PypeItError('All raw images in a mosaic must have the same shape.') # Return all images for mosaic return mosaic, np.array(raw_img), hdu, exptime, np.array(rawdatasec_img), \ np.array(oscansec_img) @@ -1391,11 +1391,11 @@ def get_meta_value(self, inp, meta_key, required=False, elif isinstance(inp, fits.Header): headarr = [inp] else: - msgs.error(f'Unrecognized type for input: {type(inp)}') + raise PypeItError(f'Unrecognized type for input: {type(inp)}') if headarr is None: if required: - msgs.error(f'Unable to access required metadata value for {meta_key}. Input is ' + raise PypeItError(f'Unable to access required metadata value for {meta_key}. Input is ' f'either a bad file or an invalid argument to get_meta_value: {inp}.') return None @@ -1406,9 +1406,9 @@ def get_meta_value(self, inp, meta_key, required=False, # Are we prepared to provide this meta data? 
if meta_key not in self.meta.keys(): if required: - msgs.error("Need to allow for meta_key={} in your meta data".format(meta_key)) + raise PypeItError("Need to allow for meta_key={} in your meta data".format(meta_key)) else: - msgs.warn("Requested meta data for meta_key={} does not exist...".format(meta_key)) + msgs.warning("Requested meta data for meta_key={} does not exist...".format(meta_key)) return None # Is this meta required for this frame type (Spectrograph specific) @@ -1434,13 +1434,13 @@ def get_meta_value(self, inp, meta_key, required=False, elif 'compound' in self.meta[meta_key].keys(): value = self.compound_meta(headarr, meta_key) else: - msgs.error(f"Failed to load spectrograph value for meta: {meta_key}") + raise PypeItError(f"Failed to load spectrograph value for meta: {meta_key}") else: # Grab from the header, if we can value = headarr[self.meta[meta_key]['ext']][self.meta[meta_key]['card']] except (KeyError, TypeError) as e: if ignore_bad_header or not required: - msgs.warn(f"Bad Header key ({meta_key}), but we'll try to continue on..") + msgs.warning(f"Bad Header key ({meta_key}), but we'll try to continue on..") else: raise e @@ -1455,7 +1455,7 @@ def get_meta_value(self, inp, meta_key, required=False, ra, dec = meta.convert_radec(self.get_meta_value(headarr, 'ra', no_fussing=True), self.get_meta_value(headarr, 'dec', no_fussing=True)) except: - msgs.warn('Encounter invalid value of your coordinates. Give zeros for both RA and DEC') + msgs.warning('Encounter invalid value of your coordinates. Give zeros for both RA and DEC') ra, dec = 0.0, 0.0 value = ra if meta_key == 'ra' else dec @@ -1471,7 +1471,7 @@ def get_meta_value(self, inp, meta_key, required=False, retvalue = float(value) elif self.meta_data_model[meta_key]['dtype'] == tuple: if not isinstance(value, tuple): - msgs.error('dtype for {0} is tuple, but value '.format(meta_key) + raise PypeItError('dtype for {0} is tuple, but value '.format(meta_key) + 'provided is {0}. Casting is not possible.'.format(type(value))) retvalue = value castable = True @@ -1497,10 +1497,10 @@ def get_meta_value(self, inp, meta_key, required=False, kerror = True # Bomb out? if kerror: - msgs.error('Required meta "{0}" did not load!'.format(meta_key) + raise PypeItError('Required meta "{0}" did not load!'.format(meta_key) + 'You may have a corrupt header.') else: - msgs.warn('Required card {0} missing '.format(self.meta[meta_key]['card']) + msgs.warning('Required card {0} missing '.format(self.meta[meta_key]['card']) + 'from your header. Proceeding with risk...') return None @@ -1536,7 +1536,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Returns: `astropy.wcs.WCS`_: The world-coordinate system. """ - msgs.warn("No WCS setup for spectrograph: {0:s}".format(self.name)) + msgs.warning("No WCS setup for spectrograph: {0:s}".format(self.name)) return None def get_datacube_bins(self, slitlength, minmax, num_wave): @@ -1561,7 +1561,7 @@ def get_datacube_bins(self, slitlength, minmax, num_wave): when constructing a histogram of the spec2d files. The elements are :math:`(x,y,\lambda)`. """ - msgs.warn("No datacube setup for spectrograph: {0:s}".format(self.name)) + msgs.warning("No datacube setup for spectrograph: {0:s}".format(self.name)) return None def fit_2d_det_response(self, det_resp, gpmask): @@ -1577,7 +1577,7 @@ def fit_2d_det_response(self, det_resp, gpmask): Returns: `numpy.ndarray`_: A model fit to the detector response. 
""" - msgs.warn("2D detector response is not implemented for spectrograph: {0:s}".format(self.name)) + msgs.warning("2D detector response is not implemented for spectrograph: {0:s}".format(self.name)) return np.ones_like(det_resp) def validate_metadata(self): @@ -1597,20 +1597,20 @@ def validate_metadata(self): core_keys = np.array(list(core_meta.keys())) indx = np.invert(np.isin(core_keys, list(self.meta.keys()))) if np.any(indx): - msgs.error('Required keys {0} not defined by spectrograph!'.format(core_keys[indx])) + raise PypeItError('Required keys {0} not defined by spectrograph!'.format(core_keys[indx])) # Check for rtol for config keys that are type float config_keys = np.array(self.configuration_keys()) indx = ['rtol' not in self.meta[key].keys() if self.meta_data_model[key]['dtype'] == float else False for key in config_keys] if np.any(indx): - msgs.error('rtol not set for {0} keys in spectrograph meta!'.format(config_keys[indx])) + raise PypeItError('rtol not set for {0} keys in spectrograph meta!'.format(config_keys[indx])) # Now confirm all meta are in the data model meta_keys = np.array(list(self.meta.keys())) indx = np.invert(np.isin(meta_keys, list(self.meta_data_model.keys()))) if np.any(indx): - msgs.error('Meta data keys {0} not in metadata model'.format(meta_keys[indx])) + raise PypeItError('Meta data keys {0} not in metadata model'.format(meta_keys[indx])) def get_headarr(self, inp, strict=True): """ @@ -1642,15 +1642,15 @@ def get_headarr(self, inp, strict=True): hdul = io.fits_open(inp) except: if strict: - msgs.error(f'Cannot open {inp}.') + raise PypeItError(f'Cannot open {inp}.') else: - msgs.warn(f'Cannot open {inp}. Proceeding, but consider removing this file!') + msgs.warning(f'Cannot open {inp}. Proceeding, but consider removing this file!') return None elif isinstance(inp, (list, fits.HDUList)): # TODO: If a list, check that the list elements are HDUs? hdul = inp else: - msgs.error(f'Input to get_headarr has incorrect type: {type(inp)}.') + raise PypeItError(f'Input to get_headarr has incorrect type: {type(inp)}.') return [hdu.header for hdu in hdul] def check_frame_type(self, ftype, fitstbl, exprng=None): @@ -1707,9 +1707,9 @@ def vet_assigned_ftypes(self, type_bits, fitstbl): indx = fitstbl.type_bitmask.flagged(type_bits, flag='standard') & \ fitstbl.type_bitmask.flagged(type_bits, flag='science') if np.any(indx): - msgs.warn('Some frames are assigned both science and standard types. Choosing the most likely type.') + msgs.warning('Some frames are assigned both science and standard types. Choosing the most likely type.') if 'ra' not in fitstbl.keys() or 'dec' not in fitstbl.keys(): - msgs.warn('Sky coordinates are not available. Standard stars cannot be identified.') + msgs.warning('Sky coordinates are not available. Standard stars cannot be identified.') # turn off the standard flag for all frames type_bits[indx] = fitstbl.type_bitmask.turn_off(type_bits[indx], flag='standard') return type_bits @@ -1717,7 +1717,7 @@ def vet_assigned_ftypes(self, type_bits, fitstbl): none_coords = indx & ((fitstbl['ra'] == 'None') | (fitstbl['dec'] == 'None') | np.isnan(fitstbl['ra']) | np.isnan(fitstbl['dec'])) if np.any(none_coords): - msgs.warn('The following frames have None coordinates. ' + msgs.warning('The following frames have None coordinates. 
' 'They could be a twilight flat frame that was missed by the automatic identification') [msgs.prindent(f) for f in fitstbl['filename'][none_coords]] # turn off the standard star flag for these frames @@ -1785,7 +1785,7 @@ def get_echelle_angle_files(self): Returns: list: List of files """ - msgs.error(f'Echelle angle files not ready for {self.name}') + raise PypeItError(f'Echelle angle files not ready for {self.name}') def order_platescale(self, order_vec, binning=None): """ @@ -1819,7 +1819,7 @@ def check_disperser(self): Ensure that the disperser is defined. """ if self.dispname is None: - msgs.error('Disperser used for observations is required. Reinit with an example ' + raise PypeItError('Disperser used for observations is required. Reinit with an example ' 'science frame.') @property @@ -1918,7 +1918,7 @@ def spec1d_match_spectra(self, sobjs): :obj:`tuple`: Arrays that provide the indices of slits matched across multiple detectors. """ - msgs.error(f'Method to match slits across detectors not defined for {self.name}') + raise PypeItError(f'Method to match slits across detectors not defined for {self.name}') def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table, trim_std_pixs=None, log10_blaze_function=None): @@ -1971,7 +1971,7 @@ def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table, if trim_std_pixs is not None: # make sure that the trim_pixs is a list of 2 integers if not isinstance(trim_std_pixs, (list, tuple)) or len(trim_std_pixs) != 2: - msgs.error("trim_std_pixs must be a list or tuple of two integers.") + raise PypeItError("trim_std_pixs must be a list or tuple of two integers.") # Mask the first and last trim_std_pixs pixels s = int(trim_std_pixs[0]) e = int(trim_std_pixs[1]) @@ -2015,7 +2015,7 @@ def calc_pattern_freq(self, frame, rawdatasec_img, oscansec_img, hdu): patt_freqs : :obj:`list` List of pattern frequencies. 
""" - msgs.warn(f"Pattern noise removal is not implemented for spectrograph {self.name}") + msgs.warning(f"Pattern noise removal is not implemented for spectrograph {self.name}") return [] def scattered_light_archive(self, binning, dispname): @@ -2040,7 +2040,7 @@ def scattered_light_archive(self, binning, dispname): # Grab the binning for convenience specbin, spatbin = parse.parse_binning(binning) - msgs.warn(f"Initial scattered light model parameters have not been setup for grating {dispname} of {self.name}") + msgs.warning(f"Initial scattered light model parameters have not been setup for grating {dispname} of {self.name}") x0 = np.array([200/specbin, 100/spatbin, # Gaussian kernel widths 200/specbin, 100/spatbin, # Lorentzian kernel widths 0.0/specbin, 0.0/spatbin, # pixel offsets diff --git a/pypeit/spectrographs/subaru_focas.py b/pypeit/spectrographs/subaru_focas.py index a3d3314405..7a479d8f66 100644 --- a/pypeit/spectrographs/subaru_focas.py +++ b/pypeit/spectrographs/subaru_focas.py @@ -143,7 +143,7 @@ def compound_meta(self, headarr, meta_key): binspec = headarr[0]['BIN-FCT2'] # Y return parse.binning2string(binspec, binspatial) else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def check_frame_type(self, ftype, fitstbl, exprng=None): """ @@ -176,7 +176,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'COMPARISON') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -299,7 +299,7 @@ def get_detector_par(self, det, hdu=None): elif chip == '2': return detector_container.DetectorContainer(**detector_dict2) else: - msgs.error(f'Unknown chip: {chip}!') + raise PypeItError(f'Unknown chip: {chip}!') def config_specific_par(self, scifile, inp_par=None): """ @@ -404,7 +404,7 @@ def config_specific_par(self, scifile, inp_par=None): # So, removing them should fix the problem. # ---------------------------- # else: - # msgs.error(f'Not ready for this grism {self.get_meta_value(scifile, "dispname")}') + # raise PypeItError(f'Not ready for this grism {self.get_meta_value(scifile, "dispname")}') return par @@ -562,7 +562,7 @@ def get_rawimage(self, raw_file, det): # collect correct data & overscan for binning in spatial (X) axis oscan_arr = overscan[bin_x][(det - 1)*4:][:4] if len(oscan_arr) != 4: - msgs.error(f'FOCAS detector {det} has an unexpected number of overscan regions: {len(oscan_arr)}. ' + raise PypeItError(f'FOCAS detector {det} has an unexpected number of overscan regions: {len(oscan_arr)}. ' f'Expected 4 (2 for each chip). 
Please check the overscan definitions in the code.') # fill in rawdatasec_img and oscansec_img arrays according to diff --git a/pypeit/spectrographs/tng_dolores.py b/pypeit/spectrographs/tng_dolores.py index e79883c72b..b6abf6c7cd 100644 --- a/pypeit/spectrographs/tng_dolores.py +++ b/pypeit/spectrographs/tng_dolores.py @@ -125,7 +125,7 @@ def config_specific_par(self, scifile, inp_par=None): par['calibrations']['wavelengths']['lamps'] = ['NeI', 'HgI'] else: par['calibrations']['wavelengths']['method'] = 'holy-grail' - msgs.warn('Check wavelength calibration file.') + msgs.warning('Check wavelength calibration file.') # Return return par @@ -176,7 +176,7 @@ def compound_meta(self, headarr, meta_key): elif meta_key == 'ra': radeg = headarr[0]['RA-RAD']*180.0/np.pi # Convert radians to decimal degrees return radeg - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -248,7 +248,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'CALIB') & ( (fitstbl['lampstat01'] == 'Ne+Hg') | (fitstbl['lampstat01'] == 'Helium') ) \ & (fitstbl['dispname'] != 'OPEN') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/util.py b/pypeit/spectrographs/util.py index efcac82e23..7cf45e4891 100644 --- a/pypeit/spectrographs/util.py +++ b/pypeit/spectrographs/util.py @@ -53,10 +53,10 @@ def load_spectrograph(spec): spectrograph.dispname = header['DISPNAME'] return spectrograph else: - msgs.error(f'Unknown PYP_SPEC {pyp_spec} found in {spec}') + raise PypeItError(f'Unknown PYP_SPEC {pyp_spec} found in {spec}') else: - msgs.error(f'{spec} did not contain PYP_SPEC in its header') + raise PypeItError(f'{spec} did not contain PYP_SPEC in its header') - msgs.error('{0} is not a supported spectrograph.'.format(spec)) + raise PypeItError('{0} is not a supported spectrograph.'.format(spec)) diff --git a/pypeit/spectrographs/vlt_fors.py b/pypeit/spectrographs/vlt_fors.py index dd327fedc4..a7f1f16afd 100644 --- a/pypeit/spectrographs/vlt_fors.py +++ b/pypeit/spectrographs/vlt_fors.py @@ -137,9 +137,9 @@ def compound_meta(self, headarr, meta_key): # This is for the bias frames return None else: - msgs.error(f"PypeIt does not currently support VLT/FORS2 '{mode}' data reduction.") + raise PypeItError(f"PypeIt does not currently support VLT/FORS2 '{mode}' data reduction.") else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -203,7 +203,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & ((fitstbl['target'] == 'LAMP,WAVE') | (fitstbl['target'] == 'WAVE,LAMP')) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -299,7 +299,7 @@ def get_detector_par(self, det, hdu=None): elif chip == 'CHIP2': return detector_container.DetectorContainer(**detector_dict2) else: - msgs.error(f'Unknown chip: {chip}!') + raise PypeItError(f'Unknown chip: {chip}!') def config_specific_par(self, scifile, inp_par=None): """ @@ -436,7 +436,7 @@ def parse_dither_pattern(self, file_list, ext=None): ra, dec = meta.convert_radec(self.get_meta_value(hdr, 'ra', 
no_fussing=True), self.get_meta_value(hdr, 'dec', no_fussing=True)) except: - msgs.warn('Encounter invalid value of your coordinates. Give zeros for both RA and DEC. Check that this does not cause problems with the offsets') + msgs.warning('Encounter invalid value of your coordinates. Give zeros for both RA and DEC. Check that this does not cause problems with the offsets') ra, dec = 0.0, 0.0 if ifile == 0: coord_ref = SkyCoord(ra*units.deg, dec*units.deg) @@ -454,7 +454,7 @@ def parse_dither_pattern(self, file_list, ext=None): u_hat_this = np.array([ra_off.to('arcsec').value/separation, dec_off.to('arcsec').value/separation]) dot_product = np.dot(u_hat_slit, u_hat_this) if not np.isclose(np.abs(dot_product),1.0, atol=1e-2): - msgs.error('The slit appears misaligned with the angle between the coordinates: dot_product={:7.5f}'.format(dot_product) + msgs.newline() + + raise PypeItError('The slit appears misaligned with the angle between the coordinates: dot_product={:7.5f}'.format(dot_product) + msgs.newline() + 'The position angle in the headers {:5.3f} differs from that computed from the coordinates {:5.3f}'.format(posang_this, posang_ref)) offset_arcsec[ifile] = separation*np.sign(dot_product) diff --git a/pypeit/spectrographs/vlt_sinfoni.py b/pypeit/spectrographs/vlt_sinfoni.py index df29ac8653..126fd1e2c2 100644 --- a/pypeit/spectrographs/vlt_sinfoni.py +++ b/pypeit/spectrographs/vlt_sinfoni.py @@ -212,7 +212,7 @@ def compound_meta(self, headarr, meta_key): return None return decker else: - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -313,7 +313,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['sky']: return good_exp & (fitstbl['idname'] == 'SINFONI_IFS_SKY') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/vlt_xshooter.py b/pypeit/spectrographs/vlt_xshooter.py index 9160f834e3..c82e427fcf 100644 --- a/pypeit/spectrographs/vlt_xshooter.py +++ b/pypeit/spectrographs/vlt_xshooter.py @@ -81,7 +81,7 @@ def compound_meta(self, headarr, meta_key): else: binspec = 1 return parse.binning2string(binspec, binspatial) - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def config_independent_frames(self): """ @@ -193,7 +193,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['target'] == 'LAMP,WAVE') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -426,7 +426,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & ((fitstbl['target'] == 'LAMP,WAVE') | (fitstbl['target'] == 'SCIENCE')) - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/wht_isis.py b/pypeit/spectrographs/wht_isis.py index 7464e459b8..cb1c8765b1 100644 --- a/pypeit/spectrographs/wht_isis.py +++ b/pypeit/spectrographs/wht_isis.py @@ -67,7 +67,7 @@ def compound_meta(self, headarr, meta_key): binspatial = 
headarr[0]['CCDXBIN'] binspec = headarr[0]['CCDYBIN'] return parse.binning2string(binspec, binspatial) - msgs.error("Not ready for this compound meta") + raise PypeItError("Not ready for this compound meta") def configuration_keys(self): """ @@ -262,7 +262,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -408,7 +408,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc') - msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/specutils/pypeit_loaders.py b/pypeit/specutils/pypeit_loaders.py index 17f773f7de..eebf118a3f 100644 --- a/pypeit/specutils/pypeit_loaders.py +++ b/pypeit/specutils/pypeit_loaders.py @@ -79,12 +79,12 @@ def _enforce_monotonic_wavelengths(wave, flux, ivar, strict=True): if strict: # Wavelengths are not monotonic, but the user expects them to be, so # fault. - msgs.error('Wavelengths are not monotonically increasing! Circumvent this fault by ' + raise PypeItError('Wavelengths are not monotonically increasing! Circumvent this fault by ' 'setting strict=False, but BEWARE that this is likely the result of an ' 'error in the data reduction!') # Wavelengths are not monotonic, but the user wants to keep going. - msgs.warn('Wavelengths are not monotonically increasing! Strict was set to False, so ' + msgs.warning('Wavelengths are not monotonically increasing! Strict was set to False, so ' 'measurements after a negative step in wavelength are removed from the constructed ' 'spectrum. BEWARE that this is likely the result of an error in the data ' 'reduction!') @@ -191,7 +191,7 @@ def pypeit_spec1d_loader(filename, extract=None, fluxed=True, strict=True, chk_v sobjs = specobjs.SpecObjs.from_fitsfile(filename, chk_version=chk_version) except PypeItError: file_pypeit_version = astropy.io.fits.getval(filename, 'VERSPYP', 'PRIMARY') - msgs.error(f'Unable to ingest {filename.name} using pypeit.specobjs module from your version ' + raise PypeItError(f'Unable to ingest {filename.name} using pypeit.specobjs module from your version ' f'of PypeIt ({__version__}). The version used to write the file is ' f'{file_pypeit_version}. 
If these are different, you may need to re-reduce ' 'your data using your current PypeIt version or install the matching version ' @@ -204,9 +204,9 @@ def pypeit_spec1d_loader(filename, extract=None, fluxed=True, strict=True, chk_v _wave, _flux, _ivar, _gpm = sobj.get_box_ext(fluxed=_cal) if _ext == 'BOX' \ else sobj.get_opt_ext(fluxed=_cal) if not np.all(_gpm): - msgs.warn(f'Ignoring {np.sum(np.logical_not(_gpm))} masked pixels.') + msgs.warning(f'Ignoring {np.sum(np.logical_not(_gpm))} masked pixels.') if not np.any(_gpm): - msgs.warn(f'Spectrum {sobj.NAME} is fully masked and will be ignored!') + msgs.warning(f'Spectrum {sobj.NAME} is fully masked and will be ignored!') continue _wave, _flux, _ivar = _enforce_monotonic_wavelengths(_wave[_gpm], _flux[_gpm], _ivar[_gpm], strict=strict) @@ -261,7 +261,7 @@ def pypeit_onespec_loader(filename, grid=False, strict=True, chk_version=True, * spec = onespec.OneSpec.from_file(filename, chk_version=chk_version) except PypeItError: file_pypeit_version = astropy.io.fits.getval(filename, 'VERSPYP', 'PRIMARY') - msgs.error(f'Unable to ingest {filename.name} using pypeit.specobjs module from your version ' + raise PypeItError(f'Unable to ingest {filename.name} using pypeit.specobjs module from your version ' f'of PypeIt ({__version__}). The version used to write the file is ' f'{file_pypeit_version}. If these are different, you may need to re-reduce ' 'your data using your current PypeIt version or install the matching version ' @@ -327,5 +327,5 @@ def pypeit_spec1d_loader_nolist(filename, extract=None, fluxed=True, **kwargs): calibration hasn't been performed or ``fluxed=False``, the spectrum is returned in counts. """ - msgs.error(f'The spec1d file {filename.name} cannot be ingested into a Spectrum object.' + raise PypeItError(f'The spec1d file {filename.name} cannot be ingested into a Spectrum object.' f'{msgs.newline()}Please use the SpectrumList object for spec1d files.') diff --git a/pypeit/tests/test_msgs.py b/pypeit/tests/test_msgs.py index 84288d7dd4..474ec7c2f0 100644 --- a/pypeit/tests/test_msgs.py +++ b/pypeit/tests/test_msgs.py @@ -27,7 +27,7 @@ def test_log_write(): def test_msgs(): msgs = pypmsgs.Messages(None, verbosity=1) msgs.info("test 123") - msgs.warn("test 123") + msgs.warning("test 123") msgs.bug("test 123") msgs.work("test 123") msgs.close() diff --git a/pypeit/tracepca.py b/pypeit/tracepca.py index 14ef7a3a44..06ea4f2ec6 100644 --- a/pypeit/tracepca.py +++ b/pypeit/tracepca.py @@ -235,7 +235,7 @@ def predict(self, x): if self.is_empty: raise ValueError('TracePCA object is empty; re-instantiate or run decompose().') if self.pca_coeffs_model is None: - msgs.error('PCA coefficients have not been modeled; run build_interpolator first.') + raise PypeItError('PCA coefficients have not been modeled; run build_interpolator first.') return pca.pca_predict(x, self.pca_coeffs_model, self.pca_components, self.pca_mean, x).T def _bundle(self, ext='PCA'): @@ -268,7 +268,7 @@ def _parse(cls, hdu, hdu_prefix=None, **kwargs): # This should only ever read one hdu! 
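With PypeItError raised instead of msgs.error() terminating the process, callers of TracePCA.predict can now distinguish "coefficients not yet modeled" from the ValueError raised for an empty object. A minimal sketch, assuming the `from pypeit import PypeItError` import introduced later in this series; predict_or_none is hypothetical and not part of this patch:

from pypeit import PypeItError

def predict_or_none(trace_pca, x):
    """Evaluate the PCA prediction, returning None if the model is not ready."""
    try:
        return trace_pca.predict(x)
    except PypeItError:
        # Coefficients not yet modeled; run trace_pca.build_interpolator first,
        # as the error message above instructs.
        return None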
if len(parsed_hdus) > 1: - msgs.error('CODING ERROR: Parsing saved TracePCA instances should only parse 1 HDU, ' + raise PypeItError('CODING ERROR: Parsing saved TracePCA instances should only parse 1 HDU, ' 'independently of the PCA PypeItFit models.') # Check if any models exist @@ -405,7 +405,7 @@ def pca_trace_object(trace_cen, order=None, trace_bpm=None, min_length=0.6, npca if _order.size == 1: _order = np.clip(order - np.arange(cenpca.npca), 1, None).astype(int) if _order.size != cenpca.npca: - msgs.error('Number of polynomial orders does not match the number of PCA components.') + raise PypeItError('Number of polynomial orders does not match the number of PCA components.') msgs.info('Order of function fit to each component: {0}'.format(_order)) # Apply a 10% relative error to each coefficient. This performs diff --git a/pypeit/utils.py b/pypeit/utils.py index 70e7d0efe2..5e1f457a0b 100644 --- a/pypeit/utils.py +++ b/pypeit/utils.py @@ -200,7 +200,7 @@ def concat_to_setup_list(concat, norders, nexps): the exposure number. """ if len(norders) != len(nexps): - msgs.error('The number of elements in norders and nexps must match') + raise PypeItError('The number of elements in norders and nexps must match') nsetups = len(norders) setup_list = [] ind_start = 0 @@ -564,7 +564,7 @@ def spec_atleast_2d(wave, flux, ivar, gpm, log10_blaze_function=None, copy=False # Check the input if wave.shape[0] != flux.shape[0] or ivar.shape != flux.shape or gpm.shape != flux.shape \ or wave.ndim == 2 and wave.shape != flux.shape: - msgs.error('Input spectral arrays have mismatching shapes.') + raise PypeItError('Input spectral arrays have mismatching shapes.') if flux.ndim == 1: # Input flux is 1D @@ -763,7 +763,7 @@ def boxcar_smooth_rows(img, nave, wgt=None, mode='nearest', replace='original'): if wgt is not None and img.shape != wgt.shape: raise ValueError('Input image to smooth and weights must have the same shape.') if nave > img.shape[0]: - msgs.warn('Smoothing box is larger than the image size!') + msgs.warning('Smoothing box is larger than the image size!') # Construct the kernel for mean calculation _nave = np.fmin(nave, img.shape[0]) @@ -782,7 +782,7 @@ def boxcar_smooth_rows(img, nave, wgt=None, mode='nearest', replace='original'): elif replace == 'zero': smoothed_img[smoothed_img.mask] = 0.0 else: - msgs.error('Unrecognized value of replace') + raise PypeItError('Unrecognized value of replace') return smoothed_img.data @@ -920,7 +920,7 @@ def rebin_slice(a, newshape): rebinning to shape newshape """ if not len(a.shape) == len(newshape): - msgs.error('Dimension of a image does not match dimension of new requested image shape') + raise PypeItError('Dimension of a image does not match dimension of new requested image shape') slices = [slice(0, old, float(old) / new) for old, new in zip(a.shape, newshape)] coordinates = np.mgrid[slices] @@ -950,7 +950,7 @@ def rebinND(img, shape): rem0, rem1 = img.shape[0] % shape[0], img.shape[1] % shape[1] if rem0 != 0 or rem1 != 0: # In this case, the shapes are not an integer multiple... need to slice - msgs.warn("Input image shape is not an integer multiple of the requested shape. Flux is not conserved.") + msgs.warning("Input image shape is not an integer multiple of the requested shape. 
Flux is not conserved.") return rebin_slice(img, shape) # Convert input 2D image into a 4D array to make the rebinning easier sh = shape[0], img.shape[0] // shape[0], shape[1], img.shape[1] // shape[1] @@ -1112,9 +1112,9 @@ def fast_running_median(seq, window_size): # upon return (very bad). Added by JFH. Should we print out an error here? if (window_size > (len(seq) - 1)): - msgs.warn('window_size > len(seq)-1. Truncating window_size to len(seq)-1, but something is probably wrong....') + msgs.warning('window_size > len(seq)-1. Truncating window_size to len(seq)-1, but something is probably wrong....') if (window_size < 0): - msgs.warn( + msgs.warning( 'window_size is negative. This does not make sense something is probably wrong. Setting window size to 1') window_size = int(np.fmax(np.fmin(int(window_size), len(seq) - 1), 1)) @@ -1166,9 +1166,9 @@ def cross_correlate(x, y, maxlag): x = np.asarray(x) y = np.asarray(y) if x.ndim != 1: - msgs.error('x must be one-dimensional.') + raise PypeItError('x must be one-dimensional.') if y.ndim != 1: - msgs.error('y must be one-dimensional.') + raise PypeItError('y must be one-dimensional.') # py = np.pad(y.conj(), 2*maxlag, mode=mode) py = np.pad(y, 2 * maxlag, mode='constant') @@ -1432,7 +1432,7 @@ def replace_bad(frame, bpm): """ # Do some checks on the inputs if frame.shape != bpm.shape: - msgs.error("Input frame and BPM have different shapes") + raise PypeItError("Input frame and BPM have different shapes") # Replace bad pixels with the nearest (good) neighbour msgs.info("Replacing bad pixels") ind = scipy.ndimage.distance_transform_edt(bpm, return_distances=False, return_indices=True) @@ -1944,9 +1944,9 @@ def find_single_file(file_pattern, required: bool=False) -> pathlib.Path: """ files = sorted(glob.glob(file_pattern)) if len(files) > 1: - msgs.warn(f'Found multiple files matching {file_pattern}; using {files[0]}') + msgs.warning(f'Found multiple files matching {file_pattern}; using {files[0]}') if len(files) == 0 and required: - msgs.error(f'No files matching pattern: {file_pattern}') + raise PypeItError(f'No files matching pattern: {file_pattern}') return None if len(files) == 0 else pathlib.Path(files[0]) diff --git a/pypeit/wavecalib.py b/pypeit/wavecalib.py index 18600f8a38..e92d23b2bb 100644 --- a/pypeit/wavecalib.py +++ b/pypeit/wavecalib.py @@ -100,7 +100,7 @@ def _bundle(self): # Spat_ID are always first if self.spat_ids is None: - msgs.error('Cannot write WaveCalib without spat_ids!') + raise PypeItError('Cannot write WaveCalib without spat_ids!') _d.append(dict(spat_ids=self.spat_ids)) # Echelle orders if self.ech_orders is not None: @@ -256,7 +256,7 @@ def chk_synced(self, slits): """ if not np.array_equal(self.spat_ids, slits.spat_id): - msgs.error('Your wavelength solutions are out of sync with your slits. Remove ' + raise PypeItError('Your wavelength solutions are out of sync with your slits. 
Remove ' 'Calibrations and restart from scratch.') def build_fwhmimg(self, tilts, slits, initial=False, spat_flexure=None): @@ -278,7 +278,7 @@ def build_fwhmimg(self, tilts, slits, initial=False, spat_flexure=None): """ # Check spatial flexure type if (spat_flexure is not None) and (not isinstance(spat_flexure, float)): - msgs.error("Spatial flexure must be None or float") + raise PypeItError("Spatial flexure must be None or float") # Generate the slit mask and slit edges - pad slitmask by 1 for edge effects slitmask = slits.slit_img(pad=1, initial=initial, flexure=spat_flexure) slits_left, slits_right, _ = slits.select_edges(initial=initial, flexure=spat_flexure) @@ -323,7 +323,7 @@ def build_waveimg(self, tilts, slits, spat_flexure=None, spec_flexure=None): """ # Check spatial flexure type if (spat_flexure is not None) and (not isinstance(spat_flexure, float)): - msgs.error("Spatial flexure must be None or float") + raise PypeItError("Spatial flexure must be None or float") # Check spectral flexure type if spec_flexure is None: spec_flex = np.zeros(slits.nslits) elif isinstance(spec_flexure, float): spec_flex = spec_flexure*np.ones(slits.nslits) @@ -344,14 +344,14 @@ def build_waveimg(self, tilts, slits, spat_flexure=None, spec_flexure=None): if self.par['ech_separate_2d']: # Error checking if self.det_img is None: - msgs.error("This WaveCalib object was not generated with ech_separate_2d=True") + raise PypeItError("This WaveCalib object was not generated with ech_separate_2d=True") # Unpack some 2-d fit parameters if this is echelle for islit in np.where(ok_slits)[0]: slit_spat = slits.spat_id[islit] thismask = (slitmask == slit_spat) if not np.any(thismask): - msgs.error("Something failed in wavelengths or masking..") + raise PypeItError("Something failed in wavelengths or masking..") if self.par['echelle'] and self.par['ech_2dfit']: # evaluate solution -- if self.par['ech_separate_2d']: @@ -501,7 +501,7 @@ def __init__(self, msarc, slits, spectrograph, par, lamps, # TODO: This should be a stop-gap to avoid instantiation of this with # any Nones. 
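Because the spatial-flexure type checks above now raise rather than exit, invalid input can be trapped by the caller. A hedged usage sketch (wv_calib, tilts, and slits are assumed to be a WaveCalib instance with its matching tilt and slit calibrations; safe_build_waveimg is hypothetical):

from pypeit import PypeItError

def safe_build_waveimg(wv_calib, tilts, slits, spat_flexure=None):
    """Build a wavelength image, returning None on invalid input."""
    try:
        return wv_calib.build_waveimg(tilts, slits, spat_flexure=spat_flexure)
    except PypeItError as err:
        # e.g., spat_flexure passed as an array instead of None or a float
        print(f'Could not build wavelength image: {err}')
        return None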
if None in [msarc, slits, spectrograph, par, lamps]: - msgs.error('CODING ERROR: Cannot instantiate BuildWaveCalib with Nones.') + raise PypeItError('CODING ERROR: Cannot instantiate BuildWaveCalib with Nones.') # Required parameters self.msarc = msarc @@ -515,7 +515,7 @@ def __init__(self, msarc, slits, spectrograph, par, lamps, # Optional parameters self.bpm = self.msarc.select_flag(flag='BPM') if msbpm is None else msbpm.astype(bool) if self.bpm.shape != self.msarc.shape: - msgs.error('Bad-pixel mask is not the same shape as the arc image.') + raise PypeItError('Bad-pixel mask is not the same shape as the arc image.') self.qa_path = qa_path self.det = det @@ -690,7 +690,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, elif method == 'full_template': # Now preferred if self.binspectral is None: - msgs.error("You must specify binspectral for the full_template method!") + raise PypeItError("You must specify binspectral for the full_template method!") final_fit, order_vec = autoid.full_template(arccen, self.lamps, self.par, ok_mask_idx, self.det, self.binspectral, slit_ids=self.slits.slitord_id, measured_fwhms=self.measured_fwhms, @@ -735,7 +735,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, self.arccen = arccen else: - msgs.error('Unrecognized wavelength calibration method: {:}'.format(method)) + raise PypeItError('Unrecognized wavelength calibration method: {:}'.format(method)) # Build the DataContainer if self.par['redo_slits'] is not None: @@ -846,7 +846,7 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets # Are there few enough? max_bad = int(len(order_dets[idet])*bad_orders_maxfrac) if np.sum(in_det) > max_bad: - msgs.warn(f"Too many bad orders in detector={dets[idet]} to attempt a refit.") + msgs.warning(f"Too many bad orders in detector={dets[idet]} to attempt a refit.") continue # Loop for order in bad_orders[in_det]: @@ -871,7 +871,7 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets fwhm=fwhm) if not patt_dict_slit['acceptable']: - msgs.warn(f"Order {order} is still not acceptable after attempt to reidentify.") + msgs.warning(f"Order {order} is still not acceptable after attempt to reidentify.") continue # Fit me -- RMS may be too high again @@ -909,7 +909,7 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets self.wvc_bpm[iord] = False fixed = True else: - msgs.warn(f'New RMS is too high (>{frac_rms_thresh}xRMS threshold). ' + msgs.warning(f'New RMS is too high (>{frac_rms_thresh}xRMS threshold). ' f'Not updating wavelength solution.') # return fixed @@ -942,7 +942,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): List of integer lists providing list of the orders. """ if self.spectrograph.pypeline != 'Echelle': - msgs.error('Cannot execute echelle_2dfit for a non-echelle spectrograph.') + raise PypeItError('Cannot execute echelle_2dfit for a non-echelle spectrograph.') msgs.info('Fitting 2-d wavelength solution for echelle....') @@ -1003,7 +1003,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): # Fit if len(all_order) < 2: - msgs.warn(f"Fewer than 2 orders to fit for detector {idet}. Skipping") + msgs.warning(f"Fewer than 2 orders to fit for detector {idet}. 
Skipping") save_order_dets.append([]) # Add a dummy fit fit2ds.append(fitting.PypeItFit()) @@ -1021,7 +1021,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): # QA if not skip_QA: if wv_calib.calib_key is None: - msgs.warn('WaveCalib object provided does not have a defined calibration ' + msgs.warning('WaveCalib object provided does not have a defined calibration ' 'key. The QA files will not include this key in the file name, ' 'meaning that existing QA files may be overwritten.') calib_key = '' @@ -1143,7 +1143,7 @@ def run(self, skip_QA=False, debug=False, bad_rms = rms > wave_rms_thresh if np.any(bad_rms): self.wvc_bpm[bad_rms] = True - msgs.warn("Masking one or more bad orders (RMS)") + msgs.warning("Masking one or more bad orders (RMS)") # Fit fit2ds, dets, order_dets = self.echelle_2dfit( self.wv_calib, skip_QA = skip_QA, debug=debug) @@ -1169,7 +1169,7 @@ def run(self, skip_QA=False, debug=False, # Check that we have at least one good 2D fit if not np.any([fit2d.success for fit2d in self.wv_calib.wv_fit2d]): - msgs.error("No successful 2D Wavelength fits. Cannot proceed.") + raise PypeItError("No successful 2D Wavelength fits. Cannot proceed.") # Deal with mask self.update_wvmask() diff --git a/pypeit/wavemodel.py b/pypeit/wavemodel.py index 3d1b124b38..4ea3c190e7 100644 --- a/pypeit/wavemodel.py +++ b/pypeit/wavemodel.py @@ -497,10 +497,10 @@ def optical_modelThAr(resolution, waveminmax=(3000.,10500.), dlam=40.0, thar_spec[thar_spec<0.] = 0. # Remove regions of the spectrum outside the wavelength covered by the ThAr model if wv_minnp.max(th_wv)] = 0. if thar_outfile is not None: @@ -776,7 +776,7 @@ def create_OHlinelist(resolution, waveminmax=(0.8,2.6), dlam=40.0, flgd=True, ni flgd=flgd, nirsky_outfile=nirsky_outfile, debug=debug) if fwhm is None: - msgs.warn("No min FWHM for the line detection set. Derived from the resolution at the center of the spectrum") + msgs.warning("No min FWHM for the line detection set. Derived from the resolution at the center of the spectrum") wl_cent = np.average(wavelength) wl_fwhm = wl_cent / resolution wl_bin = np.abs((wavelength-np.roll(wavelength,1))[np.where(np.abs(wavelength-wl_cent)==np.min(np.abs(wavelength-wl_cent)))]) @@ -784,15 +784,15 @@ def create_OHlinelist(resolution, waveminmax=(0.8,2.6), dlam=40.0, flgd=True, ni # the minimum fwhm of the spectrum fwhm = 1.1 * wl_fwhm / wl_bin[0] if fwhm < 1.: - msgs.warn("Lines are unresolved. Setting FWHM=2.pixels") + msgs.warning("Lines are unresolved. Setting FWHM=2.pixels") fwhm = 2. if line_name is None: - msgs.warn("No line_name as been set. The file will contain XXX as ion") + msgs.warning("No line_name as been set. The file will contain XXX as ion") line_name = 'XXX' if file_root_name is None: - msgs.warn("No file_root_name as been set. The file will called OH_SKY_lines.dat") + msgs.warning("No file_root_name as been set. The file will called OH_SKY_lines.dat") file_root_name = 'OH_SKY' create_linelist(wavelength, spec, fwhm=fwhm, sigdetec=sigdetec, line_name=line_name, @@ -851,7 +851,7 @@ def create_ThArlinelist(resolution, waveminmax=(3000.,10500.), dlam=40.0, flgd=T wavelength, spec = optical_modelThAr(resolution, waveminmax=waveminmax, dlam=dlam, flgd=flgd, thar_outfile=thar_outfile, debug=debug) if fwhm is None: - msgs.warn("No min FWHM for the line detection set. Derived from the resolution at the center of the spectrum") + msgs.warning("No min FWHM for the line detection set. 
Derived from the resolution at the center of the spectrum") wl_cent = np.average(wavelength) wl_fwhm = wl_cent / resolution wl_bin = np.abs((wavelength-np.roll(wavelength,1))[np.where(np.abs(wavelength-wl_cent)==np.min(np.abs(wavelength-wl_cent)))]) @@ -859,15 +859,15 @@ def create_ThArlinelist(resolution, waveminmax=(3000.,10500.), dlam=40.0, flgd=T # the minimum fwhm of the spectrum fwhm = 1.1 * wl_fwhm / wl_bin[0] if fwhm < 1.: - msgs.warn("Lines are unresolved. Setting FWHM=2.*pixels") + msgs.warning("Lines are unresolved. Setting FWHM=2.*pixels") fwhm = 2. if line_name is None: - msgs.warn("No line_name as been set. The file will contain XXX as ion") + msgs.warning("No line_name as been set. The file will contain XXX as ion") line_name = 'XXX' if file_root_name is None: - msgs.warn("No file_root_name as been set. The file will called ThAr_lines.dat") + msgs.warning("No file_root_name as been set. The file will called ThAr_lines.dat") file_root_name = 'ThAr' create_linelist(wavelength, spec, fwhm=fwhm, sigdetec=sigdetec, line_name=line_name, diff --git a/pypeit/wavetilts.py b/pypeit/wavetilts.py index b638cfc4c1..eebe19e78e 100644 --- a/pypeit/wavetilts.py +++ b/pypeit/wavetilts.py @@ -121,7 +121,7 @@ def is_synced(self, slits): """ if not np.array_equal(self.spat_id, slits.spat_id): - msgs.error('Your tilt solutions are out of sync with your slits. Remove calibrations ' + raise PypeItError('Your tilt solutions are out of sync with your slits. Remove calibrations ' 'and restart from scratch.') def fit2tiltimg(self, slitmask, flexure=None): @@ -210,7 +210,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, if cal_file.exists(): tilt_img_dict = buildimage.TiltImage.from_file(cal_file, chk_version=chk_version) else: - msgs.error(f'Tilt image {str(cal_file)} NOT FOUND.') + raise PypeItError(f'Tilt image {str(cal_file)} NOT FOUND.') # get slits slitmask = None @@ -226,7 +226,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, right = arc.resize_slits2arc(tilt_img_dict.image.shape, _slitmask.shape, _right) else: slits = None - msgs.warn(f'Slits file {str(cal_file)} NOT FOUND.') + msgs.warning(f'Slits file {str(cal_file)} NOT FOUND.') # get waveimg same_size = (slits.nspec, slits.nspat) == tilt_img_dict.image.shape @@ -237,7 +237,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False, tilts = self.fit2tiltimg(slitmask, flexure=self.spat_flexure) waveimg = wv_calib.build_waveimg(tilts, slits, spat_flexure=self.spat_flexure) else: - msgs.warn('Could not load Wave image to show with tilts image.') + msgs.warning('Could not load Wave image to show with tilts image.') # Show # tilt image @@ -755,7 +755,7 @@ def run(self, doqa=True, debug=False, show=False): bpm=self.arccen_bpm[:,slit_idx], debug=debug) if self.lines_spec is None: - msgs.warn('Did not recover any lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + + msgs.warning('Did not recover any lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + '. 
This slit/order will not reduced!') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB') continue @@ -771,7 +771,7 @@ def run(self, doqa=True, debug=False, show=False): # IF there are < 2 usable arc lines for tilt tracing, PCA fit does not work and the reduction crushes # TODO investigate why some slits have <2 usable arc lines if np.sum(self.trace_dict['use_tilt']) < 2: - msgs.warn('Less than 2 usable arc lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + + msgs.warning('Less than 2 usable arc lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + '. This slit/order will not reduced!') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB') continue @@ -782,7 +782,7 @@ def run(self, doqa=True, debug=False, show=False): use_tilt_spec_cov = (self.trace_dict['tilts_spec'][:, self.trace_dict['use_tilt']].max() - self.trace_dict['tilts_spec'][:, self.trace_dict['use_tilt']].min()) / self.arccen.shape[0] if use_tilt_spec_cov < 0.1: - msgs.warn(f'The spectral coverage of the usable arc lines is {use_tilt_spec_cov:.3f} (less than 10%).' + + msgs.warning(f'The spectral coverage of the usable arc lines is {use_tilt_spec_cov:.3f} (less than 10%).' + ' This slit/order will not be reduced!') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB') continue @@ -801,7 +801,7 @@ def run(self, doqa=True, debug=False, show=False): # TODO: Is 95% the right threshold? _gpm = self.all_fit_dict[slit_idx]['pypeitFit'].bool_gpm if np.sum(np.logical_not(_gpm)) > 0.95 * _gpm.size: - msgs.warn(f'Large number of pixels rejected in the fit. This slit/order will not be reduced!') + msgs.warning(f'Large number of pixels rejected in the fit. This slit/order will not be reduced!') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB') continue self.coeffs[:self.spec_order[slit_idx]+1,:self.spat_order[slit_idx]+1,slit_idx] = coeff_out @@ -817,7 +817,7 @@ def run(self, doqa=True, debug=False, show=False): # Check that the tilts image has values that span a reasonable range # TODO: Is this the right threshold? if np.nanmax(self.tilts) - np.nanmin(self.tilts) < 0.8: - msgs.warn('Tilts image fit not good. This slit/order will not be reduced!') + msgs.warning('Tilts image fit not good. This slit/order will not be reduced!') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB') continue # Save to final image @@ -881,7 +881,7 @@ def make_tbl_tilt_traces(self): """ if self.all_trace_dict is None: - msgs.error('No tilts have been traced and fit yet. Run the run() method first.') + raise PypeItError('No tilts have been traced and fit yet. 
Run the run() method first.')

 # slit_ids slit_ids = np.array([]) @@ -948,7 +948,7 @@ def make_tbl_tilt_traces(self): tbl_tilt_traces[tbl_keys[i]] = np.expand_dims(arr, axis=0) if len(tbl_tilt_traces) == 0: - msgs.warn('No traced and fitted tilts have been found.') + msgs.warning('No traced and fitted tilts have been found.') return None return tbl_tilt_traces @@ -1011,7 +1011,7 @@ def show_tilts_mpl(tilt_img, tilt_traces, show_traces=False, left_edges=None, """ if tilt_traces is None: - return msgs.error('No tilts have been traced or fitted') + raise PypeItError('No tilts have been traced or fitted') if cut is None: cut = utils.growth_lim(tilt_img, 0.98, fac=1) From 6436aa732255a7fb2965d0d86e861e45266995a6 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 15:06:46 -0700 Subject: [PATCH 04/33] exceptions --- pypeit/__init__.py | 2 ++ pypeit/alignframe.py | 1 + pypeit/archive.py | 1 + pypeit/cache.py | 2 +- pypeit/calibframe.py | 2 +- pypeit/calibrations.py | 1 + pypeit/coadd1d.py | 1 + pypeit/coadd2d.py | 1 + pypeit/coadd3d.py | 1 + pypeit/core/arc.py | 1 + pypeit/core/coadd.py | 1 + pypeit/core/collate.py | 2 ++ pypeit/core/combine.py | 1 + pypeit/core/datacube.py | 1 + pypeit/core/extract.py | 1 + pypeit/core/findobj_skymask.py | 1 + pypeit/core/fitting.py | 1 + pypeit/core/flexure.py | 1 + pypeit/core/flux_calib.py | 1 + pypeit/core/framematch.py | 9 +++--- pypeit/core/mosaic.py | 1 + pypeit/core/parse.py | 2 +- pypeit/core/pixels.py | 1 + pypeit/core/procimg.py | 3 +- pypeit/core/pydl.py | 1 + pypeit/core/scattlight.py | 1 + pypeit/core/skysub.py | 2 ++ pypeit/core/telluric.py | 1 + pypeit/core/trace.py | 1 + pypeit/core/transform.py | 1 + pypeit/core/wave.py | 1 + pypeit/core/wavecal/autoid.py | 1 + pypeit/core/wavecal/waveio.py | 1 + pypeit/core/wavecal/wv_fitting.py | 2 +- pypeit/core/wavecal/wvutils.py | 2 +- pypeit/datamodel.py | 39 ++++++++++++++---------- pypeit/display/display.py | 3 +- pypeit/edgetrace.py | 8 +++-- pypeit/exceptions.py | 22 +++++++++++++ pypeit/extraction.py | 1 + pypeit/find_objects.py | 1 + pypeit/flatfield.py | 2 +- pypeit/images/bitmaskarray.py | 9 +++--- pypeit/images/buildimage.py | 1 + pypeit/images/combineimage.py | 1 + pypeit/images/mosaic.py | 1 + pypeit/images/pypeitimage.py | 1 + pypeit/images/rawimage.py | 1 + pypeit/inputfiles.py | 3 +- pypeit/io.py | 10 ++---- pypeit/manual_extract.py | 1 + pypeit/metadata.py | 1 + pypeit/par/pypeitpar.py | 1 + pypeit/par/util.py | 2 +- pypeit/pypeit.py | 1 + pypeit/pypeitdata.py | 13 +++++--- pypeit/pypeitsetup.py | 1 + pypeit/pypmsgs.py | 22 ++++++------- pypeit/scripts/arxiv_solution.py | 1 + pypeit/scripts/chk_flexure.py | 1 + pypeit/scripts/chk_noise_2dspec.py | 1 + pypeit/scripts/chk_plugins.py | 1 + pypeit/scripts/chk_scattlight.py | 2 +- pypeit/scripts/chk_wavecalib.py | 1 + pypeit/scripts/clean_cache.py | 1 + pypeit/scripts/coadd_1dspec.py | 1 + pypeit/scripts/coadd_datacube.py | 1 + pypeit/scripts/collate_1d.py | 1 + pypeit/scripts/compile_wvarxiv.py | 1 + pypeit/scripts/extract_datacube.py | 1 + pypeit/scripts/flux_calib.py | 1 + pypeit/scripts/identify.py | 1 + pypeit/scripts/parse_slits.py | 1 + pypeit/scripts/print_bpm.py | 2 +- pypeit/scripts/ql.py | 2 +- pypeit/scripts/run_pypeit.py | 1 + pypeit/scripts/run_to_calibstep.py | 1 + pypeit/scripts/sensfunc.py | 1 + pypeit/scripts/setup_coadd2d.py | 1 + pypeit/scripts/show_1dspec.py | 1 + pypeit/scripts/show_2dspec.py | 3 +- pypeit/scripts/show_pixflat.py | 1 + pypeit/scripts/skysub_regions.py | 1 + pypeit/scripts/tellfit.py | 1
+ pypeit/scripts/trace_edges.py | 1 + pypeit/scripts/view_fits.py | 1 + pypeit/sensfilearchive.py | 2 +- pypeit/sensfunc.py | 1 + pypeit/slittrace.py | 10 +++--- pypeit/spec2dobj.py | 1 + pypeit/specobj.py | 1 + pypeit/specobjs.py | 1 + pypeit/spectrographs/aat_uhrf.py | 1 + pypeit/spectrographs/apf_levy.py | 1 + pypeit/spectrographs/bok_bc.py | 1 + pypeit/spectrographs/gemini_flamingos.py | 1 + pypeit/spectrographs/gemini_gmos.py | 1 + pypeit/spectrographs/gemini_gnirs.py | 1 + pypeit/spectrographs/gtc_osiris.py | 1 + pypeit/spectrographs/jwst_nircam.py | 1 + pypeit/spectrographs/jwst_nirspec.py | 1 + pypeit/spectrographs/keck_deimos.py | 1 + pypeit/spectrographs/keck_esi.py | 1 + pypeit/spectrographs/keck_hires.py | 1 + pypeit/spectrographs/keck_kcwi.py | 1 + pypeit/spectrographs/keck_lris.py | 1 + pypeit/spectrographs/keck_mosfire.py | 1 + pypeit/spectrographs/keck_nires.py | 1 + pypeit/spectrographs/keck_nirspec.py | 1 + pypeit/spectrographs/lbt_luci.py | 1 + pypeit/spectrographs/lbt_mods.py | 1 + pypeit/spectrographs/ldt_deveny.py | 1 + pypeit/spectrographs/magellan_fire.py | 1 + pypeit/spectrographs/magellan_mage.py | 1 + pypeit/spectrographs/mdm_modspec.py | 1 + pypeit/spectrographs/mdm_osmos.py | 1 + pypeit/spectrographs/mmt_binospec.py | 1 + pypeit/spectrographs/mmt_bluechannel.py | 1 + pypeit/spectrographs/mmt_mmirs.py | 1 + pypeit/spectrographs/not_alfosc.py | 1 + pypeit/spectrographs/ntt_efosc2.py | 1 + pypeit/spectrographs/opticalmodel.py | 6 +++- pypeit/spectrographs/p200_dbsp.py | 1 + pypeit/spectrographs/p200_ngps.py | 1 + pypeit/spectrographs/p200_tspec.py | 1 + pypeit/spectrographs/shane_kast.py | 1 + pypeit/spectrographs/soar_goodman.py | 1 + pypeit/spectrographs/spectrograph.py | 1 + pypeit/spectrographs/subaru_focas.py | 1 + pypeit/spectrographs/tng_dolores.py | 1 + pypeit/spectrographs/util.py | 1 + pypeit/spectrographs/vlt_fors.py | 1 + pypeit/spectrographs/vlt_sinfoni.py | 1 + pypeit/spectrographs/vlt_xshooter.py | 1 + pypeit/spectrographs/wht_isis.py | 1 + pypeit/specutils/pypeit_loaders.py | 2 +- pypeit/tests/test_calibframe.py | 2 +- pypeit/tests/test_coadd.py | 2 +- pypeit/tests/test_collate_1d.py | 2 +- pypeit/tests/test_flux.py | 2 +- pypeit/tests/test_fluxspec.py | 2 +- pypeit/tests/test_match.py | 2 +- pypeit/tests/test_mosaic.py | 2 +- pypeit/tests/test_msgs.py | 34 --------------------- pypeit/tests/test_parse.py | 2 +- pypeit/tests/test_pkgdata.py | 2 +- pypeit/tests/test_pydl.py | 12 -------- pypeit/tests/test_pypeitimage.py | 2 +- pypeit/tests/test_sensfilearchive.py | 2 +- pypeit/tests/test_spec2dobj.py | 4 +-- pypeit/tests/test_spectrographs.py | 2 +- pypeit/tests/test_specutils.py | 2 +- pypeit/tracepca.py | 1 + pypeit/utils.py | 1 + pypeit/wavecalib.py | 1 + pypeit/wavetilts.py | 3 +- 156 files changed, 245 insertions(+), 134 deletions(-) create mode 100644 pypeit/exceptions.py delete mode 100644 pypeit/tests/test_msgs.py delete mode 100644 pypeit/tests/test_pydl.py diff --git a/pypeit/__init__.py b/pypeit/__init__.py index 34a03a4b18..c2c4c6cd69 100644 --- a/pypeit/__init__.py +++ b/pypeit/__init__.py @@ -22,6 +22,8 @@ import logging from pypeit import logger msgs = logger.get_logger(level=logging.DEBUG) +# Import all the exceptions +from pypeit.exceptions import * # Import and instantiate the data path parser # NOTE: This *MUST* come after msgs and __version__ are defined above diff --git a/pypeit/alignframe.py b/pypeit/alignframe.py index 02ccaac2b8..7531fe77d6 100644 --- a/pypeit/alignframe.py +++ b/pypeit/alignframe.py @@ -14,6 +14,7 @@ from 
pypeit import datamodel from pypeit import calibframe from pypeit import msgs +from pypeit import PypeItError class Alignments(calibframe.CalibFrame): diff --git a/pypeit/archive.py b/pypeit/archive.py index de29f6f222..9f69ff4eec 100644 --- a/pypeit/archive.py +++ b/pypeit/archive.py @@ -59,6 +59,7 @@ def get_target_metadata(file_info): from astropy.table import Table from pypeit import msgs +from pypeit import PypeItError class ArchiveMetadata(): diff --git a/pypeit/cache.py b/pypeit/cache.py index d6abde0fb4..c66fdd5cff 100644 --- a/pypeit/cache.py +++ b/pypeit/cache.py @@ -56,8 +56,8 @@ # NOTE: To avoid circular imports, avoid (if possible) importing anything from # pypeit into this module! Objects created or available in pypeit/__init__.py # are the exceptions, for now. -from pypeit.pypmsgs import PypeItPathError from pypeit import msgs +from pypeit import PypeItError, PypeItPathError from pypeit import __version__ diff --git a/pypeit/calibframe.py b/pypeit/calibframe.py index 91c01db676..add316e4a6 100644 --- a/pypeit/calibframe.py +++ b/pypeit/calibframe.py @@ -14,7 +14,7 @@ from astropy.io import fits from pypeit import msgs -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit import datamodel from pypeit import io diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py index 358038467f..338fca61d8 100644 --- a/pypeit/calibrations.py +++ b/pypeit/calibrations.py @@ -16,6 +16,7 @@ from pypeit import __version__ from pypeit import msgs +from pypeit import PypeItError from pypeit import alignframe from pypeit import flatfield from pypeit import edgetrace diff --git a/pypeit/coadd1d.py b/pypeit/coadd1d.py index dce4bcf607..d617686c0c 100644 --- a/pypeit/coadd1d.py +++ b/pypeit/coadd1d.py @@ -21,6 +21,7 @@ from pypeit import sensfunc from pypeit import specobjs from pypeit import msgs +from pypeit import PypeItError from pypeit.core import coadd, flux_calib from pypeit.history import History diff --git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index 517efac203..072606ae77 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -17,6 +17,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit import specobjs from pypeit import slittrace diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 853b2a1a1b..2939e5d3d8 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -15,6 +15,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import alignframe, datamodel, flatfield, io, sensfunc, spec2dobj, utils from pypeit.core.flexure import calculate_image_phase from pypeit.core import datacube, extract, flux_calib, parse diff --git a/pypeit/core/arc.py b/pypeit/core/arc.py index eea697e546..8d62d2dda8 100644 --- a/pypeit/core/arc.py +++ b/pypeit/core/arc.py @@ -17,6 +17,7 @@ from astropy import stats from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit.core import fitting from IPython import embed diff --git a/pypeit/core/coadd.py b/pypeit/core/coadd.py index 0f728da635..2e3aa2d525 100644 --- a/pypeit/core/coadd.py +++ b/pypeit/core/coadd.py @@ -23,6 +23,7 @@ from astropy import convolution from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths from pypeit import utils from pypeit.core import fitting diff --git a/pypeit/core/collate.py b/pypeit/core/collate.py index 1ddc6be057..ffb6dc3dd0 100644 --- a/pypeit/core/collate.py +++ b/pypeit/core/collate.py @@ -21,6 +21,8 @@ from 
pypeit import specobjs from pypeit.spectrographs.util import load_spectrograph from pypeit import msgs +from pypeit import PypeItError + class SourceObject: diff --git a/pypeit/core/combine.py b/pypeit/core/combine.py index 22db2479e1..f58066ff64 100644 --- a/pypeit/core/combine.py +++ b/pypeit/core/combine.py @@ -7,6 +7,7 @@ from astropy import stats from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from IPython import embed diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index c6958f8110..2011df3db9 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -15,6 +15,7 @@ import numpy as np from pypeit import msgs, utils, specobj, specobjs +from pypeit import PypeItError from pypeit.core import coadd, extract, flux_calib # Use a fast histogram for speed! diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 3f1f526a74..e01e769915 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -15,6 +15,7 @@ from IPython import embed from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit import bspline from pypeit.core import pydl diff --git a/pypeit/core/findobj_skymask.py b/pypeit/core/findobj_skymask.py index 08ee18a1c1..f8128db854 100644 --- a/pypeit/core/findobj_skymask.py +++ b/pypeit/core/findobj_skymask.py @@ -14,6 +14,7 @@ from astropy import table from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit import specobj from pypeit import specobjs diff --git a/pypeit/core/fitting.py b/pypeit/core/fitting.py index e2b0901695..d16e201f6d 100644 --- a/pypeit/core/fitting.py +++ b/pypeit/core/fitting.py @@ -16,6 +16,7 @@ from pypeit.core import pydl from pypeit import bspline from pypeit import msgs +from pypeit import PypeItError from pypeit.datamodel import DataContainer from IPython import embed diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index 01f385e99b..28ac90973b 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -27,6 +27,7 @@ from linetools.spectra import xspectrum1d from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths from pypeit import io from pypeit.core.wavecal import autoid diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index 19c994efc5..6822b73599 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -21,6 +21,7 @@ from astropy import stats from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit import bspline from pypeit import io diff --git a/pypeit/core/framematch.py b/pypeit/core/framematch.py index 0d4a8bcc13..52bb4711ad 100644 --- a/pypeit/core/framematch.py +++ b/pypeit/core/framematch.py @@ -10,6 +10,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit.bitmask import BitMask class FrameTypeBitMask(BitMask): @@ -94,13 +95,11 @@ def valid_frametype(frametype, quiet=False, raise_error=False): """ good_frametype = frametype in FrameTypeBitMask().keys() if not good_frametype: - _f = None + message = f'{frametype} is not a valid PypeIt frame type.' 
if not quiet and not raise_error: - _f = msgs.warning + msgs.warning(message) elif raise_error: - _f = raise PypeItError - if _f is not None: - _f(f'{frametype} is not a valid PypeIt frame type.') + raise PypeItError(message) return good_frametype diff --git a/pypeit/core/mosaic.py b/pypeit/core/mosaic.py index 7c09378c16..4fe0b58bcc 100644 --- a/pypeit/core/mosaic.py +++ b/pypeit/core/mosaic.py @@ -11,6 +11,7 @@ from scipy import ndimage from pypeit import msgs +from pypeit import PypeItError from pypeit.core import transform from pypeit.utils import inverse diff --git a/pypeit/core/parse.py b/pypeit/core/parse.py index 5dfeb615df..a29764cee7 100644 --- a/pypeit/core/parse.py +++ b/pypeit/core/parse.py @@ -13,7 +13,7 @@ # Logging from pypeit import msgs - +from pypeit import PypeItError def load_sections(string, fmt_iraf=True): """ diff --git a/pypeit/core/pixels.py b/pypeit/core/pixels.py index 38ecdfe133..7b72bcafb1 100644 --- a/pypeit/core/pixels.py +++ b/pypeit/core/pixels.py @@ -9,6 +9,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError def phys_to_pix(array, pixlocn, axis): diff --git a/pypeit/core/procimg.py b/pypeit/core/procimg.py index cd58e35c18..c0401d7a13 100644 --- a/pypeit/core/procimg.py +++ b/pypeit/core/procimg.py @@ -16,6 +16,7 @@ import scipy.signal from pypeit import msgs +from pypeit import PypeItError from pypeit import utils @@ -1127,7 +1128,7 @@ def trim_frame(frame, mask): `numpy.ndarray`_: Trimmed image Raises: - :class:`~pypeit.pypmsgs.PypeItError`: + :class:`~pypeit.exceptions.PypeItError`: Error raised if the trimmed image includes masked values because the shape of the valid region is odd. """ diff --git a/pypeit/core/pydl.py b/pypeit/core/pydl.py index 078f215b4c..983924e7fe 100644 --- a/pypeit/core/pydl.py +++ b/pypeit/core/pydl.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit.core import basis from pypeit.core import fitting diff --git a/pypeit/core/scattlight.py b/pypeit/core/scattlight.py index b794cc3851..5d027ba4d5 100644 --- a/pypeit/core/scattlight.py +++ b/pypeit/core/scattlight.py @@ -10,6 +10,7 @@ from IPython import embed from pypeit import msgs, utils +from pypeit import PypeItError def pad_frame(_frame, detpad=300): diff --git a/pypeit/core/skysub.py b/pypeit/core/skysub.py index 4183fc415c..b59bfb66d3 100644 --- a/pypeit/core/skysub.py +++ b/pypeit/core/skysub.py @@ -17,8 +17,10 @@ from pypeit.core import fitting from pypeit.core import procimg from pypeit import msgs, utils, bspline, slittrace +from pypeit import PypeItError from pypeit.display import display + def skysub_npoly(thismask): """ Utility routine used by global_skysub and local_skysub_extract. 
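For reference, the pattern in the valid_frametype() hunk above (repeated throughout this series) replaces the indirect dispatch through the _f function pointer with a direct msgs.warning() call or an explicit raise of PypeItError. A minimal, self-contained sketch of the resulting control flow — with a hypothetical VALID set standing in for FrameTypeBitMask().keys(), and print()/ValueError standing in for msgs.warning()/PypeItError, neither of which is importable in this stand-alone snippet:

    # Sketch only: VALID is a hypothetical stand-in for FrameTypeBitMask().keys();
    # print() and ValueError stand in for msgs.warning() and PypeItError.
    VALID = {'bias', 'dark', 'arc', 'science'}

    def valid_frametype(frametype, quiet=False, raise_error=False):
        good_frametype = frametype in VALID
        if not good_frametype:
            message = f'{frametype} is not a valid PypeIt frame type.'
            if not quiet and not raise_error:
                # Warn, but let the caller act on the returned flag.
                print(f'WARNING: {message}')
            elif raise_error:
                raise ValueError(message)
        return good_frametype

    assert valid_frametype('bias')        # valid type: True, silent
    assert not valid_frametype('foo')     # invalid type: False, warns

The explicit raise also lets callers catch the narrower failure mode directly instead of passing a cls keyword, which is the motivation for the pypeit/exceptions.py hierarchy introduced later in this patch.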
diff --git a/pypeit/core/telluric.py b/pypeit/core/telluric.py index aace313b69..fc6b1c433a 100644 --- a/pypeit/core/telluric.py +++ b/pypeit/core/telluric.py @@ -17,6 +17,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths from pypeit import io from pypeit.core import flux_calib diff --git a/pypeit/core/trace.py b/pypeit/core/trace.py index 701726eaa5..4b8dc5ba30 100644 --- a/pypeit/core/trace.py +++ b/pypeit/core/trace.py @@ -22,6 +22,7 @@ from astropy.stats import sigma_clipped_stats, sigma_clip from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit import sampling from pypeit.core import arc diff --git a/pypeit/core/transform.py b/pypeit/core/transform.py index 2fcc3ad187..8bb779985e 100644 --- a/pypeit/core/transform.py +++ b/pypeit/core/transform.py @@ -10,6 +10,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError def affine_transform_matrix(scale=None, rotation=None, translation=None): diff --git a/pypeit/core/wave.py b/pypeit/core/wave.py index 913e47bfe6..c4c43f3b81 100644 --- a/pypeit/core/wave.py +++ b/pypeit/core/wave.py @@ -19,6 +19,7 @@ from pypeit import msgs +from pypeit import PypeItError from IPython import embed diff --git a/pypeit/core/wavecal/autoid.py b/pypeit/core/wavecal/autoid.py index 02b14bc892..ebc181de30 100644 --- a/pypeit/core/wavecal/autoid.py +++ b/pypeit/core/wavecal/autoid.py @@ -30,6 +30,7 @@ from pypeit import utils from pypeit import msgs +from pypeit import PypeItError from matplotlib import pyplot as plt from matplotlib import gridspec diff --git a/pypeit/core/wavecal/waveio.py b/pypeit/core/wavecal/waveio.py index 580d0b198f..ccc1771625 100644 --- a/pypeit/core/wavecal/waveio.py +++ b/pypeit/core/wavecal/waveio.py @@ -10,6 +10,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths from pypeit import cache from pypeit.core.wavecal import defs diff --git a/pypeit/core/wavecal/wv_fitting.py b/pypeit/core/wavecal/wv_fitting.py index 10211bdb8c..7b201c2db1 100644 --- a/pypeit/core/wavecal/wv_fitting.py +++ b/pypeit/core/wavecal/wv_fitting.py @@ -13,7 +13,7 @@ from pypeit.core.wavecal import defs from pypeit.core import fitting from pypeit import msgs - +from pypeit import PypeItError from pypeit import datamodel from IPython import embed diff --git a/pypeit/core/wavecal/wvutils.py b/pypeit/core/wavecal/wvutils.py index 866706db41..c335cc63d9 100644 --- a/pypeit/core/wavecal/wvutils.py +++ b/pypeit/core/wavecal/wvutils.py @@ -20,7 +20,7 @@ from pypeit import cache from pypeit import utils from pypeit.core import arc -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from IPython import embed diff --git a/pypeit/datamodel.py b/pypeit/datamodel.py index 7f30e3fa22..be1a57924b 100644 --- a/pypeit/datamodel.py +++ b/pypeit/datamodel.py @@ -468,7 +468,7 @@ def _validate(self): from astropy.table import Table from pypeit import io -from pypeit import msgs +from pypeit import msgs, PypeItDataModelError # TODO: There are methods in, e.g., doc/scripts/build_specobj_rst.py that output # datamodels for specific datacontainers. 
It would be useful if we had @@ -676,8 +676,10 @@ def __init__(self, d=None): else: break if dc is None: - raise PypeItError(f'Could not assign dictionary element {key} to datamodel ' - f'for {self.__class__.__name__}.', cls='PypeItDataModelError') + raise PypeItDataModelError( + f'Could not assign dictionary element {key} to datamodel for ' + f'{self.__class__.__name__}.' + ) setattr(self, key, dc) continue @@ -824,8 +826,10 @@ def _bundle(self, ext=None, transpose_arrays=False): try: d = Table(d) except: - raise PypeItError(f'Cannot force all elements of {self.__class__.__name__} datamodel' - 'into a single-row astropy Table!', cls='PypeItDataModelError') + raise PypeItDataModelError( + f'Cannot force all elements of {self.__class__.__name__} datamodel into a ' + 'single-row astropy Table!' + ) return [d] if ext is None else [{ext:d}] @@ -959,8 +963,10 @@ def _parse(cls, hdu, ext=None, ext_pseudo=None, transpose_table_arrays=False, _ext_pseudo = _ext if ext_pseudo is None else np.atleast_1d(ext_pseudo) if len(_ext_pseudo) != len(_ext): - raise PypeItError(f'Length of provided extension pseudonym list must match number of ' - f'extensions selected: {len(_ext)}.', cls='PypeItDataModelError') + raise PypeItDataModelError( + f'Length of provided extension pseudonym list must match number of extensions ' + f'selected: {len(_ext)}.' + ) str_ext = np.logical_not([isinstance(e, (int, np.integer)) for e in _ext_pseudo]) @@ -1144,13 +1150,12 @@ def _check_parsed(cls, version_passed, type_passed, chk_version=True): Flag to impose strict version checking. """ if not type_passed: - raise PypeItError(f'The HDU(s) cannot be parsed by a {cls.__name__} object!', - cls='PypeItDataModelError') + raise PypeItDataModelError(f'The HDU(s) cannot be parsed by a {cls.__name__} object!') if not version_passed: msg = f'Current version of {cls.__name__} object in code ({cls.version}) ' \ 'does not match version used to write your HDU(s)!' if chk_version: - raise PypeItError(msg, cls='PypeItDataModelError') + raise PypeItDataModelError(msg) else: msgs.warning(msg) @@ -1382,9 +1387,10 @@ def to_hdu(self, hdr=None, add_primary=False, primary_hdr=None, hdr_keys = np.array([k.upper() for k in self.keys()]) indx = np.isin(hdr_keys, list(_primary_hdr.keys())) if np.sum(indx) > 1: - raise PypeItError('CODING ERROR: Primary header should not contain keywords that are the ' - 'same as the datamodel for {0}.'.format(self.__class__.__name__), - cls='PypeItDataModelError') + raise PypeItDataModelError( + 'CODING ERROR: Primary header should not contain keywords that are the same ' + f'as the datamodel for {self.__class__.__name__}.' + ) # Initialize the base header _hdr = self._base_header(hdr=hdr) @@ -1392,9 +1398,10 @@ # with any datamodel keys. if _hdr is not None \ and np.any(np.isin([k.upper() for k in self.keys()], list(_hdr.keys()))): - raise PypeItError('CODING ERROR: Baseline header should not contain keywords that are the ' - 'same as the datamodel for {0}.'.format(self.__class__.__name__), - cls='PypeItDataModelError') + raise PypeItDataModelError( + 'CODING ERROR: Baseline header should not contain keywords that are the same as ' + f'the datamodel for {self.__class__.__name__}.'
+ ) # Construct the list of HDUs hdu = [] diff --git a/pypeit/display/display.py b/pypeit/display/display.py index a836167105..52e40b8b3e 100644 --- a/pypeit/display/display.py +++ b/pypeit/display/display.py @@ -23,6 +23,7 @@ from ginga.util import grc from pypeit import msgs +from pypeit import PypeItError from pypeit import io from pypeit import utils @@ -577,7 +578,7 @@ def show_tilts(viewer, ch, tilt_traces, yoff=0., xoff=0., points=True, nspec=Non """ if tilt_traces is None: - return raise PypeItError('No tilts have been traced or fitted') + raise PypeItError('No tilts have been traced or fitted') canvas = viewer.canvas(ch._chname) if clear_canvas: diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py index ae2f246595..41a2c9f9e2 100644 --- a/pypeit/edgetrace.py +++ b/pypeit/edgetrace.py @@ -45,6 +45,7 @@ from astropy import table from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit import sampling from pypeit import slittrace @@ -1253,9 +1254,10 @@ def from_hdu(cls, hdu, hdu_prefix=None, chk_version=True): # Check the bitmasks hdr_bitmask = BitMask.from_header(hdu['SOBELSIG'].header) if chk_version and hdr_bitmask.bits != self.bitmask.bits: - raise PypeItError('The bitmask in this fits file appear to be out of date! Recreate this ' 'file by re-running the relevant script or set chk_version=False.', - cls='PypeItBitMaskError') + raise PypeItBitMaskError( + 'The bitmask in this fits file appears to be out of date! Recreate this file by ' + 're-running the relevant script or set chk_version=False.' + ) return self diff --git a/pypeit/exceptions.py b/pypeit/exceptions.py new file mode 100644 index 0000000000..05af3c3515 --- /dev/null +++ b/pypeit/exceptions.py @@ -0,0 +1,22 @@ +""" +Provides pypeit-specific exceptions.
+""" + +__all__ = [ + 'PypeItError', + 'PypeItBitMaskError', + 'PypeItDataModelError', + 'PypeItPathError' +] + +class PypeItError(Exception): + pass + +class PypeItBitMaskError(PypeItError): + pass + +class PypeItDataModelError(PypeItError): + pass + +class PypeItPathError(PypeItError): + pass \ No newline at end of file diff --git a/pypeit/extraction.py b/pypeit/extraction.py index b1c62e8778..d72ba6c7d7 100644 --- a/pypeit/extraction.py +++ b/pypeit/extraction.py @@ -14,6 +14,7 @@ from abc import ABCMeta from pypeit import msgs, utils +from pypeit import PypeItError from pypeit.display import display from pypeit.core import skysub, extract, flexure, flat from IPython import embed diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index a351bf547b..bd4b07e9bc 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -15,6 +15,7 @@ from pypeit import specobjs from pypeit import msgs, utils +from pypeit import PypeItError from pypeit.display import display from pypeit.core import skysub, qa, parse, flat, flexure from pypeit.core import procimg diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index c992aee0a7..c78773a9f0 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -20,7 +20,7 @@ from IPython import embed from pypeit import msgs -from pypeit.pypmsgs import PypeItDataModelError +from pypeit import PypeItError, PypeItDataModelError from pypeit import utils from pypeit import bspline diff --git a/pypeit/images/bitmaskarray.py b/pypeit/images/bitmaskarray.py index ed307bf829..0cf3543496 100644 --- a/pypeit/images/bitmaskarray.py +++ b/pypeit/images/bitmaskarray.py @@ -18,7 +18,7 @@ from pypeit.datamodel import DataContainer from pypeit.bitmask import BitMask from pypeit import msgs - +from pypeit import PypeItError class BitMaskArray(DataContainer): """ @@ -189,9 +189,10 @@ def from_hdu(cls, hdu, chk_version=True, **kwargs): hdr = hdu[parsed_hdus[0]].header if isinstance(hdu, fits.HDUList) else hdu.header hdr_bitmask = BitMask.from_header(hdr) if chk_version and hdr_bitmask.bits != self.bitmask.bits: - raise PypeItError('The bitmask in this fits file appear to be out of date! Recreate this ' - 'file by re-running the relevant script or set chk_version=False.', - cls='PypeItBitMaskError') + raise PypeItBitMaskError( + 'The bitmask in this fits file appears to be out of date! Recreate this file by ' + 're-running the relevant script or set chk_version=False.'
+ ) return self diff --git a/pypeit/images/buildimage.py b/pypeit/images/buildimage.py index e9501208e6..aa658adadf 100644 --- a/pypeit/images/buildimage.py +++ b/pypeit/images/buildimage.py @@ -9,6 +9,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit.par import pypeitpar from pypeit.images import rawimage from pypeit.images import combineimage diff --git a/pypeit/images/combineimage.py b/pypeit/images/combineimage.py index 30c0020ff8..37a5f764dc 100644 --- a/pypeit/images/combineimage.py +++ b/pypeit/images/combineimage.py @@ -9,6 +9,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit.core import combine from pypeit.core import procimg from pypeit.par import pypeitpar diff --git a/pypeit/images/mosaic.py b/pypeit/images/mosaic.py index 9b79171962..f496f5c06d 100644 --- a/pypeit/images/mosaic.py +++ b/pypeit/images/mosaic.py @@ -16,6 +16,7 @@ from pypeit import io from pypeit.images.detector_container import DetectorContainer from pypeit import msgs +from pypeit import PypeItError class Mosaic(datamodel.DataContainer): diff --git a/pypeit/images/pypeitimage.py b/pypeit/images/pypeitimage.py index 99022fa6e5..27abdbac2c 100644 --- a/pypeit/images/pypeitimage.py +++ b/pypeit/images/pypeitimage.py @@ -12,6 +12,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit.images.imagebitmask import ImageBitMaskArray from pypeit.images.detector_container import DetectorContainer from pypeit.images.mosaic import Mosaic diff --git a/pypeit/images/rawimage.py b/pypeit/images/rawimage.py index 1d4da0f741..abce8f3239 100644 --- a/pypeit/images/rawimage.py +++ b/pypeit/images/rawimage.py @@ -14,6 +14,7 @@ from astropy import stats from pypeit import msgs +from pypeit import PypeItError from pypeit.core import arc from pypeit.core import parse from pypeit.core import procimg diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py index f2da8fffbc..7b564b26fd 100644 --- a/pypeit/inputfiles.py +++ b/pypeit/inputfiles.py @@ -18,6 +18,7 @@ from pypeit import utils from pypeit.io import files_from_extension from pypeit import msgs, __version__ +from pypeit import PypeItError from pypeit.spectrographs.util import load_spectrograph from pypeit.par.pypeitpar import PypeItPar @@ -601,7 +602,7 @@ def get_spectrograph(self): parameter. Raises: - :class:`~pypeit.pypmsgs.PypeItError`: + :class:`~pypeit.exceptions.PypeItError`: Raised if the relevant configuration parameter is not available. """ if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys(): diff --git a/pypeit/io.py b/pypeit/io.py index e0e948bd8b..4f6a318dd8 100644 --- a/pypeit/io.py +++ b/pypeit/io.py @@ -37,6 +37,7 @@ from linetools.spectra import xspectrum1d from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths from pypeit import __version__ @@ -363,10 +364,6 @@ def header_version_check(hdr, warning_only=True): msg = '{0} version used to create the file ({1}) '.format(package, hdr_version) \ + 'does not match the current system version ({0})!'.format(sys_version) if warning_only: - # TODO: I had to change pypeit/__init__.py to get these - # to show up. We eventually need to make pypmsgs play - # nice with warnings and other logging, or just give up - # on pypmsgs... 
warnings.warn(msg) else: raise ValueError(msg) @@ -674,11 +671,10 @@ def write_to_fits(d, ofile, name=None, hdr=None, overwrite=False, checksum=True) # Compress the file if the output filename has a '.gz' extension; # this is slow but still faster than if you have astropy.io.fits do # it directly - # TODO: use pypmsgs? if _ofile is not ofile: - pypeit.msgs.info('Compressing file: {0}'.format(_ofile)) + msgs.info('Compressing file: {0}'.format(_ofile)) compress_file(_ofile, overwrite=True) - pypeit.msgs.info('File written to: {0}'.format(ofile)) + msgs.info('File written to: {0}'.format(ofile)) def hdu_iter_by_ext(hdu, ext=None, hdu_prefix=None): diff --git a/pypeit/manual_extract.py b/pypeit/manual_extract.py index a15028c0e9..a3401894f9 100644 --- a/pypeit/manual_extract.py +++ b/pypeit/manual_extract.py @@ -10,6 +10,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import datamodel from pypeit.core import parse diff --git a/pypeit/metadata.py b/pypeit/metadata.py index 24c17493da..d88948b550 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -18,6 +18,7 @@ from astropy import table, time from pypeit import msgs +from pypeit import PypeItError from pypeit import inputfiles from pypeit.core import framematch from pypeit.core import parse diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index cbe045bee7..81ad157cec 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -74,6 +74,7 @@ def __init__(self, existing_par=None, foo=None): from pypeit.core.framematch import FrameTypeBitMask from pypeit.core import parse from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths diff --git a/pypeit/par/util.py b/pypeit/par/util.py index 8e4412f638..f3b80c9d8e 100644 --- a/pypeit/par/util.py +++ b/pypeit/par/util.py @@ -11,7 +11,7 @@ from configobj import ConfigObj from pypeit import msgs, __version__ -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError #----------------------------------------------------------------------- diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index b856463417..7d333235fe 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -20,6 +20,7 @@ from pypeit.calibframe import CalibFrame from pypeit.core import parse, wave, qa from pypeit import msgs +from pypeit import PypeItError from pypeit import calibrations from pypeit.images import buildimage from pypeit.display import display diff --git a/pypeit/pypeitdata.py b/pypeit/pypeitdata.py index 5027dae124..dc24f43c26 100644 --- a/pypeit/pypeitdata.py +++ b/pypeit/pypeitdata.py @@ -60,6 +60,7 @@ from IPython import embed from pypeit import msgs +from pypeit import PypeItError, PypeItPathError from pypeit import cache # NOTE: A better approach may be to subclass from Path. I briefly tried that, @@ -148,7 +149,7 @@ def __truediv__(self, p): where the output type is based on the type of ``p``. Raises: - :class:`~pypeit.pypmsgs.PypeItPathError`: + :class:`~pypeit.exceptions.PypeItPathError`: Raised if the requested contents *do not exist*. """ if (self.path / p).is_dir(): @@ -158,8 +159,10 @@ def __truediv__(self, p): remote_host=self.host) if (self.path / p).is_file(): return self.path / p - raise PypeItError(f'{str(self.path / p)} is not a valid PypeIt data path or is a file ' - 'that does not exist.', cls='PypeItPathError') + raise PypeItPathError( + f'{str(self.path / p)} is not a valid PypeIt data path or is a file that does not ' + 'exist.' 
+ ) @staticmethod def check_isdir(path:pathlib.Path) -> pathlib.Path: @@ -174,11 +177,11 @@ def check_isdir(path:pathlib.Path) -> pathlib.Path: `Path`_: The input path is returned if it is valid. Raises: - :class:`~pypeit.pypmsgs.PypeItPathError`: + :class:`~pypeit.exceptions.PypeItPathError`: Raised if the path does not exist or is not a directory. """ if not path.is_dir(): - raise PypeItError(f"Unable to find {path}. Check your installation.", cls='PypeItPathError') + raise PypeItPathError(f'Unable to find {path}. Check your installation.') return path @staticmethod diff --git a/pypeit/pypeitsetup.py b/pypeit/pypeitsetup.py index 3c78cbe192..44bc3d652d 100644 --- a/pypeit/pypeitsetup.py +++ b/pypeit/pypeitsetup.py @@ -11,6 +11,7 @@ from IPython import embed from pypeit import msgs +from pypeit import PypeItError from pypeit.metadata import PypeItMetaData from pypeit import inputfiles from pypeit.par import PypeItPar diff --git a/pypeit/pypmsgs.py b/pypeit/pypmsgs.py index 902fcb6b1f..5f5589a3fe 100644 --- a/pypeit/pypmsgs.py +++ b/pypeit/pypmsgs.py @@ -26,17 +26,17 @@ developers = ['ema', 'joe', 'milvang', 'rcooke', 'thsyu', 'xavier'] -class PypeItError(Exception): - pass - -class PypeItBitMaskError(PypeItError): - pass - -class PypeItDataModelError(PypeItError): - pass - -class PypeItPathError(PypeItError): - pass +#class PypeItError(Exception): +# pass +# +#class PypeItBitMaskError(PypeItError): +# pass +# +#class PypeItDataModelError(PypeItError): +# pass +# +#class PypeItPathError(PypeItError): +# pass class Messages: diff --git a/pypeit/scripts/arxiv_solution.py b/pypeit/scripts/arxiv_solution.py index 88d11d4630..f4afc7f8fc 100644 --- a/pypeit/scripts/arxiv_solution.py +++ b/pypeit/scripts/arxiv_solution.py @@ -7,6 +7,7 @@ """ import time from pypeit import msgs +from pypeit import PypeItError from pypeit import par from pypeit import inputfiles from pypeit import utils diff --git a/pypeit/scripts/chk_flexure.py b/pypeit/scripts/chk_flexure.py index d26d609ff9..54ef522acb 100644 --- a/pypeit/scripts/chk_flexure.py +++ b/pypeit/scripts/chk_flexure.py @@ -32,6 +32,7 @@ def main(args): from IPython import embed from astropy.io import fits from pypeit import msgs + from pypeit import PypeItError from pypeit import specobjs from pypeit import spec2dobj diff --git a/pypeit/scripts/chk_noise_2dspec.py b/pypeit/scripts/chk_noise_2dspec.py index 56a7bff5ea..eb35aa0f21 100644 --- a/pypeit/scripts/chk_noise_2dspec.py +++ b/pypeit/scripts/chk_noise_2dspec.py @@ -17,6 +17,7 @@ from pypeit import spec2dobj from pypeit import msgs +from pypeit import PypeItError from pypeit import io from pypeit import utils from pypeit.scripts import scriptbase diff --git a/pypeit/scripts/chk_plugins.py b/pypeit/scripts/chk_plugins.py index 0d6c7b063b..bd612df597 100644 --- a/pypeit/scripts/chk_plugins.py +++ b/pypeit/scripts/chk_plugins.py @@ -13,6 +13,7 @@ def main(args): from pypeit.display import required_plugins, plugins_available from pypeit import msgs + from pypeit import PypeItError success, report = plugins_available(return_report=True) if not success: diff --git a/pypeit/scripts/chk_scattlight.py b/pypeit/scripts/chk_scattlight.py index 26451d2d70..1a8b093aa4 100644 --- a/pypeit/scripts/chk_scattlight.py +++ b/pypeit/scripts/chk_scattlight.py @@ -36,7 +36,7 @@ def main(args): from pypeit import scattlight, spec2dobj, slittrace from pypeit import msgs - from pypeit.pypmsgs import PypeItError, PypeItDataModelError + from pypeit import PypeItError, PypeItDataModelError from 
pypeit.images.detector_container import DetectorContainer from pypeit import io diff --git a/pypeit/scripts/chk_wavecalib.py b/pypeit/scripts/chk_wavecalib.py index 9446ff5d51..8372b069cf 100644 --- a/pypeit/scripts/chk_wavecalib.py +++ b/pypeit/scripts/chk_wavecalib.py @@ -28,6 +28,7 @@ def main(args): from IPython import embed from astropy.io import fits from pypeit import wavecalib, spec2dobj, msgs + from pypeit import PypeItError chk_version = not args.try_old diff --git a/pypeit/scripts/clean_cache.py b/pypeit/scripts/clean_cache.py index e117394bf6..2de81a4917 100644 --- a/pypeit/scripts/clean_cache.py +++ b/pypeit/scripts/clean_cache.py @@ -35,6 +35,7 @@ def main(args): import astropy.utils.data from pypeit import msgs + from pypeit import PypeItError from pypeit import cache if args.list: diff --git a/pypeit/scripts/coadd_1dspec.py b/pypeit/scripts/coadd_1dspec.py index a3b8ab178d..8489548754 100644 --- a/pypeit/scripts/coadd_1dspec.py +++ b/pypeit/scripts/coadd_1dspec.py @@ -14,6 +14,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import inputfiles from pypeit import coadd1d from pypeit import inputfiles diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index a55f01537a..7220898acf 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -28,6 +28,7 @@ def main(args): import time from pypeit import msgs + from pypeit import PypeItError from pypeit import par from pypeit import inputfiles from pypeit import utils diff --git a/pypeit/scripts/collate_1d.py b/pypeit/scripts/collate_1d.py index 5ac8e59a23..8fbfc12a46 100644 --- a/pypeit/scripts/collate_1d.py +++ b/pypeit/scripts/collate_1d.py @@ -21,6 +21,7 @@ from pypeit.spectrographs.util import load_spectrograph from pypeit import coadd1d from pypeit import msgs +from pypeit import PypeItError from pypeit import par from pypeit.utils import is_float from pypeit.core import wave diff --git a/pypeit/scripts/compile_wvarxiv.py b/pypeit/scripts/compile_wvarxiv.py index c4c6b93384..138250f01e 100644 --- a/pypeit/scripts/compile_wvarxiv.py +++ b/pypeit/scripts/compile_wvarxiv.py @@ -7,6 +7,7 @@ """ import time from pypeit import msgs +from pypeit import PypeItError from pypeit import par from pypeit import inputfiles from pypeit import utils diff --git a/pypeit/scripts/extract_datacube.py b/pypeit/scripts/extract_datacube.py index ca4a849aff..59e8d19276 100644 --- a/pypeit/scripts/extract_datacube.py +++ b/pypeit/scripts/extract_datacube.py @@ -37,6 +37,7 @@ def main(args): import time from pypeit import msgs + from pypeit import PypeItError from pypeit import par from pypeit import inputfiles from pypeit import utils diff --git a/pypeit/scripts/flux_calib.py b/pypeit/scripts/flux_calib.py index 38be5218f4..c2c13694f4 100644 --- a/pypeit/scripts/flux_calib.py +++ b/pypeit/scripts/flux_calib.py @@ -9,6 +9,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import inputfiles from pypeit.spectrographs.util import load_spectrograph from pypeit import fluxcalibrate diff --git a/pypeit/scripts/identify.py b/pypeit/scripts/identify.py index 9c3e8e03b2..9a5899ac33 100644 --- a/pypeit/scripts/identify.py +++ b/pypeit/scripts/identify.py @@ -61,6 +61,7 @@ def main(args): import numpy as np from pypeit import msgs + from pypeit import PypeItError from pypeit.spectrographs.util import load_spectrograph from pypeit.core.gui.identify import Identify from pypeit.wavecalib import 
BuildWaveCalib, WaveCalib diff --git a/pypeit/scripts/parse_slits.py b/pypeit/scripts/parse_slits.py index 546eaa05b3..b818d50c54 100644 --- a/pypeit/scripts/parse_slits.py +++ b/pypeit/scripts/parse_slits.py @@ -11,6 +11,7 @@ from pypeit import slittrace from pypeit import spec2dobj from pypeit import msgs +from pypeit import PypeItError from astropy.table import Table from astropy.io import fits diff --git a/pypeit/scripts/print_bpm.py b/pypeit/scripts/print_bpm.py index 70d0032f6d..96dbc38598 100644 --- a/pypeit/scripts/print_bpm.py +++ b/pypeit/scripts/print_bpm.py @@ -13,7 +13,7 @@ from pypeit import msgs, spec2dobj from pypeit.images.detector_container import DetectorContainer from pypeit.images.imagebitmask import ImageBitMask -from pypeit.pypmsgs import PypeItDataModelError +from pypeit import PypeItDataModelError from pypeit.scripts import scriptbase diff --git a/pypeit/scripts/ql.py b/pypeit/scripts/ql.py index 1d702c32ec..bdc866b70c 100644 --- a/pypeit/scripts/ql.py +++ b/pypeit/scripts/ql.py @@ -48,7 +48,7 @@ from astropy.table import Table -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit import msgs from pypeit import pypeitsetup from pypeit import metadata diff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py index 8df6f41983..a6784d5d20 100644 --- a/pypeit/scripts/run_pypeit.py +++ b/pypeit/scripts/run_pypeit.py @@ -83,6 +83,7 @@ def main(args): from pypeit import pypeit from pypeit import msgs + from pypeit import PypeItError # Load options from command line splitnm = os.path.splitext(args.pypeit_file) diff --git a/pypeit/scripts/run_to_calibstep.py b/pypeit/scripts/run_to_calibstep.py index cd67f81a45..0f02df17cf 100644 --- a/pypeit/scripts/run_to_calibstep.py +++ b/pypeit/scripts/run_to_calibstep.py @@ -51,6 +51,7 @@ def main(args): from pypeit import pypeit from pypeit import msgs + from pypeit import PypeItError # Load options from command line _pypeit_file = Path(args.pypeit_file).absolute() diff --git a/pypeit/scripts/sensfunc.py b/pypeit/scripts/sensfunc.py index 8712cbfffa..c4becdf532 100644 --- a/pypeit/scripts/sensfunc.py +++ b/pypeit/scripts/sensfunc.py @@ -93,6 +93,7 @@ def main(args): import os from pypeit import msgs + from pypeit import PypeItError from pypeit import inputfiles from pypeit import io from pypeit.par import pypeitpar diff --git a/pypeit/scripts/setup_coadd2d.py b/pypeit/scripts/setup_coadd2d.py index b5d55d9ea6..98892370e4 100644 --- a/pypeit/scripts/setup_coadd2d.py +++ b/pypeit/scripts/setup_coadd2d.py @@ -93,6 +93,7 @@ def main(args): from astropy.table import Table from pypeit import msgs + from pypeit import PypeItError from pypeit import io from pypeit import utils from pypeit import inputfiles diff --git a/pypeit/scripts/show_1dspec.py b/pypeit/scripts/show_1dspec.py index fb46ae757f..7088f61957 100644 --- a/pypeit/scripts/show_1dspec.py +++ b/pypeit/scripts/show_1dspec.py @@ -47,6 +47,7 @@ def main(args): from pypeit import specobjs from pypeit import msgs + from pypeit import PypeItError sobjs = specobjs.SpecObjs.from_fitsfile(args.file, chk_version=False) diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index 09e3be433e..2c25b60578 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -15,12 +15,13 @@ from astropy.stats import sigma_clipped_stats from pypeit import msgs +from pypeit import PypeItError from pypeit import slittrace from pypeit import specobjs from pypeit import io from pypeit import utils from pypeit import 
__version__ -from pypeit.pypmsgs import PypeItDataModelError, PypeItBitMaskError +from pypeit import PypeItDataModelError, PypeItBitMaskError from pypeit.display import display from pypeit.images.imagebitmask import ImageBitMask diff --git a/pypeit/scripts/show_pixflat.py b/pypeit/scripts/show_pixflat.py index ff8267d7f4..732e83ad66 100644 --- a/pypeit/scripts/show_pixflat.py +++ b/pypeit/scripts/show_pixflat.py @@ -25,6 +25,7 @@ def get_parser(cls, width=None): def main(args): import numpy as np from pypeit import msgs + from pypeit import PypeItError from pypeit import io from pypeit.display import display from pypeit import dataPaths diff --git a/pypeit/scripts/skysub_regions.py b/pypeit/scripts/skysub_regions.py index d0e450d97f..1b21bd64ad 100644 --- a/pypeit/scripts/skysub_regions.py +++ b/pypeit/scripts/skysub_regions.py @@ -42,6 +42,7 @@ def main(args): import os import astropy.io.fits as fits from pypeit import msgs + from pypeit import PypeItError from pypeit import io from pypeit.core.gui.skysub_regions import SkySubGUI from pypeit.images import buildimage diff --git a/pypeit/scripts/tellfit.py b/pypeit/scripts/tellfit.py index 1d1e5f3096..c670cbd862 100644 --- a/pypeit/scripts/tellfit.py +++ b/pypeit/scripts/tellfit.py @@ -72,6 +72,7 @@ def main(args): from astropy.io import fits from pypeit import msgs + from pypeit import PypeItError from pypeit import dataPaths from pypeit.par import pypeitpar from pypeit.spectrographs.util import load_spectrograph diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index 7195d0d689..f7c6985711 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -66,6 +66,7 @@ def main(args): from pathlib import Path import numpy as np from pypeit import msgs + from pypeit import PypeItError from pypeit.spectrographs.util import load_spectrograph from pypeit import edgetrace from pypeit.pypeit import PypeIt diff --git a/pypeit/scripts/view_fits.py b/pypeit/scripts/view_fits.py index 3f51b1bbca..0301207d4e 100644 --- a/pypeit/scripts/view_fits.py +++ b/pypeit/scripts/view_fits.py @@ -49,6 +49,7 @@ def get_parser(cls, width=None): def main(args): from pypeit import msgs + from pypeit import PypeItError from pypeit.display import display from pypeit.spectrographs import util from pypeit import io diff --git a/pypeit/sensfilearchive.py b/pypeit/sensfilearchive.py index f49f2d2130..f340f01cc5 100644 --- a/pypeit/sensfilearchive.py +++ b/pypeit/sensfilearchive.py @@ -5,11 +5,11 @@ .. 
include:: ../include/links.rst """ from abc import ABC, abstractmethod -import os from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import dataPaths class SensFileArchive(ABC): diff --git a/pypeit/sensfunc.py b/pypeit/sensfunc.py index 68f2d09a7d..1acdcaec1f 100644 --- a/pypeit/sensfunc.py +++ b/pypeit/sensfunc.py @@ -16,6 +16,7 @@ from astropy import table from pypeit import msgs +from pypeit import PypeItError from pypeit import specobjs from pypeit import specobj from pypeit import utils diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py index b271a5a937..bbec5ba012 100644 --- a/pypeit/slittrace.py +++ b/pypeit/slittrace.py @@ -17,8 +17,9 @@ from astropy.stats import sigma_clipped_stats from astropy.io import fits -from pypeit.pypmsgs import PypeItBitMaskError +from pypeit import PypeItBitMaskError from pypeit import msgs +from pypeit import PypeItError from pypeit import datamodel from pypeit import calibframe from pypeit import specobj @@ -357,9 +358,10 @@ def from_hdu(cls, hdu, chk_version=True, **kwargs): hdr = hdu[parsed_hdus[0]].header if isinstance(hdu, fits.HDUList) else hdu.header hdr_bitmask = BitMask.from_header(hdr) if chk_version and hdr_bitmask.bits != self.bitmask.bits: - raise PypeItError('The bitmask in this fits file appear to be out of date! Recreate this ' - 'file by re-running the relevant script or set chk_version=False.', - cls='PypeItBitMaskError') + raise PypeItBitMaskError( + 'The bitmask in this fits file appears to be out of date! Recreate this file by ' + 're-running the relevant script or set chk_version=False.' + ) return self diff --git a/pypeit/spec2dobj.py b/pypeit/spec2dobj.py index 3095718414..113b967c7d 100644 --- a/pypeit/spec2dobj.py +++ b/pypeit/spec2dobj.py @@ -19,6 +19,7 @@ from astropy import table from pypeit import msgs +from pypeit import PypeItError from pypeit import io from pypeit import datamodel from pypeit import slittrace diff --git a/pypeit/specobj.py b/pypeit/specobj.py index 2a7fc0226b..320fd8e838 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -16,6 +16,7 @@ from linetools.spectra import xspectrum1d from pypeit import msgs +from pypeit import PypeItError from pypeit.core import flexure from pypeit.core import flux_calib from pypeit.core import parse diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 4409c1f80a..0f68759b95 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -19,6 +19,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import specobj from pypeit import io from pypeit.spectrographs.util import load_spectrograph diff --git a/pypeit/spectrographs/aat_uhrf.py b/pypeit/spectrographs/aat_uhrf.py index ad313b93cd..b1b529b6b5 100644 --- a/pypeit/spectrographs/aat_uhrf.py +++ b/pypeit/spectrographs/aat_uhrf.py @@ -12,6 +12,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/apf_levy.py b/pypeit/spectrographs/apf_levy.py index 01da0b8065..22fcb7c564 100644 --- a/pypeit/spectrographs/apf_levy.py +++ b/pypeit/spectrographs/apf_levy.py @@ -11,6 +11,7 @@ from IPython import embed from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import framematch diff --git a/pypeit/spectrographs/bok_bc.py b/pypeit/spectrographs/bok_bc.py index bbfcbc195a..f7ed518e7d
100644 --- a/pypeit/spectrographs/bok_bc.py +++ b/pypeit/spectrographs/bok_bc.py @@ -8,6 +8,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import framematch diff --git a/pypeit/spectrographs/gemini_flamingos.py b/pypeit/spectrographs/gemini_flamingos.py index d1481a4ce1..94c3c97ed6 100644 --- a/pypeit/spectrographs/gemini_flamingos.py +++ b/pypeit/spectrographs/gemini_flamingos.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.images import detector_container diff --git a/pypeit/spectrographs/gemini_gmos.py b/pypeit/spectrographs/gemini_gmos.py index af84467c0b..643d02c71f 100644 --- a/pypeit/spectrographs/gemini_gmos.py +++ b/pypeit/spectrographs/gemini_gmos.py @@ -14,6 +14,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit.spectrographs import spectrograph from pypeit import telescopes from pypeit import io diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index d296f72642..166cfd0133 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -9,6 +9,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch, parse from pypeit.images import detector_container diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index a64585c845..f554e56962 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse from pypeit.core import framematch diff --git a/pypeit/spectrographs/jwst_nircam.py b/pypeit/spectrographs/jwst_nircam.py index bf0272fafc..61bc5ad2c6 100644 --- a/pypeit/spectrographs/jwst_nircam.py +++ b/pypeit/spectrographs/jwst_nircam.py @@ -9,6 +9,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.par import pypeitpar diff --git a/pypeit/spectrographs/jwst_nirspec.py b/pypeit/spectrographs/jwst_nirspec.py index 0a539acc54..03327f8eff 100644 --- a/pypeit/spectrographs/jwst_nirspec.py +++ b/pypeit/spectrographs/jwst_nirspec.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit.core import framematch diff --git a/pypeit/spectrographs/keck_deimos.py b/pypeit/spectrographs/keck_deimos.py index ff371e7d8f..a0bef82679 100644 --- a/pypeit/spectrographs/keck_deimos.py +++ b/pypeit/spectrographs/keck_deimos.py @@ -23,6 +23,7 @@ import linetools from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import parse diff --git a/pypeit/spectrographs/keck_esi.py b/pypeit/spectrographs/keck_esi.py index f4509fb60a..72a3e1d80d 100644 --- a/pypeit/spectrographs/keck_esi.py +++ b/pypeit/spectrographs/keck_esi.py @@ -13,6 +13,7 @@ import datetime from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import framematch diff --git a/pypeit/spectrographs/keck_hires.py 
b/pypeit/spectrographs/keck_hires.py index 454aa5e8e0..39fb7907bc 100644 --- a/pypeit/spectrographs/keck_hires.py +++ b/pypeit/spectrographs/keck_hires.py @@ -17,6 +17,7 @@ from astropy import units from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import parse diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 370d2cb601..0200250d09 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -16,6 +16,7 @@ from scipy.optimize import curve_fit from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit import io diff --git a/pypeit/spectrographs/keck_lris.py b/pypeit/spectrographs/keck_lris.py index 2117b6564e..4fc3d833b5 100644 --- a/pypeit/spectrographs/keck_lris.py +++ b/pypeit/spectrographs/keck_lris.py @@ -17,6 +17,7 @@ import linetools.utils from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit import io diff --git a/pypeit/spectrographs/keck_mosfire.py b/pypeit/spectrographs/keck_mosfire.py index 9f31ae096b..a3c8de180f 100644 --- a/pypeit/spectrographs/keck_mosfire.py +++ b/pypeit/spectrographs/keck_mosfire.py @@ -9,6 +9,7 @@ from astropy.io import fits from astropy.stats import sigma_clipped_stats from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch, meta from pypeit import utils diff --git a/pypeit/spectrographs/keck_nires.py b/pypeit/spectrographs/keck_nires.py index 9144ff6d8a..f116f1d86a 100644 --- a/pypeit/spectrographs/keck_nires.py +++ b/pypeit/spectrographs/keck_nires.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/keck_nirspec.py b/pypeit/spectrographs/keck_nirspec.py index c461edd477..19e0f8bf35 100644 --- a/pypeit/spectrographs/keck_nirspec.py +++ b/pypeit/spectrographs/keck_nirspec.py @@ -7,6 +7,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import io from pypeit.images import detector_container from pypeit import telescopes diff --git a/pypeit/spectrographs/lbt_luci.py b/pypeit/spectrographs/lbt_luci.py index 3a5a0d765b..a62567afc1 100644 --- a/pypeit/spectrographs/lbt_luci.py +++ b/pypeit/spectrographs/lbt_luci.py @@ -9,6 +9,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/lbt_mods.py b/pypeit/spectrographs/lbt_mods.py index 714e04a49c..82bbb93616 100644 --- a/pypeit/spectrographs/lbt_mods.py +++ b/pypeit/spectrographs/lbt_mods.py @@ -7,6 +7,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit import io diff --git a/pypeit/spectrographs/ldt_deveny.py b/pypeit/spectrographs/ldt_deveny.py index bd99b7a0d5..ff0734c731 100644 --- a/pypeit/spectrographs/ldt_deveny.py +++ b/pypeit/spectrographs/ldt_deveny.py @@ -24,6 +24,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.core import parse diff --git 
a/pypeit/spectrographs/magellan_fire.py b/pypeit/spectrographs/magellan_fire.py index c8e83da8a7..2d4f33a2d5 100644 --- a/pypeit/spectrographs/magellan_fire.py +++ b/pypeit/spectrographs/magellan_fire.py @@ -12,6 +12,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/magellan_mage.py b/pypeit/spectrographs/magellan_mage.py index 6b81620996..2e91a1951a 100644 --- a/pypeit/spectrographs/magellan_mage.py +++ b/pypeit/spectrographs/magellan_mage.py @@ -11,6 +11,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import framematch diff --git a/pypeit/spectrographs/mdm_modspec.py b/pypeit/spectrographs/mdm_modspec.py index d0654ee6e2..aa986be7af 100644 --- a/pypeit/spectrographs/mdm_modspec.py +++ b/pypeit/spectrographs/mdm_modspec.py @@ -10,6 +10,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/mdm_osmos.py b/pypeit/spectrographs/mdm_osmos.py index 14f81e2519..c1c817a578 100644 --- a/pypeit/spectrographs/mdm_osmos.py +++ b/pypeit/spectrographs/mdm_osmos.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/mmt_binospec.py b/pypeit/spectrographs/mmt_binospec.py index c6fdd78522..3c9d66488e 100644 --- a/pypeit/spectrographs/mmt_binospec.py +++ b/pypeit/spectrographs/mmt_binospec.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit import io diff --git a/pypeit/spectrographs/mmt_bluechannel.py b/pypeit/spectrographs/mmt_bluechannel.py index 6ecbb51c1c..d071c08279 100644 --- a/pypeit/spectrographs/mmt_bluechannel.py +++ b/pypeit/spectrographs/mmt_bluechannel.py @@ -8,6 +8,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit.core import framematch diff --git a/pypeit/spectrographs/mmt_mmirs.py b/pypeit/spectrographs/mmt_mmirs.py index a04e9ed4d3..ff8dd2e3c0 100644 --- a/pypeit/spectrographs/mmt_mmirs.py +++ b/pypeit/spectrographs/mmt_mmirs.py @@ -11,6 +11,7 @@ from astropy.stats import sigma_clipped_stats from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import utils from pypeit import io diff --git a/pypeit/spectrographs/not_alfosc.py b/pypeit/spectrographs/not_alfosc.py index a822acd53b..3ac39ee04e 100644 --- a/pypeit/spectrographs/not_alfosc.py +++ b/pypeit/spectrographs/not_alfosc.py @@ -10,6 +10,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/ntt_efosc2.py b/pypeit/spectrographs/ntt_efosc2.py index ace9a1637d..00542e2dde 100644 --- a/pypeit/spectrographs/ntt_efosc2.py +++ b/pypeit/spectrographs/ntt_efosc2.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit 
import telescopes from pypeit.core import parse from pypeit.core import framematch diff --git a/pypeit/spectrographs/opticalmodel.py b/pypeit/spectrographs/opticalmodel.py index 4f319aafb0..13a2c832c1 100644 --- a/pypeit/spectrographs/opticalmodel.py +++ b/pypeit/spectrographs/opticalmodel.py @@ -5,9 +5,13 @@ """ import warnings -from pypeit import msgs + import numpy import scipy + +from pypeit import msgs +from pypeit import PypeItError + # ---------------------------------------------------------------------- # General class for a reflection grating class ReflectionGrating: diff --git a/pypeit/spectrographs/p200_dbsp.py b/pypeit/spectrographs/p200_dbsp.py index ffb6025f3a..b5d3369d58 100644 --- a/pypeit/spectrographs/p200_dbsp.py +++ b/pypeit/spectrographs/p200_dbsp.py @@ -13,6 +13,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import io from pypeit import telescopes from pypeit.core import framematch diff --git a/pypeit/spectrographs/p200_ngps.py b/pypeit/spectrographs/p200_ngps.py index 6d9390c3f1..8425165812 100644 --- a/pypeit/spectrographs/p200_ngps.py +++ b/pypeit/spectrographs/p200_ngps.py @@ -11,6 +11,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/p200_tspec.py b/pypeit/spectrographs/p200_tspec.py index 16b2f23eb4..ee275717ee 100644 --- a/pypeit/spectrographs/p200_tspec.py +++ b/pypeit/spectrographs/p200_tspec.py @@ -8,6 +8,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/shane_kast.py b/pypeit/spectrographs/shane_kast.py index cfbd1dbef3..c2a30a6d58 100644 --- a/pypeit/spectrographs/shane_kast.py +++ b/pypeit/spectrographs/shane_kast.py @@ -11,6 +11,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/soar_goodman.py b/pypeit/spectrographs/soar_goodman.py index 2d4bc93b5c..a7abb1791e 100644 --- a/pypeit/spectrographs/soar_goodman.py +++ b/pypeit/spectrographs/soar_goodman.py @@ -9,6 +9,7 @@ from astropy import units from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import flux_calib diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index 3940c03ded..4dda7cb52e 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -36,6 +36,7 @@ from astropy import units from pypeit import msgs +from pypeit import PypeItError from pypeit import io from pypeit.core import parse from pypeit.core import procimg diff --git a/pypeit/spectrographs/subaru_focas.py b/pypeit/spectrographs/subaru_focas.py index 7a479d8f66..074677d790 100644 --- a/pypeit/spectrographs/subaru_focas.py +++ b/pypeit/spectrographs/subaru_focas.py @@ -5,6 +5,7 @@ """ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse from pypeit.core import framematch diff --git a/pypeit/spectrographs/tng_dolores.py b/pypeit/spectrographs/tng_dolores.py index b6abf6c7cd..fb35daef82 100644 --- 
a/pypeit/spectrographs/tng_dolores.py +++ b/pypeit/spectrographs/tng_dolores.py @@ -9,6 +9,7 @@ from astropy.time import Time from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/util.py b/pypeit/spectrographs/util.py index 7cf45e4891..59fb53e229 100644 --- a/pypeit/spectrographs/util.py +++ b/pypeit/spectrographs/util.py @@ -11,6 +11,7 @@ from pypeit import spectrographs from pypeit import msgs +from pypeit import PypeItError def load_spectrograph(spec): diff --git a/pypeit/spectrographs/vlt_fors.py b/pypeit/spectrographs/vlt_fors.py index a7f1f16afd..f6a7ccf800 100644 --- a/pypeit/spectrographs/vlt_fors.py +++ b/pypeit/spectrographs/vlt_fors.py @@ -5,6 +5,7 @@ """ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse from pypeit.core import framematch diff --git a/pypeit/spectrographs/vlt_sinfoni.py b/pypeit/spectrographs/vlt_sinfoni.py index 126fd1e2c2..24fcedf94c 100644 --- a/pypeit/spectrographs/vlt_sinfoni.py +++ b/pypeit/spectrographs/vlt_sinfoni.py @@ -9,6 +9,7 @@ import numpy as np from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/spectrographs/vlt_xshooter.py b/pypeit/spectrographs/vlt_xshooter.py index c82e427fcf..0bf708cd78 100644 --- a/pypeit/spectrographs/vlt_xshooter.py +++ b/pypeit/spectrographs/vlt_xshooter.py @@ -9,6 +9,7 @@ from astropy import units from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit import io from pypeit.core import parse diff --git a/pypeit/spectrographs/wht_isis.py b/pypeit/spectrographs/wht_isis.py index cb1c8765b1..89c832c8bc 100644 --- a/pypeit/spectrographs/wht_isis.py +++ b/pypeit/spectrographs/wht_isis.py @@ -6,6 +6,7 @@ import numpy as np from pypeit import msgs +from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch from pypeit.spectrographs import spectrograph diff --git a/pypeit/specutils/pypeit_loaders.py b/pypeit/specutils/pypeit_loaders.py index eebf118a3f..48150071b1 100644 --- a/pypeit/specutils/pypeit_loaders.py +++ b/pypeit/specutils/pypeit_loaders.py @@ -32,7 +32,7 @@ 'option to use the pypeit.specutils module.') from pypeit import __version__ -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit import msgs from pypeit import specobjs from pypeit import onespec diff --git a/pypeit/tests/test_calibframe.py b/pypeit/tests/test_calibframe.py index fbdf307431..beb5bcb863 100644 --- a/pypeit/tests/test_calibframe.py +++ b/pypeit/tests/test_calibframe.py @@ -9,7 +9,7 @@ import pytest -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.calibframe import CalibFrame from pypeit import io from pypeit.tests.tstutils import data_output_path diff --git a/pypeit/tests/test_coadd.py b/pypeit/tests/test_coadd.py index c0bc82abc1..8f455f1071 100644 --- a/pypeit/tests/test_coadd.py +++ b/pypeit/tests/test_coadd.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.core import coadd from pypeit.core import flux_calib from pypeit.core import meta diff --git a/pypeit/tests/test_collate_1d.py b/pypeit/tests/test_collate_1d.py index 
e735abafad..3919c0e006 100644 --- a/pypeit/tests/test_collate_1d.py +++ b/pypeit/tests/test_collate_1d.py @@ -16,7 +16,7 @@ from pypeit.spectrographs.util import load_spectrograph from pypeit.sensfilearchive import SensFileArchive from pypeit.par import pypeitpar -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.images.detector_container import DetectorContainer from pypeit import fluxcalibrate from pypeit import coadd1d diff --git a/pypeit/tests/test_flux.py b/pypeit/tests/test_flux.py index 802a99b1a4..0020dfb229 100644 --- a/pypeit/tests/test_flux.py +++ b/pypeit/tests/test_flux.py @@ -14,7 +14,7 @@ from pypeit.par.pypeitpar import Coadd1DPar -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError diff --git a/pypeit/tests/test_fluxspec.py b/pypeit/tests/test_fluxspec.py index a51b3aed00..a264c88b99 100644 --- a/pypeit/tests/test_fluxspec.py +++ b/pypeit/tests/test_fluxspec.py @@ -25,7 +25,7 @@ from pypeit import inputfiles from pypeit import fluxcalibrate -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit import scripts def test_input_flux_file(): diff --git a/pypeit/tests/test_match.py b/pypeit/tests/test_match.py index efe2ba01b1..0c873cf6b5 100644 --- a/pypeit/tests/test_match.py +++ b/pypeit/tests/test_match.py @@ -10,7 +10,7 @@ from pypeit.calibframe import CalibFrame from pypeit.core import framematch from pypeit.tests.tstutils import dummy_fitstbl -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError @pytest.fixture diff --git a/pypeit/tests/test_mosaic.py b/pypeit/tests/test_mosaic.py index 9079be9925..675bc961d1 100644 --- a/pypeit/tests/test_mosaic.py +++ b/pypeit/tests/test_mosaic.py @@ -5,7 +5,7 @@ from astropy.io import fits -from pypeit.pypmsgs import PypeItDataModelError +from pypeit import PypeItDataModelError from pypeit.tests.tstutils import data_output_path from pypeit.images.mosaic import Mosaic from pypeit.spectrographs.util import load_spectrograph diff --git a/pypeit/tests/test_msgs.py b/pypeit/tests/test_msgs.py deleted file mode 100644 index 474ec7c2f0..0000000000 --- a/pypeit/tests/test_msgs.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Module to run tests on armsgs -""" -import numpy as np -import pytest - -from pypeit import pypmsgs - -def test_log_write(): - - outfil = 'tst.log' - msgs = pypmsgs.Messages(outfil, verbosity=1) - msgs.close() - # Insure scipy, numpy, astropy are being version - with open(outfil, 'r') as f: - lines = f.readlines() - pckgs = ['scipy', 'numpy', 'astropy'] - flgs = [False]*len(pckgs) - for line in lines: - for jj,pckg in enumerate(pckgs): - if pckg in line: - flgs[jj] = True - for flg in flgs: - assert flg is True - - -def test_msgs(): - msgs = pypmsgs.Messages(None, verbosity=1) - msgs.info("test 123") - msgs.warning("test 123") - msgs.bug("test 123") - msgs.work("test 123") - msgs.close() - diff --git a/pypeit/tests/test_parse.py b/pypeit/tests/test_parse.py index 67bbacb6ec..f18350ac75 100644 --- a/pypeit/tests/test_parse.py +++ b/pypeit/tests/test_parse.py @@ -7,7 +7,7 @@ import numpy as np -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.core import parse from pypeit.spectrographs.util import load_spectrograph diff --git a/pypeit/tests/test_pkgdata.py b/pypeit/tests/test_pkgdata.py index ab712bdf66..8acc148eb4 100644 --- a/pypeit/tests/test_pkgdata.py +++ b/pypeit/tests/test_pkgdata.py @@ -14,7 +14,7 @@ from linetools.spectra import xspectrum1d -from pypeit.pypmsgs import PypeItPathError +from 
pypeit import PypeItPathError from pypeit.pypeitdata import PypeItDataPath from pypeit import dataPaths from pypeit import io diff --git a/pypeit/tests/test_pydl.py b/pypeit/tests/test_pydl.py deleted file mode 100644 index 2b0826952f..0000000000 --- a/pypeit/tests/test_pydl.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -Module to run tests on pyidl functions -""" - -from pypeit.core import pydl -import pytest - -# TODO: JFH -- any suggestions?? - -def test_pydl(): - assert True - diff --git a/pypeit/tests/test_pypeitimage.py b/pypeit/tests/test_pypeitimage.py index 69a4d21305..af25859211 100644 --- a/pypeit/tests/test_pypeitimage.py +++ b/pypeit/tests/test_pypeitimage.py @@ -11,7 +11,7 @@ from astropy.io import fits -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.images import pypeitimage from pypeit.images import imagebitmask from pypeit.tests.tstutils import data_output_path diff --git a/pypeit/tests/test_sensfilearchive.py b/pypeit/tests/test_sensfilearchive.py index 4eff60e070..058ede92b6 100644 --- a/pypeit/tests/test_sensfilearchive.py +++ b/pypeit/tests/test_sensfilearchive.py @@ -8,7 +8,7 @@ from pypeit.sensfilearchive import SensFileArchive from astropy.io import fits -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError def test_getinstance(): # Test success diff --git a/pypeit/tests/test_spec2dobj.py b/pypeit/tests/test_spec2dobj.py index 2a639153fe..b34c81fe32 100644 --- a/pypeit/tests/test_spec2dobj.py +++ b/pypeit/tests/test_spec2dobj.py @@ -17,7 +17,7 @@ from pypeit.spectrographs.util import load_spectrograph from pypeit.tests import tstutils from pypeit import slittrace -from pypeit import pypmsgs +from pypeit import PypeItError from pypeit.images import imagebitmask @@ -108,7 +108,7 @@ def test_spec2dobj_update_slit(init_dict): # code itself! 
# Checks - with pytest.raises(pypmsgs.PypeItError): + with pytest.raises(PypeItError): spec2DObj1.update_slits(spec2DObj2) # Update diff --git a/pypeit/tests/test_spectrographs.py b/pypeit/tests/test_spectrographs.py index d286bb9e2d..c21d812242 100644 --- a/pypeit/tests/test_spectrographs.py +++ b/pypeit/tests/test_spectrographs.py @@ -9,7 +9,7 @@ import pytest from pypeit import dataPaths -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit import spectrographs from pypeit.spectrographs.util import load_spectrograph from pypeit import pypeitsetup diff --git a/pypeit/tests/test_specutils.py b/pypeit/tests/test_specutils.py index 1cff725988..bc52b0b50d 100644 --- a/pypeit/tests/test_specutils.py +++ b/pypeit/tests/test_specutils.py @@ -14,7 +14,7 @@ from pypeit import specobjs from pypeit.specutils import Spectrum, SpectrumList from pypeit.tests import tstutils -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError import pytest specutils_required = pytest.mark.skipif(Spectrum is None or SpectrumList is None, diff --git a/pypeit/tracepca.py b/pypeit/tracepca.py index 06ea4f2ec6..758b8c3b99 100644 --- a/pypeit/tracepca.py +++ b/pypeit/tracepca.py @@ -14,6 +14,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit import utils from pypeit.io import hdu_iter_by_ext from pypeit.core import trace diff --git a/pypeit/utils.py b/pypeit/utils.py index 5e1f457a0b..836a7ec866 100644 --- a/pypeit/utils.py +++ b/pypeit/utils.py @@ -30,6 +30,7 @@ from astropy.io import ascii from pypeit import msgs +from pypeit import PypeItError from pypeit.move_median import move_median from pypeit import dataPaths diff --git a/pypeit/wavecalib.py b/pypeit/wavecalib.py index e92d23b2bb..f46aefec08 100644 --- a/pypeit/wavecalib.py +++ b/pypeit/wavecalib.py @@ -15,6 +15,7 @@ from astropy.io import fits from pypeit import msgs +from pypeit import PypeItError from pypeit.core import arc, qa from pypeit.core import fitting from pypeit.core import parse diff --git a/pypeit/wavetilts.py b/pypeit/wavetilts.py index eebe19e78e..6c2565d4b1 100644 --- a/pypeit/wavetilts.py +++ b/pypeit/wavetilts.py @@ -20,6 +20,7 @@ from astropy import table from pypeit import msgs, datamodel, utils +from pypeit import PypeItError from pypeit import calibframe from pypeit import slittrace, wavecalib from pypeit.display import display @@ -1011,7 +1012,7 @@ def show_tilts_mpl(tilt_img, tilt_traces, show_traces=False, left_edges=None, """ if tilt_traces is None: - return raise PypeItError('No tilts have been traced or fitted') + raise PypeItError('No tilts have been traced or fitted') if cut is None: cut = utils.growth_lim(tilt_img, 0.98, fac=1) From 487732742c22433e7407d65c8f123bd3283dd9f3 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 15:44:29 -0700 Subject: [PATCH 05/33] newline --- pypeit/coadd2d.py | 12 ++--- pypeit/coadd3d.py | 99 ++++++++++++++++++++-------------- pypeit/core/coadd.py | 21 ++++---- pypeit/core/datacube.py | 46 +++++++++------- pypeit/core/extract.py | 12 +++-- pypeit/core/findobj_skymask.py | 6 ++- pypeit/core/fitting.py | 12 +++-- pypeit/core/flat.py | 14 ++--- pypeit/core/flexure.py | 24 +++++---- pypeit/core/flux_calib.py | 18 +++---- pypeit/edgetrace.py | 15 +++--- pypeit/flatfield.py | 60 ++++++++++++--------- pypeit/inputfiles.py | 2 +- pypeit/metadata.py | 6 +-- pypeit/pypeit.py | 14 ++--- pypeit/specobj.py | 13 ++--- pypeit/specobjs.py | 2 +- 17 files changed, 216 insertions(+), 160 deletions(-) diff 
--git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index 072606ae77..1654985c06 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -1948,14 +1948,14 @@ def snr_report(self, snr_bar): """ # Print out a report on the SNR - msg_string = msgs.newline() + '-------------------------------------' - msg_string += msgs.newline() + ' Summary for highest S/N object' - msg_string += msgs.newline() + '-------------------------------------' - msg_string += msgs.newline() + ' exp# S/N' + msg_string = '\n-------------------------------------' + msg_string += '\n Summary for highest S/N object' + msg_string += '\n-------------------------------------' + msg_string += '\n exp# S/N' for iexp, snr in enumerate(snr_bar): - msg_string += msgs.newline() + ' {:d} {:5.2f}'.format(iexp, snr) + msg_string += '\n {:d} {:5.2f}'.format(iexp, snr) - msg_string += msgs.newline() + '-------------------------------------' + msg_string += '\n-------------------------------------' msgs.info(msg_string) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 2939e5d3d8..7432263937 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -291,12 +291,14 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, w self.humidity, self.wave_ref.to_value(units.micron)) # Print out the DAR parameters - msgs.info("DAR correction parameters:" + msgs.newline() + - " Airmass = {0:.2f}".format(self.airmass) + msgs.newline() + - " Pressure = {0:.2f} mbar".format(self.pressure.to_value(units.mbar)) + msgs.newline() + - " Humidity = {0:.2f} %".format(self.humidity*100.0) + msgs.newline() + - " Temperature = {0:.2f} deg C".format(self.temperature.to_value(units.deg_C)) + msgs.newline() + - " Reference wavelength = {0:.2f} Angstrom".format(self.wave_ref.to_value(units.Angstrom))) + msgs.info( + "DAR correction parameters:\n" + f" Airmass = {self.airmass:.2f}\n" + f" Pressure = {self.pressure.to_value(units.mbar):.2f} mbar\n" + f" Humidity = {self.humidity*100.0:.2f} %\n" + f" Temperature = {self.temperature.to_value(units.deg_C):.2f} deg C\n" + f" Reference wavelength = {self.wave_ref.to_value(units.Angstrom):.2f} Angstrom" + ) def calculate_dispersion(self, waves): """ Calculate the total atmospheric dispersion relative to the reference wavelength @@ -531,10 +533,12 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor if self.method == "subpixel": self.spec_subpixel, self.spat_subpixel, self.slice_subpixel = self.cubepar['spec_subpixel'], self.cubepar['spat_subpixel'], self.cubepar['slice_subpixel'] self.skip_subpix_weights = False - msgs.info("Adopting the subpixel algorithm to generate the datacube, with subpixellation scales:" + msgs.newline() + - f" Spectral: {self.spec_subpixel}" + msgs.newline() + - f" Spatial: {self.spat_subpixel}" + msgs.newline() + - f" Slices: {self.slice_subpixel}") + msgs.info( + "Adopting the subpixel algorithm to generate the datacube, with subpixellation scales:\n" + f" Spectral: {self.spec_subpixel}\n" + f" Spatial: {self.spat_subpixel}\n" + f" Slices: {self.slice_subpixel}" + ) elif self.method == "ngp": msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.") self.skip_subpix_weights = True @@ -558,7 +562,9 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor # If a reference image has been set, check that it exists if self.cubepar['reference_image'] is not None: if not os.path.exists(self.cubepar['reference_image']): - raise PypeItError("Reference image does not exist:" + msgs.newline() + 
self.cubepar['reference_image']) + raise PypeItError( + "Reference image does not exist:\n" + self.cubepar['reference_image'] + ) # Load the default scaleimg frame for the scale correction self.scalecorr_default = "none" @@ -580,26 +586,26 @@ def check_outputs(self): outfile = datacube.get_output_filename("", self.cubepar['output_filename'], self.combine) out_whitelight = datacube.get_output_whitelight_filename(outfile) if os.path.exists(outfile) and not self.overwrite: - raise PypeItError("Output filename already exists:"+msgs.newline()+outfile) + raise PypeItError("Output filename already exists:\n"+outfile) if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: - raise PypeItError("Output filename already exists:"+msgs.newline()+out_whitelight) + raise PypeItError("Output filename already exists:\n"+out_whitelight) else: # Finally, if there's just one file, check if the output filename is given if self.numfiles == 1 and self.cubepar['output_filename'] != "": outfile = datacube.get_output_filename("", self.cubepar['output_filename'], True, -1) out_whitelight = datacube.get_output_whitelight_filename(outfile) if os.path.exists(outfile) and not self.overwrite: - raise PypeItError("Output filename already exists:" + msgs.newline() + outfile) + raise PypeItError("Output filename already exists:\n" + outfile) if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: - raise PypeItError("Output filename already exists:" + msgs.newline() + out_whitelight) + raise PypeItError("Output filename already exists:\n" + out_whitelight) else: for ff in range(self.numfiles): outfile = datacube.get_output_filename(self.spec2d[ff], self.cubepar['output_filename'], self.combine, ff+1) out_whitelight = datacube.get_output_whitelight_filename(outfile) if os.path.exists(outfile) and not self.overwrite: - raise PypeItError("Output filename already exists:" + msgs.newline() + outfile) + raise PypeItError("Output filename already exists:\n" + outfile) if os.path.exists(out_whitelight) and self.cubepar['save_whitelight'] and not self.overwrite: - raise PypeItError("Output filename already exists:" + msgs.newline() + out_whitelight) + raise PypeItError("Output filename already exists:\n" + out_whitelight) def set_blaze_spline(self, wave_spl, spec_spl): """ @@ -629,18 +635,21 @@ def set_default_scalecorr(self): msgs.info("The default relative spectral illumination correction will use the science image") self.scalecorr_default = "image" else: - msgs.info("Loading default scale image for relative spectral illumination correction:" + - msgs.newline() + self.cubepar['scale_corr']) + msgs.info( + "Loading default scale image for relative spectral illumination correction:\n" + +self.cubepar['scale_corr'] + ) try: spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['scale_corr'], self.detname, chk_version=self.chk_version) except Exception as e: msgs.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') - msgs.warning("Could not load scaleimg from spec2d file:" + msgs.newline() + - self.cubepar['scale_corr'] + msgs.newline() + - "scale correction will not be performed unless you have specified the correct" + msgs.newline() + - "scale_corr file in the spec2d block") + msgs.warning( + "Could not load scaleimg from spec2d file:\n" + + self.cubepar['scale_corr'] + + "\nscale correction will not be performed unless you have specified the " + "correct\nscale_corr file in the spec2d block") self.cubepar['scale_corr'] = None 
self.scalecorr_default = "none" else: @@ -692,22 +701,26 @@ def get_current_scalecorr(self, spec2DObj, scalecorr=None): this_scalecorr = "none" # Don't do relative spectral illumination scaling else: # Load a user specified frame for sky subtraction - msgs.info("Loading the following frame for the relative spectral illumination correction:" + - msgs.newline() + scalecorr) + msgs.info( + "Loading the following frame for the relative spectral illumination " + "correction:\n" + scalecorr + ) try: spec2DObj_scl = spec2dobj.Spec2DObj.from_file(scalecorr, self.detname, chk_version=self.chk_version) except Exception as e: msgs.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') - raise PypeItError("Could not load skysub image from spec2d file:" + msgs.newline() + scalecorr) + raise PypeItError("Could not load skysub image from spec2d file:\n" + scalecorr) else: relScaleImg = spec2DObj_scl.scaleimg this_scalecorr = scalecorr if this_scalecorr == "none": msgs.info("Relative spectral illumination correction will not be performed.") else: - msgs.info("Using the following frame for the relative spectral illumination correction:" + - msgs.newline() + this_scalecorr) + msgs.info( + "Using the following frame for the relative spectral illumination correction:\n" + + this_scalecorr + ) # Return the scaling correction for this frame return this_scalecorr, relScaleImg @@ -720,19 +733,20 @@ def set_default_skysub(self): self.skyImgDef = np.array([0.0]) # Do not perform sky subtraction self.skySclDef = np.array([0.0]) # Do not perform sky subtraction elif self.cubepar['skysub_frame'] == "image": - msgs.info("The sky model in the spec2d science frames will be used for sky subtraction" + msgs.newline() + - "(unless specific skysub frames have been specified)") + msgs.info("The sky model in the spec2d science frames will be used for sky " + "subtraction\n(unless specific skysub frames have been specified)") self.skysub_default = "image" else: - msgs.info("Loading default image for sky subtraction:" + - msgs.newline() + self.cubepar['skysub_frame']) + msgs.info("Loading default image for sky subtraction:\n" + + self.cubepar['skysub_frame']) try: spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['skysub_frame'], self.detname, chk_version=self.chk_version) skysub_exptime = self.spec.get_meta_value([spec2DObj.head0], 'exptime') except: - raise PypeItError("Could not load skysub image from spec2d file:" + msgs.newline() + self.cubepar['skysub_frame']) + raise PypeItError("Could not load skysub image from spec2d file:\n" + + self.cubepar['skysub_frame']) else: self.skysub_default = self.cubepar['skysub_frame'] self.skyImgDef = spec2DObj.sciimg / skysub_exptime # Sky counts/second @@ -797,20 +811,22 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): this_skysub = "none" # Don't do sky subtraction else: # Load a user specified frame for sky subtraction - msgs.info("Loading skysub frame:" + msgs.newline() + opts_skysub) + msgs.info("Loading skysub frame:\n" + opts_skysub) try: spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts_skysub, self.detname, chk_version=self.chk_version) skysub_exptime = self.spec.get_meta_value([spec2DObj_sky.head0], 'exptime') except: - raise PypeItError("Could not load skysub image from spec2d file:" + msgs.newline() + opts_skysub) + raise PypeItError( + "Could not load skysub image from spec2d file:\n" + opts_skysub + ) skyImg = spec2DObj_sky.sciimg * exptime / skysub_exptime # Sky counts skyScl = spec2DObj_sky.scaleimg this_skysub = opts_skysub # User 
specified spec2d for sky subtraction if this_skysub == "none": msgs.info("Sky subtraction will not be performed.") else: - msgs.info("Using the following frame for sky subtraction:" + msgs.newline() + this_skysub) + msgs.info("Using the following frame for sky subtraction:\n" + this_skysub) # Return the skysub params for this frame return this_skysub, skyImg, skyScl @@ -834,7 +850,9 @@ def add_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): """ # Check if the Flat file exists if not os.path.exists(flatfile): - msgs.warning("Grating correction requested, but the following file does not exist:" + msgs.newline() + flatfile) + msgs.warning( + "Grating correction requested, but the following file does not exist:\n" + flatfile + ) return if flatfile not in self.flat_splines.keys(): msgs.info("Calculating relative sensitivity for grating correction") @@ -999,7 +1017,7 @@ def load(self): # Load all spec2d files and prepare the data for making a datacube for ff, fil in enumerate(self.spec2d): # Load it up - msgs.info(f"Loading PypeIt spec2d frame ({ff+1}/{len(self.spec2d)}):" + msgs.newline() + fil) + msgs.info(f"Loading PypeIt spec2d frame ({ff+1}/{len(self.spec2d)}):\n" + fil) spec2DObj = spec2dobj.Spec2DObj.from_file(fil, self.detname, chk_version=self.chk_version) detector = spec2DObj.detector @@ -1322,8 +1340,9 @@ def run_align(self): # Convert pixel shift to degrees shift ra_shift *= self._dspat/cosdec dec_shift *= self._dspat - msgs.info("Spatial shift of cube #{0:d}:".format(ff + 1) + msgs.newline() + - "RA, DEC (arcsec) = {0:+0.3f} E, {1:+0.3f} N".format(ra_shift*3600.0, dec_shift*3600.0)) + msgs.info(f"Spatial shift of cube #{ff + 1:d}:\n" + f"RA, DEC (arcsec) = {ra_shift*3600.0:+0.3f} E, {dec_shift*3600.0:+0.3f} N" + ) # Store the shift in the RA and DEC offsets in degrees ra_offsets[ff] += ra_shift dec_offsets[ff] += dec_shift diff --git a/pypeit/core/coadd.py b/pypeit/core/coadd.py index 2e3aa2d525..426cb40f6a 100644 --- a/pypeit/core/coadd.py +++ b/pypeit/core/coadd.py @@ -117,14 +117,17 @@ def renormalize_errors(chi, mask, clip=6.0, max_corr=5.0, title = '', debug=Fals chi2_sigrej = np.percentile(chi2[maskchi], 100.0*gauss_prob) sigma_corr = np.sqrt(chi2_sigrej) if sigma_corr < 1.0: - msgs.warning("Error renormalization found correction factor sigma_corr = {:f}".format(sigma_corr) + - " < 1." + msgs.newline() + - " Errors are overestimated so not applying correction") + msgs.warning( + f"Error renormalization found correction factor sigma_corr = {sigma_corr} < 1.\n" + "Errors are overestimated so not applying correction." + ) sigma_corr = 1.0 if sigma_corr > max_corr: - msgs.warning(("Error renormalization found sigma_corr/sigma = {:f} > {:f}." + msgs.newline() + - "Errors are severely underestimated." + msgs.newline() + - "Setting correction to sigma_corr = {:4.2f}").format(sigma_corr, max_corr, max_corr)) + msgs.warning( + f"Error renormalization found sigma_corr/sigma = {sigma_corr} > {max_corr}.\n" + "Errors are severely underestimated.\nSetting correction to sigma_corr = " + f"{max_corr:4.2f}" + ) sigma_corr = max_corr if debug: @@ -1027,13 +1030,13 @@ def robust_median_ratio( else: if (np.sum(calc_mask) <= min_good*nspec): msgs.warning( - f'Found only {np.sum(calc_mask)} good pixels for computing median flux ratio.' 
- + msgs.newline() + 'No median rescaling applied'
+ f'Found only {np.sum(calc_mask)} good pixels for computing median flux ratio.\n'
+ 'No median rescaling applied'
)
if (snr_resc_med <= snr_do_not_rescale):
msgs.warning(
f'Median flux ratio of pixels in reference spectrum {snr_resc_med} <= '
- f'snr_do_not_rescale = {snr_do_not_rescale}.' + msgs.newline()
+ f'snr_do_not_rescale = {snr_do_not_rescale}.\n'
+ 'No median rescaling applied'
)
ratio = 1.0
diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py
index 2011df3db9..3e849b95d5 100644
--- a/pypeit/core/datacube.py
+++ b/pypeit/core/datacube.py
@@ -589,15 +589,19 @@ def get_whitelight_range(wavemin, wavemax, wl_range):
wlrng = [wavemin, wavemax]
if wl_range[0] is not None:
if wl_range[0] < wavemin:
- msgs.warning("The user-specified minimum wavelength ({0:.2f}) to use for the white light".format(wl_range[0]) +
- msgs.newline() + "images is lower than the recommended value ({0:.2f}),".format(wavemin) +
- msgs.newline() + "which ensures that all spaxels cover the same wavelength range.")
+ msgs.warning(
+ f"The user-specified minimum wavelength ({wl_range[0]:.2f}) to use for the white "
+ f"light\nimages is lower than the recommended value ({wavemin:.2f}),\n"
+ "which ensures that all spaxels cover the same wavelength range."
+ )
wlrng[0] = wl_range[0]
if wl_range[1] is not None:
if wl_range[1] > wavemax:
- msgs.warning("The user-specified maximum wavelength ({0:.2f}) to use for the white light".format(wl_range[1]) +
- msgs.newline() + "images is greater than the recommended value ({0:.2f}),".format(wavemax) +
- msgs.newline() + "which ensures that all spaxels cover the same wavelength range.")
+ msgs.warning(
+ f"The user-specified maximum wavelength ({wl_range[1]:.2f}) to use for the white "
+ f"light\nimages is greater than the recommended value ({wavemax:.2f}),\n"
+ "which ensures that all spaxels cover the same wavelength range."
+ )
wlrng[1] = wl_range[1]
msgs.info("The white light images will cover the wavelength range: {0:.2f}A - {1:.2f}A".format(wlrng[0], wlrng[1]))
return wlrng
@@ -714,8 +718,10 @@ def align_user_offsets(ifu_ra, ifu_dec, ra_offset, dec_offset):
# Apply the shift
out_ra_offsets[ff] = ref_shift_ra[ff] + ra_offset[ff]
out_dec_offsets[ff] = ref_shift_dec[ff] + dec_offset[ff]
- msgs.info("Spatial shift of cube #{0:d}:".format(ff + 1) + msgs.newline() +
- "RA, DEC (arcsec) = {0:+0.3f} E, {1:+0.3f} N".format(ra_offset[ff]*3600.0, dec_offset[ff]*3600.0))
+ msgs.info(
+ f"Spatial shift of cube #{ff + 1}:\nRA, DEC (arcsec) = {ra_offset[ff]*3600.0:+0.3f} "
+ f"E, {dec_offset[ff]*3600.0:+0.3f} N"
+ )
return out_ra_offsets, out_dec_offsets
@@ -751,19 +757,19 @@ def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None):
if np.any(np.abs(ratio) > 1E-4):
msgs.warning("The pixel scales of all input frames are not the same!")
spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,0]*3600.0])
- msgs.info("Pixel scales of all input frames:" + msgs.newline() + spatstr + "arcseconds")
+ msgs.info("Pixel scales of all input frames:\n" + spatstr + " arcseconds")
# Make sure all frames have consistent slicer scales
ratio = (spatscale[:, 1] - spatscale[0, 1]) / spatscale[0, 1]
if np.any(np.abs(ratio) > 1E-4):
msgs.warning("The slicer scales of all input frames are not the same!")
spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,1]*3600.0])
- msgs.info("Slicer scales of all input frames:" + msgs.newline() + spatstr + "arcseconds")
+ msgs.info("Slicer scales of all input frames:\n" + spatstr + " arcseconds")
# Make sure all frames have consistent wavelength sampling
ratio = (specscale - specscale[0]) / specscale[0]
if np.any(np.abs(ratio) > 1E-2):
msgs.warning("The wavelength samplings of the input frames are not the same!")
specstr = ", ".join(["{0:.6f}".format(ss) for ss in specscale])
- msgs.info("Wavelength samplings of all input frames:" + msgs.newline() + specstr + "Angstrom")
+ msgs.info("Wavelength samplings of all input frames:\n" + specstr + " Angstrom")
# If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale
_dspat = np.max(spatscale) if dspat is None else dspat
@@ -999,14 +1005,16 @@ def create_wcs(raImg, decImg, waveImg, slitid_img_gpm, dspat, dwave,
numra, numdec = reference_image.shape
cubewcs = generate_WCS(coord_min, coord_dlt, numra, equinox=equinox, name=specname)
- msgs.info(msgs.newline() + "-" * 40 +
- msgs.newline() + "Parameters of the WCS:" +
- msgs.newline() + "RA min = {0:f}".format(coord_min[0]) +
- msgs.newline() + "DEC min = {0:f}".format(coord_min[1]) +
- msgs.newline() + "WAVE min, max = {0:f}, {1:f}".format(_wave_min, _wave_max) +
- msgs.newline() + "Spaxel size = {0:f} arcsec".format(3600.0 * dspat) +
- msgs.newline() + "Wavelength step = {0:f} A".format(dwave) +
- msgs.newline() + "-" * 40)
+ msgs.info(
+ f'\n{"-"*40}'
+ "\nParameters of the WCS:"
+ f"\nRA min = {coord_min[0]:f}"
+ f"\nDEC min = {coord_min[1]:f}"
+ f"\nWAVE min, max = {_wave_min:f}, {_wave_max:f}"
+ f"\nSpaxel size = {3600.0 * dspat:f} arcsec"
+ f"\nWavelength step = {dwave:f} A"
+ f'\n{"-"*40}'
+ )
# Generate the output binning
xbins = np.arange(1 + numra) - 0.5
diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py
index e01e769915..9b1e3bfcb6 100644
--- a/pypeit/core/extract.py
+++ b/pypeit/core/extract.py
@@ -893,8 +893,10 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave,
eligible_pixels = np.sum((wave >= wave_min) & (wave <= wave_max))
good_pix_frac = 0.05
if (np.sum(indsp) < good_pix_frac*eligible_pixels) or (eligible_pixels == 0):
- msgs.warning('There are no pixels eligible to be fit for the object profile.' + msgs.newline() +
- 'There is likely an issue in local_skysub_extract. Returning a Gassuain with fwhm={:5.3f}'.format(thisfwhm))
+ msgs.warning(
+ 'There are no pixels eligible to be fit for the object profile.\nThere is likely an '
+ f'issue in local_skysub_extract. Returning a Gaussian with fwhm={thisfwhm:5.3f}'
+ )
profile_model = return_gaussian(sigma_x, None, thisfwhm, 0.0, obj_string, False)
return profile_model, trace_in, fwhmfit, 0.0
@@ -908,8 +910,10 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave,
try:
cont_flux, _ = c_answer.value(wave[indsp])
except:
- msgs.warning('Problem estimating S/N ratio of spectrum' + msgs.newline() +
- 'There is likely an issue in local_skysub_extract. Returning a Gassuain with fwhm={:5.3f}'.format(thisfwhm))
+ msgs.warning(
+ 'Problem estimating S/N ratio of spectrum\nThere is likely an issue in '
+ f'local_skysub_extract. Returning a Gaussian with fwhm={thisfwhm:5.3f}'
+ )
profile_model = return_gaussian(sigma_x, None, thisfwhm, 0.0, obj_string, False)
return profile_model, trace_in, fwhmfit, 0.0
diff --git a/pypeit/core/findobj_skymask.py b/pypeit/core/findobj_skymask.py
index f8128db854..3a2efcac73 100644
--- a/pypeit/core/findobj_skymask.py
+++ b/pypeit/core/findobj_skymask.py
@@ -418,8 +418,10 @@ def ech_fof_sobjs(sobjs:specobjs.SpecObjs,
for iord in range(norders):
on_order = (obj_id_init == uni_obj_id_init[iobj]) & (sobjs.ECH_ORDER == order_vec[iord])
if (np.sum(on_order) > 1):
- msgs.warning('Found multiple objects in a FOF group on order iord={:d}'.format(order_vec[iord]) + msgs.newline() +
- 'Spawning new objects to maintain a single object per order.')
+ msgs.warning(
+ f'Found multiple objects in a FOF group on order iord={order_vec[iord]}\n'
+ 'Spawning new objects to maintain a single object per order.'
+ ) off_order = (obj_id_init == uni_obj_id_init[iobj]) & (sobjs.ECH_ORDER != order_vec[iord]) ind = np.where(on_order)[0] if np.any(off_order): diff --git a/pypeit/core/fitting.py b/pypeit/core/fitting.py index d16e201f6d..eb1a61c283 100644 --- a/pypeit/core/fitting.py +++ b/pypeit/core/fitting.py @@ -187,8 +187,10 @@ def fit(self): xv, y_out, self.order[0], w=np.sqrt(w_out) if w_out is not None else None) # numpy convention else: - raise PypeItError("Fitting function '{0:s}' is not implemented yet" + msgs.newline() + - "Please choose from 'polynomial', 'legendre', 'chebyshev','polynomial2d', 'legendre2d'") + raise PypeItError( + f"Fitting function '{self.func}' is not implemented yet\nPlease choose from " + "'polynomial', 'legendre', 'chebyshev', 'polynomial2d', 'legendre2d', 'chebyshev2d'" + ) self.success = 1 return self.success @@ -299,8 +301,10 @@ def evaluate_fit(fitc, func, x, x2=None, minx=None, return (np.polynomial.legendre.legval(xv, fitc) if func == "legendre" else np.polynomial.chebyshev.chebval(xv, fitc)) else: - raise PypeItError("Fitting function '{0:s}' is not implemented yet" + msgs.newline() + - "Please choose from 'polynomial', 'legendre', 'chebyshev', 'polynomial2d', 'legendre2d', 'chebyshev2d'") + raise PypeItError( + f"Fitting function '{func}' is not implemented yet\nPlease choose from " + "'polynomial', 'legendre', 'chebyshev', 'polynomial2d', 'legendre2d', 'chebyshev2d'" + ) def robust_fit(xarray, yarray, order, x2=None, function='polynomial', diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index 21ca1cae37..a2d8840f40 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -534,17 +534,19 @@ def tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=0.1, deb # Check if the shift is within the allowed range if np.abs(left_shift) > maxfrac: - msgs.warning('Left slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( - 100*left_shift, 100*maxfrac) + msgs.newline() + - 'The left edge will not be tweaked.') + msgs.warning( + f'Left slit edge shift of {100*left_shift:.1f}% exceeds the maximum allowed of ' + f'{100*maxfrac:.1f}%\nThe left edge will not be tweaked.' + ) left_shift = 0.0 else: msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100 * left_shift) + ' ({0:.2f} pixels)'.format(left_shift * slitwidth)) if np.abs(right_shift) > maxfrac: - msgs.warning('Right slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( - 100*right_shift, 100*maxfrac) + msgs.newline() + - 'The right edge will not be tweaked.') + msgs.warning( + f'Right slit edge shift of {100*right_shift:.1f}% exceeds the maximum allowed of ' + f'{100*maxfrac:.1f}%\nThe right edge will not be tweaked.' + ) right_shift = 0.0 else: msgs.info('Tweaking right slit boundary by {0:.1f}%'.format(100 * right_shift) + diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index 28ac90973b..7c2bd1b5ac 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -112,10 +112,12 @@ def spat_flexure_shift(sciimg, slits, bpm=None, maxlag=20, sigdetect=10., debug= _, _, pix_max, _, _, _, _, _ = arc.detect_lines(xcorr_max, cont_subtract=False, input_thresh=0., nfind=1, debug=debug) # No peak? -- e.g. data fills the entire detector if (len(pix_max) == 0) or pix_max[0] == -999.0: - msgs.warning('No peak found in the x-correlation between the traced slits and the science/calib image.' 
- ' Assuming there is NO SPATIAL FLEXURE.'+msgs.newline() + 'If a flexure is expected, ' - 'consider either changing the maximum lag for the cross-correlation, ' - 'or the "spat_flexure_sigdetect" parameter, or use the manual flexure correction.') + msgs.warning( + 'No peak found in the x-correlation between the traced slits and the science/calib ' + 'image. Assuming there is NO SPATIAL FLEXURE.\nIf a flexure is expected, consider ' + 'either changing the maximum lag for the cross-correlation, or the ' + '"spat_flexure_sigdetect" parameter, or use the manual flexure correction.' + ) return 0. @@ -480,12 +482,14 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N f"larger than specified maximum {mxshft} pix.") if excess_shft == "crash": - raise PypeItError(f"Flexure compensation failed for one of your{msgs.newline()}" - f"objects. Either adjust the \"spec_maxshift\"{msgs.newline()}" - f"FlexurePar Keyword, or see the flexure documentation{msgs.newline()}" - f"for information on how to bypass this error using the{msgs.newline()}" - f"\"excessive_shift\" keyword.{msgs.newline()}" - "https://pypeit.readthedocs.io/en/release/flexure.html") + raise PypeItError( + "Flexure compensation failed for one of your\n" + "objects. Either adjust the \"spec_maxshift\"\n" + "FlexurePar Keyword, or see the flexure documentation\n" + "for information on how to bypass this error using the\n" + "\"excessive_shift\" keyword.\n" + "https://pypeit.readthedocs.io/en/release/flexure.html" + ) elif excess_shft == "set_to_zero": msgs.warning("Flexure compensation failed for one of your objects.") diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index 6822b73599..2889d005d4 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -284,10 +284,10 @@ def find_standard_file(ra, dec, toler=20.*units.arcmin, check=False, to_pkg=None if check: return False - raise PypeItError(f"No standard star was found within a tolerance of {toler}{msgs.newline()}" - f"Closest standard was {closest['name']} at separation {closest['sep'].to('arcmin')}") - - return None + raise PypeItError( + f"No standard star was found within a tolerance of {toler}\nClosest standard was " + f"{closest['name']} at separation {closest['sep'].to('arcmin')}" + ) def stellar_model(V, sptype): @@ -508,11 +508,11 @@ def load_extinction_data(longitude, latitude, extinctfilepar, msgs.info(f"Using {extinct_file} for extinction corrections.") else: # Crash with a helpful error message - msgs.warning(f"No observatory extinction file was found within {toler}{msgs.newline()}" - f"of observation at lon = {longitude:.1f} lat = {latitude:.1f} You may{msgs.newline()}" - f"select an included extinction file (e.g., KPNO) for use by{msgs.newline()}" - f"adding the following to the Sensitivity Input File{msgs.newline()}" - "(for pypeit_sensfunc):") + msgs.warning( + f"No observatory extinction file was found within {toler}\nof observation at " + f"lon = {longitude:.1f} lat = {latitude:.1f} You may\nselect an included " + "extinction file (e.g., KPNO) for use by\nadding the following to the " + f"Sensitivity Input File\n(for pypeit_sensfunc):") msgs.pypeitpar(['sensfunc', 'UVIS', 'extinct_file = kpnoextinct.dat']) msgs.warning("or the following to the Flux File (for pypeit_flux_calib):") msgs.pypeitpar(['fluxcalib', 'extinct_file = kpnoextinct.dat']) diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py index 41a2c9f9e2..7ccfd1f059 100644 --- a/pypeit/edgetrace.py +++ b/pypeit/edgetrace.py @@ -779,15 +779,12 @@ 
def auto_trace(self, bpm=None, debug=0):
if self.par['auto_pca'] and not self.can_pca() and not self.is_empty and self.par['sync_predict'] == 'pca':
# TODO: This causes the code to fault. Maybe there's a way
# to catch this earlier on?
- msgs.warning('Sync predict cannot use PCA because too few edges were found. If you are '
- 'reducing multislit or echelle data, you may need a better trace image or '
- 'change the mode used to predict traces (see below). If you are reducing '
- 'longslit data, make sure to set the sync_predict parameter to nearest: '
- + msgs.newline() +
- ' [calibrations]' + msgs.newline() +
- ' [[slitedges]]' + msgs.newline() +
- ' sync_predict = nearest')
- # self.par['sync_predict'] = 'nearest'
+ msgs.warning(
+ 'Sync predict cannot use PCA because too few edges were found. If you are '
+ 'reducing multislit or echelle data, you may need a better trace image or '
+ 'change the mode used to predict traces. If you are reducing '
+ 'longslit data, make sure to set the "sync_predict" parameter to "nearest".'
+ )
self.success = False
else:
# Left-right synchronize the traces
diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py
index c78773a9f0..6118b1f85d 100644
--- a/pypeit/flatfield.py
+++ b/pypeit/flatfield.py
@@ -1011,7 +1011,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False):
if spec_nfit/spec_ntot < 0.5:
# TODO: Shouldn't this raise an exception or continue to the next slit instead?
msgs.warning('Spectral fit includes only {:.1f}'.format(100*spec_nfit/spec_ntot)
- + '% of the pixels on this slit.' + msgs.newline()
+ + '% of the pixels on this slit.\n'
+ ' Either the slit has many bad pixels or the number of '
'trimmed pixels is too large.')
@@ -1099,7 +1099,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False):
if spat_nfit/spat_ntot < 0.5:
# TODO: Shouldn't this raise an exception or continue to the next slit instead?
msgs.warning('Spatial fit includes only {:.1f}'.format(100*spat_nfit/spat_ntot)
- + '% of the pixels on this slit.' + msgs.newline()
+ + '% of the pixels on this slit.\n'
+ ' Either the slit has many bad pixels, the model of the '
'spectral shape is poor, or the illumination profile is very irregular.')
@@ -1317,17 +1317,21 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False):
# Check for infinities and NaNs in the flat-field model
winfnan = np.where(np.logical_not(np.isfinite(self.flat_model[onslit_tweak])))
if winfnan[0].size != 0:
- msgs.warning('There are {0:d} pixels with non-finite values in the flat-field model '
- 'for slit {1:d}!'.format(winfnan[0].size, slit_spat) + msgs.newline() +
- 'These model pixel values will be set to the raw pixel value.')
+ msgs.warning(
+ f'There are {winfnan[0].size} pixels with non-finite values in the flat-field '
+ f'model for slit {slit_spat}!\nThese model pixel values will be set to the '
+ 'raw pixel value.'
+ ) self.flat_model[np.where(onslit_tweak)[0][winfnan]] = rawflat[np.where(onslit_tweak)[0][winfnan]] # Check for unrealistically high or low values of the model whilo = np.where((self.flat_model[onslit_tweak] >= nonlinear_counts) | (self.flat_model[onslit_tweak] <= 0.0)) if whilo[0].size != 0: - msgs.warning('There are {0:d} pixels with unrealistically high or low values in the flat-field model ' - 'for slit {1:d}!'.format(whilo[0].size, slit_spat) + msgs.newline() + - 'These model pixel values will be set to the raw pixel value.') + msgs.warning( + f'There are {whilo[0].size} pixels with unrealistically high or low values in ' + f'the flat-field model for slit {slit_spat}!\nThese model pixel values will ' + 'be set to the raw pixel value.' + ) self.flat_model[np.where(onslit_tweak)[0][whilo]] = rawflat[np.where(onslit_tweak)[0][whilo]] # Construct the pixel flat @@ -2038,7 +2042,7 @@ def spatillum_finecorr_qa(normed, finecorr, left, right, ypos, cut, outfile=None plt.show() else: plt.savefig(outfile, dpi=400) - msgs.info("Saved QA:"+msgs.newline()+outfile) + msgs.info("Saved QA:\n"+outfile) plt.close() plt.rcdefaults() @@ -2106,7 +2110,7 @@ def detector_structure_qa(det_resp, det_resp_model, outfile=None, title="Detecto plt.show() else: plt.savefig(outfile, dpi=400) - msgs.info("Saved QA:" + msgs.newline() + outfile) + msgs.info("Saved QA:\n" + outfile) plt.close() plt.rcdefaults() @@ -2255,8 +2259,10 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ if (ii == 1) and (slits.spat_id[wvsrt[ss]] == slit_illum_ref_idx): # This must be the first element of the loop by construction, but throw an error just in case if ss != 0: - raise PypeItError("CODING ERROR - An error has occurred in the relative spectral illumination." + - msgs.newline() + "Please contact the developers.") + raise PypeItError( + "CODING ERROR - An error has occurred in the relative spectral " + "illumination.\nPlease contact the developers." 
+ ) tmp_cntr = cntr * spec_ref tmp_arr = hist * utils.inverse(tmp_cntr) # Calculate a smooth version of the relative response @@ -2452,28 +2458,32 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi if not pixelflat_file.parent.is_dir(): pixelflat_file.parent.mkdir(parents=True) new_hdulist.writeto(pixelflat_file, overwrite=True) - msgs.info(f'A slitless Pixel Flat file for detectors {detname_list} has been saved to {msgs.newline()}' + msgs.info(f'A slitless Pixel Flat file for detectors {detname_list} has been saved to\n' f'{pixelflat_file}') # common msg - add_msgs = f"add the following to your PypeIt Reduction File:{msgs.newline()}" \ - f" [calibrations]{msgs.newline()}" \ - f" [[flatfield]]{msgs.newline()}" \ - f" pixelflat_file = {pixelflat_name}{msgs.newline()}{msgs.newline()}{msgs.newline()}" \ - f"Please consider sharing your Pixel Flat file with the PypeIt Developers.{msgs.newline()}" \ - + add_msgs = ( + f"add the following to your PypeIt Reduction File:\n" + f" [calibrations]\n" + f" [[flatfield]]\n" + f" pixelflat_file = {pixelflat_name}\n\n\n" + f"Please consider sharing your Pixel Flat file with the PypeIt Developers.\n" + ) if to_cache: # NOTE that the file saved in the cache is gzipped, while the one saved in the outdir is not # This prevents `dataPaths.pixelflat.get_file_path()` from returning the file saved in the outdir cache.write_file_to_cache(pixelflat_file, pixelflat_name+'.gz', f"pixelflats") - msgs.info(f"The slitless Pixel Flat file has also been saved to the PypeIt cache directory {msgs.newline()}" - f"{str(dataPaths.pixelflat)} {msgs.newline()}" - f"It will be automatically used in this run. " - f"If you want to use this file in future runs, {add_msgs}") + msgs.info( + f"The slitless Pixel Flat file has also been saved to the PypeIt cache directory\n" + f"{str(dataPaths.pixelflat)}\n" + f"It will be automatically used in this run. 
" + f"If you want to use this file in future runs, {add_msgs}") else: - msgs.info(f"To use this file, move it to the PypeIt data directory {msgs.newline()}" - f"{str(dataPaths.pixelflat)} {msgs.newline()} and {add_msgs}") + msgs.info( + f"To use this file, move it to the PypeIt data directory\n" + f"{str(dataPaths.pixelflat)}\n and {add_msgs}" + ) def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None, chk_version=False): diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py index 7b564b26fd..57469f2902 100644 --- a/pypeit/inputfiles.py +++ b/pypeit/inputfiles.py @@ -108,7 +108,7 @@ def readlines(ifile:str): """ # Check the files if not os.path.isfile(ifile): - raise PypeItError('The filename does not exist -' + msgs.newline() + ifile) + raise PypeItError('The filename does not exist -\n' + ifile) # Read the input lines and replace special characters with open(ifile, 'r') as f: diff --git a/pypeit/metadata.py b/pypeit/metadata.py index d88948b550..60cd753a74 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -1431,9 +1431,9 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True): try: type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(',')) except ValueError as err: - raise PypeItError(f'Improper frame type supplied!{msgs.newline()}' - f'{err}{msgs.newline()}' - 'Check your PypeIt Reduction File') + raise PypeItError( + f'Improper frame type supplied!l\n{err}\nCheck your PypeIt Reduction File' + ) return self.set_frame_types(type_bits, merge=merge) # Loop over the frame types diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 7d333235fe..41399abcd0 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -540,18 +540,18 @@ def reduce_exposure(self, frames, bg_frames=None, std_outfile=None): objFind_list = [] # Print status message - msgs_string = 'Reducing target {:s}'.format(self.fitstbl['target'][frames[0]]) + msgs.newline() + msgs_string = f'Reducing target {self.fitstbl['target'][frames[0]]}\n' # TODO: Print these when the frames are actually combined, # backgrounds are used, etc? - msgs_string += 'Combining frames:' + msgs.newline() + msgs_string += 'Combining frames:\n' for iframe in frames: - msgs_string += '{0:s}'.format(self.fitstbl['filename'][iframe]) + msgs.newline() + msgs_string += f'{self.fitstbl['filename'][iframe]}\n' msgs.info(msgs_string) if has_bg: bg_msgs_string = '' for iframe in bg_frames: - bg_msgs_string += '{0:s}'.format(self.fitstbl['filename'][iframe]) + msgs.newline() - bg_msgs_string = msgs.newline() + 'Using background from frames:' + msgs.newline() + bg_msgs_string + bg_msgs_string += f'{self.fitstbl['filename'][iframe]}\n' + bg_msgs_string = '\nUsing background from frames:\n' + bg_msgs_string msgs.info(bg_msgs_string) # Find the detectors to reduce @@ -941,7 +941,9 @@ def load_skyregions(self, initial_slits=False, scifile=None, frame=None, spat_fl # Get the regions status, regions = skysub.read_userregions(skyregtxt, self.caliBrate.slits.nslits, maxslitlength) if status == 1: - raise PypeItError("Unknown error in sky regions definition. Please check the value:" + msgs.newline() + skyregtxt) + raise PypeItError( + "Unknown error in sky regions definition. 
Please check the value:\n" + skyregtxt + ) elif status == 2: raise PypeItError("Sky regions definition must contain a percentage range, and therefore must contain a ':'") # Generate and return image diff --git a/pypeit/specobj.py b/pypeit/specobj.py index 320fd8e838..541a2e4f67 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -525,8 +525,9 @@ def apply_spectral_flexure(self, shift, sky_spec): # Apply for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is not None: - msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) + - msgs.newline() + "{0:s}".format(str(self.NAME))) + msgs.info( + f"Applying flexure correction to {attr:s} extraction for object:\n{self.NAME}" + ) self[attr+'_WAVE'] = flexure.flexure_interp(shift, self[attr+'_WAVE']).copy() # Shift sky spec too twave = flexure.flexure_interp(shift, sky_spec.wavelength.value) * units.AA @@ -590,7 +591,7 @@ def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_ for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is None: continue - msgs.info("Fluxing {:s} extraction for:".format(attr) + msgs.newline() + "{}".format(self)) + msgs.info(f"Fluxing {attr} extraction for:\n{self}") wave = self[attr+'_WAVE'] # Interpolate the sensitivity function onto the wavelength grid of the data @@ -632,9 +633,9 @@ def apply_helio(self, vel_corr, refframe): # Apply for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is not None: - msgs.info('Applying {0} correction to '.format(refframe) - + '{0} extraction for object:'.format(attr) - + msgs.newline() + "{0}".format(str(self.NAME))) + msgs.info( + f'Applying {refframe} correction to {attr} extraction for object:\n'{self.NAME}' + ) self[attr+'_WAVE'] *= vel_corr # Record self['VEL_TYPE'] = refframe diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 0f68759b95..9a00155ad1 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -249,7 +249,7 @@ def unpack_object(self, ret_flam=False, log10blaze=False, min_blaze_value=1e-3, if not remove_missing: raise PypeItError(msg) else: - msg += f"{msgs.newline()}-- The missing data will be removed --" + msg += f"\n-- The missing data will be removed --" msgs.warning(msg) # Remove missing data r_indx = np.where(none_flux)[0] From 44a350ef3f593ec26d3fcdb90614d479f44eec72 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 17:17:02 -0700 Subject: [PATCH 06/33] newline --- pypeit/core/flux_calib.py | 9 +- pypeit/core/framematch.py | 92 ------- pypeit/core/gui/identify.py | 21 +- pypeit/core/skysub.py | 48 ++-- pypeit/core/tracewave.py | 7 +- pypeit/core/wavecal/autoid.py | 422 ++++++++--------------------- pypeit/core/wavecal/waveio.py | 6 +- pypeit/core/wavecal/wvutils.py | 24 +- pypeit/edgetrace.py | 2 +- pypeit/scripts/arxiv_solution.py | 4 +- pypeit/scripts/coadd_2dspec.py | 10 +- pypeit/scripts/flux_calib.py | 12 +- pypeit/scripts/print_bpm.py | 9 +- pypeit/scripts/ql.py | 22 +- pypeit/scripts/setup_coadd2d.py | 4 +- pypeit/scripts/show_2dspec.py | 4 +- pypeit/specobj.py | 2 +- pypeit/spectrographs/aat_uhrf.py | 8 +- pypeit/spectrographs/ldt_deveny.py | 4 +- pypeit/spectrographs/p200_dbsp.py | 2 +- pypeit/spectrographs/vlt_fors.py | 8 +- pypeit/specutils/pypeit_loaders.py | 6 +- 22 files changed, 236 insertions(+), 490 deletions(-) diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index 50baad69eb..ecbd380115 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -263,10 +263,11 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, 
tellmodel=None, delta "par['sensfunc']['extrap_red'] to extrapolate further and recreate your " "sensfunc. But we are extrapolating per your direction. Good luck!") else: - raise PypeItError("Your data extends beyond the bounds of your sensfunc. " + msgs.newline() + - "Adjust the par['sensfunc']['extrap_blu'] and/or " - "par['sensfunc']['extrap_red'] to extrapolate further and recreate " - "your sensfunc.") + raise PypeItError( + "Your data extends beyond the bounds of your sensfunc.\nAdjust the " + "par['sensfunc']['extrap_blu'] and/or par['sensfunc']['extrap_red'] to " + "extrapolate further and recreate your sensfunc." + ) # This is the S_lam factor required to convert N_lam = counts/sec/Ang to # F_lam = 1e-17 erg/s/cm^2/Ang, i.e. F_lam = S_lam*N_lam diff --git a/pypeit/core/framematch.py b/pypeit/core/framematch.py index 52bb4711ad..5c5b495283 100644 --- a/pypeit/core/framematch.py +++ b/pypeit/core/framematch.py @@ -136,95 +136,3 @@ def check_frame_exptime(exptime, exprng): if exprng[1] is not None: indx[indx] &= (exptime[indx] <= exprng[1]) return indx - - -# TODO: May want to keep this in case we ever try to bring it back.... -#def group_AB_frames(file_list, targets, coords, max_nod_sep=2): -# """ -# Group files into a ABBA or AB sequences. -# -# Args: -# file_list (:obj:`list`): -# A list of file names. -# targets (:obj:`dict`): -# A dictionary that matches each file to a unique target name. -# The target name can be one of the files in the file list. -# coords (:class:`astropy.coordinates.SkyCoord`): -# The coordinates of all the exposures. Number of coordinates -# should match the number of files. -# max_nod_sep (:obj:`int`, optional): -# The maximum separation (arcsec) between the 1st and 4th -# frame sky coordinates in the ABBA sequence that is allowed -# when identifying the sequence. Note that the default (2 -# arcsec) is arbitrary. -# -# Returns: -# list: -# A list that matches the length of the input list of files. -# Each file in an AB or ABBA sequence is identified with it's -# pair in the sequence. -# """ -# -# AB_frame = [''] * len(file_list) -# -# for key, value in targets.items(): -# files = file_list[value] -# -# # Check here that there are more than 1 files and that the -# # number of files is even -# if len(files) == 1: -# msgs.warning('Cannot perform ABBA reduction on targets with 1 file') -# elif len(files) % 2 != 0: -# msgs.warning('Expected an even number of files associated with target ' + key) -# -# # TODO: Check for increasing time? Files are read in numerical -# # sequential order -- should be in order of increasing time -# # anyway.. 
-# -# # Assume that the files are initially in ABBA order and proceed -# ABBA_coords = coords[value] -# -# # Break files into ABBA groups (includes remainder if there are only 2 files) -# file_groups = [files[i:i+4] for i in range(0,len(files),4)] -# ABBA_groups = [ABBA_coords[i:i + 4] for i in range(0, len(ABBA_coords), 4)] -# value_groups = [value[i:i + 4] for i in range(0, len(ABBA_coords), 4)] -# -# for group in range(len(ABBA_groups)): -# if len(ABBA_groups[group]) == 2: -# # Warn user that if there are any groups of only 2 -# # files, assuming they are in order of A and B -# msgs.info('Assuming these two frames are A and B frame:' -# + msgs.newline() + file_groups[group][0] -# + msgs.newline() + file_groups[group][1]) -# elif len(ABBA_groups[group]) == 4: -# # Check that frames 1, 4 of an ABBA sequence are at the -# # same nod position (A) based on their RA, DEC -# AA_sep = ABBA_coords[0].separation(ABBA_coords[-1]).arcsec -# BB_sep = ABBA_coords[1].separation(ABBA_coords[2]).arcsec -# if AA_sep > max_nod_sep or BB_sep > max_nod_sep: -# if AA_sep > max_nod_sep: -# msgs.warning('Separation between 1st and 4th frame in presumed ABBA sequence ' -# 'have a large separation ({0}).'.format(AA_sep)) -# if BB_sep > max_nod_sep: -# msgs.warning('Separation between 2nd and 3rd frame in presumed ABBA sequence ' -# 'have a large separation ({0}).'.format(BB_sep)) -# msgs.warning('Check ABBA identification for target {0} group {1}:'.format( -# target, group) + msgs.newline() + 'A:' + file_groups[group][0] -# + msgs.newline() + 'B:' + file_groups[group][1] -# + msgs.newline() + 'B:' + file_groups[group][2] -# + msgs.newline() + 'A:' + file_groups[group][3]) -# else: -# raise PypeItError('BUG: This should never be reached.') -# -# # Flip group from ABBA to BABA, or AB to BA -# AB_idx_flip = np.copy(value_groups[group]) -# AB_idx_flip[::2], AB_idx_flip[1::2] \ -# = value_groups[group][1::2], value_groups[group][::2] -# -# # Associate each file in the group with its AB pair -# for i,j in enumerate(value_groups[group]): -# AB_frame[j] = file_list[AB_idx_flip[i]] -# -# return AB_frame - - diff --git a/pypeit/core/gui/identify.py b/pypeit/core/gui/identify.py index a048866dec..e9df262d41 100644 --- a/pypeit/core/gui/identify.py +++ b/pypeit/core/gui/identify.py @@ -815,9 +815,10 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, #better try again... Return to the start of the loop continue if len(order_vec) != len(wvcalib.wv_fits): - msgs.warning(f'The number of orders in this list, {order_vec} '+msgs.newline()+ - f'does not match the number of traces: {len(wvcalib.wv_fits)}' + msgs.newline() + - 'Please try again...') + msgs.warning( + f'The number of orders in this list, {order_vec}\ndoes not match ' + f'the number of traces: {len(wvcalib.wv_fits)}\nPlease try again.' + ) continue # we are done, break out of the loop break @@ -894,9 +895,11 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, if not force_save: while ow_wvcalib != 'y' and ow_wvcalib != 'n': print('') - msgs.warning('Do you want to overwrite existing Calibrations/WaveCalib*.fits file? ' + msgs.newline() + - 'NOTE: To use this WaveCalib file the user will need to delete the other files in Calibrations/ ' + msgs.newline() + - ' and re-run run_pypeit. ') + msgs.warning( + 'Do you want to overwrite existing Calibrations/WaveCalib*.fits ' + 'file?\nNOTE: To use this WaveCalib file the user will need to ' + 'delete the other files in Calibrations/ \nand re-run run_pypeit.' 
+ ) print('') ow_wvcalib = input('Proceed with overwrite? (y/[n]): ') @@ -1457,9 +1460,9 @@ def load_IDs(self, wv_calib=None, fname='waveid.ascii'): self._detns = data['pixel'].data self._lineids = data['wavelength'].data self._lineflg = data['flag'].data - msgs.info("Loaded line IDs:" + msgs.newline() + fname) + msgs.info(f"Loaded line IDs:\n{fname}") else: - msgs.info("Could not find line IDs:" + msgs.newline()+fname) + msgs.info(f"Could not find line IDs:\n{fname}") self._detnsy = self.get_ann_ypos() # Get the y locations of the annotations self.replot() @@ -1477,5 +1480,5 @@ def save_IDs(self, fname='waveid.ascii'): names=['pixel', 'wavelength', 'flag'], meta=meta) ascii_io.write(data, fname, format='fixed_width', overwrite=True) - msgs.info("Line IDs saved as:" + msgs.newline() + fname) + msgs.info(f"Line IDs saved as:\n{fname}") self.update_infobox(message="Line IDs saved as: {0:s}".format(fname), yesno=False) diff --git a/pypeit/core/skysub.py b/pypeit/core/skysub.py index b59bfb66d3..556512b3a1 100644 --- a/pypeit/core/skysub.py +++ b/pypeit/core/skysub.py @@ -142,10 +142,11 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non & np.isfinite(image) & np.isfinite(ivar) bad_pixel_frac = np.sum(thismask & np.logical_not(gpm))/np.sum(thismask) if bad_pixel_frac > max_mask_frac: - msgs.warning(f'This slit/order has {100.0*bad_pixel_frac:.3f}% of the pixels masked, which ' - f'exceeds the threshold of {100.0*max_mask_frac:.3f}%.' - + msgs.newline() + 'There is likely a problem with this slit. Giving up on ' - 'global sky-subtraction.') + msgs.warning( + f'This slit/order has {100.0*bad_pixel_frac:.3f}% of the pixels masked, which exceeds ' + f'the threshold of {100.0*max_mask_frac:.3f}%.\nThere is likely a problem with this ' + 'slit. Giving up on global sky-subtraction.' + ) return np.zeros(np.sum(thismask)) # Sub arrays @@ -172,8 +173,10 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non kwargs_bspline={'bkspace':bsp}, kwargs_reject={'groupbadpix': True, 'maxrej': 10}) if exit_status != 0: - msgs.warning('Global sky-subtraction did not exit cleanly for initial positive sky fit.' 
- + msgs.newline() + 'Initial masking based on positive sky fit will be skipped')
+ msgs.warning(
+ 'Global sky-subtraction did not exit cleanly for initial positive sky fit.\n'
+ 'Initial masking based on positive sky fit will be skipped'
+ )
else:
res = (sky[pos_sky] - np.exp(lsky_fit)) * np.sqrt(sky_ivar[pos_sky])
lmask = (res < 5.0) & (res > -4.0)
@@ -200,9 +203,10 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non
# better understand what this functionality is doing, but it makes the rejection much more quickly approach a small
# chi^2
if exit_status == 1:
- msgs.warning('Maximum iterations reached in bspline_profile global sky-subtraction for npoly={:d}.'.format(npoly_fit) +
- msgs.newline() +
- 'Redoing sky-subtraction without polynomial degrees of freedom')
+ msgs.warning(
+ 'Maximum iterations reached in bspline_profile global sky-subtraction for '
+ f'npoly={npoly_fit}.\nRedoing sky-subtraction without polynomial degrees of freedom'
+ )
poly_basis = np.ones_like(sky)
# Perform the full fit now
skyset, outmask, yfit, _, exit_status \
@@ -1354,12 +1358,13 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg,
# Print out a status message
str_out = ''
for iord in srt_order_snr:
- str_out += '{:<8d}{:<8d}{:>10.2f}'.format(slitids[iord], order_vec[iord], order_snr[iord,ibright]) + msgs.newline()
+ str_out += f'{slitids[iord]:<8d}{order_vec[iord]:<8d}{order_snr[iord,ibright]:>10.2f}\n'
dash = '-'*27
dash_big = '-'*40
- msgs.info(msgs.newline() + 'Reducing orders in order of S/N of brightest object:' + msgs.newline() + dash +
- msgs.newline() + '{:<8s}{:<8s}{:>10s}'.format('slit','order','S/N') + msgs.newline() + dash +
- msgs.newline() + str_out)
+ msgs.info(
+ f'\nReducing orders in order of S/N of brightest object:\n{dash}\n'
+ f'{"slit":<8s}{"order":<8s}{"S/N":>10s}\n{dash}\n' + str_out
+ )
# Loop over orders in order of S/N ratio (from highest to lowest) for the brightest object
for iord in srt_order_snr:
order = order_vec[iord]
@@ -1401,14 +1406,15 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg,
slitids[other_orders], order_vec[other_orders], order_snr[other_orders,ibright],
fwhm_here[other_orders]):
- str_out += '{:<8d}{:<8d}{:>10.2f}{:>10.2f}'.format(slit_now, order_now, snr_now, fwhm_now) + msgs.newline()
+ str_out += f'{slit_now:<8d}{order_now:<8d}{snr_now:>10.2f}{fwhm_now:>10.2f}\n'
+ msgs.info(
+ f'\nUsing {fwhm_str} for FWHM of object={uni_objid[iobj]} on slit/order: '
+ f'{iord}/{order}\n{dash_big}\n'
+ f'{"slit":<8s}{"order":<8s}{"SNR":>10s}{"FWHM":>10s}\n{dash_big}\n'
+ f'{str_out}{fwhm_str.upper()}'
+ f':{iord:<8d}{order:<8d}{order_snr[iord,ibright]:>10.2f}'
+ f'{fwhm_this_ord:>10.2f}\n{dash_big}'
+ )
if show_fwhm:
plt.plot(order_vec[other_orders][fit_mask], fwhm_here[other_orders][fit_mask],
marker='o', linestyle=' ', color='k', mfc='k', markersize=4.0, label='orders informing fit')
diff --git a/pypeit/core/tracewave.py b/pypeit/core/tracewave.py
index b7e1339b25..c7b8d453ae 100644
--- a/pypeit/core/tracewave.py
+++
b/pypeit/core/tracewave.py @@ -489,9 +489,10 @@ def trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask= # cause a full fault of the code, we need to make sure the user # sees these kinds of critical failures instead of them getting # buried in all the other messages. - msgs.warning('Too many lines rejected in this slit/order.' + msgs.newline() - + 'Would reject {0}/{1} lines (more than 95%).'.format(nlines - nuse, nlines) - + msgs.newline() + 'Proceeding without rejection, but reduction likely bogus.') + msgs.warning( + f'Too many lines rejected in this slit/order.\nWould reject {nlines - nuse}/{nlines} ' + 'lines (more than 95%).\nProceeding without rejection, but reduction likely bogus.' + ) use_tilt = np.ones(nlines, dtype=bool) nuse = nlines diff --git a/pypeit/core/wavecal/autoid.py b/pypeit/core/wavecal/autoid.py index ebc181de30..169829b4d5 100644 --- a/pypeit/core/wavecal/autoid.py +++ b/pypeit/core/wavecal/autoid.py @@ -1477,10 +1477,12 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par, if not all_patt_dict[str(iord)]['acceptable']: wv_calib[str(iord)] = None bad_orders = np.append(bad_orders, iord) - msgs.warning(msgs.newline() + '---------------------------------------------------' + msgs.newline() + - f'Reidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + msgs.newline() + - f' Cross-correlation failed' + - msgs.newline() + '---------------------------------------------------') + msgs.warning( + '\n---------------------------------------------------' + f'\nReidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + f'\nCross-correlation failed' + '\n---------------------------------------------------' + ) continue # Perform the fit n_final = wvutils.parse_param(par, 'n_final', iord) @@ -1498,17 +1500,22 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par, # This pattern wasn't good enough wv_calib[str(iord)] = None bad_orders = np.append(bad_orders, iord) - msgs.warning(msgs.newline() + '---------------------------------------------------' + msgs.newline() + - f'Reidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + msgs.newline() + - f' Final fit failed' + - msgs.newline() + '---------------------------------------------------') + msgs.warning( + '\n---------------------------------------------------' + f'\nReidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + f'\nFinal fit failed' + '\n---------------------------------------------------' + ) continue # Is the RMS below the threshold? if final_fit['rms'] > rms_thresh: - msgs.warning(msgs.newline() + '---------------------------------------------------' + msgs.newline() + - f'Reidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + msgs.newline() + - f' Poor RMS ({final_fit["rms"]:.3f})! Need to add additional spectra to arxiv to improve fits' + - msgs.newline() + '---------------------------------------------------') + msgs.warning( + '\n---------------------------------------------------' + f'\nReidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):' + f'\nPoor RMS ({final_fit["rms"]:.3f})! 
Need to add additional spectra to arxiv ' + 'to improve fits' + '\n---------------------------------------------------' + ) bad_orders = np.append(bad_orders, iord) # Note this result in new_bad_orders, but store the solution since this might be the best possible @@ -1552,13 +1559,13 @@ def report_final(nslits, all_patt_dict, detections, """ for slit in range(nslits): # title of the report - report_ttl = msgs.newline() + '---------------------------------------------------' + msgs.newline() + report_ttl = '\n---------------------------------------------------\n' if orders is not None: - report_ttl += f'Final report for order {orders[slit]} ({slit+1}/{nslits}):' + msgs.newline() + report_ttl += f'Final report for order {orders[slit]} ({slit+1}/{nslits}):\n' else: - report_ttl += f'Final report for slit {slit+1}/{nslits}:' + msgs.newline() + report_ttl += f'Final report for slit {slit+1}/{nslits}:\n' # Prepare a message for bad wavelength solutions - badmsg = report_ttl + ' Wavelength calibration not performed!' + msgs.newline() + badmsg = report_ttl + ' Wavelength calibration not performed!\n' # Redo? if redo_slits is not None and orders[slit] not in redo_slits: continue @@ -1574,17 +1581,16 @@ def report_final(nslits, all_patt_dict, detections, # Report cen_wave = wv_calib[st]['cen_wave'] cen_disp = wv_calib[st]['cen_disp'] - sreport = str(report_ttl + - ' Pixels {:s} with wavelength'.format(signtxt) + msgs.newline() + - ' Number of lines detected = {:d}'.format(detections[st].size) + msgs.newline() + - ' Number of lines that were fit = {:d}'.format( - len(wv_calib[st]['pixel_fit'])) + msgs.newline() + - ' Central wavelength = {:g}A'.format(cen_wave) + msgs.newline() + - ' Central dispersion = {:g}A/pix'.format(cen_disp) + msgs.newline() + - ' Central wave/disp = {:g}'.format(cen_wave / cen_disp) + msgs.newline() + - ' Final RMS of fit = {:g}'.format(wv_calib[st]['rms']) + msgs.newline()) - - msgs.info(sreport) + msgs.info( + f'{report_ttl}' + f' Pixels {signtxt} with wavelength\n' + f' Number of lines detected = {detections[st].size}\n' + f' Number of lines that were fit = {len(wv_calib[st]["pixel_fit"])}\n' + f' Central wavelength = {cen_wave}A\n' + f' Central dispersion = {cen_disp}A/pix\n' + f' Central wave/disp = {cen_wave / cen_disp}\n' + f' Final RMS of fit = {wv_calib[st]["rms"]}\n' + ) class ArchiveReid: @@ -1735,11 +1741,6 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None, except ValueError: narxiv -=1 - #if self.ech_fix_format and (self.nslits != narxiv): - # raise PypeItError('You have set ech_fix_format = True, but nslits={:d} != narxiv={:d}'.format(self.nslits,narxiv) + '.' 
+ - # msgs.newline() + 'The number of orders identified does not match the number of solutions in the arxiv') - # - # Array to hold continuum subtracted arcs self.spec_cont_sub = np.zeros_like(self.spec) @@ -1797,10 +1798,12 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None, if not self.all_patt_dict[str(slit)]['acceptable']: self.wv_calib[str(slit)] = None self.bad_slits = np.append(self.bad_slits, slit) - msgs.warning('---------------------------------------------------' + msgs.newline() + - 'Reidentify report for slit {0:d}/{1:d}'.format(slit, self.nslits-1) + order_str + msgs.newline() + - ' Cross-correlation failed' + msgs.newline() + - '---------------------------------------------------') + msgs.warning( + '---------------------------------------------------\n' + f'Reidentify report for slit {slit}/{self.nslits-1}{order_str}\n' + ' Cross-correlation failed\n' + '---------------------------------------------------' + ) continue # Perform the fit @@ -1815,18 +1818,22 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None, # This pattern wasn't good enough self.wv_calib[str(slit)] = None self.bad_slits = np.append(self.bad_slits, slit) - msgs.warning('---------------------------------------------------' + msgs.newline() + - 'Reidentify report for slit {0:d}/{1:d}'.format(slit, self.nslits-1) + order_str + msgs.newline() + - ' Final fit failed' + msgs.newline() + - '---------------------------------------------------') + msgs.warning( + '---------------------------------------------------\n' + f'Reidentify report for slit {slit}/{self.nslits-1}{order_str}\n' + ' Final fit failed\n' + '---------------------------------------------------' + ) continue # Is the RMS below the threshold? if final_fit['rms'] > rms_thresh: - msgs.warning('---------------------------------------------------' + msgs.newline() + - 'Reidentify report for slit {0:d}/{1:d}'.format(slit, self.nslits-1) + order_str + msgs.newline() + - ' Poor RMS ({0:.3f})! Need to add additional spectra to arxiv to improve fits'.format( - final_fit['rms']) + msgs.newline() + - '---------------------------------------------------') + msgs.warning( + '---------------------------------------------------\n' + f'Reidentify report for slit {slit}/{self.nslits-1}{order_str}\n' + f' Poor RMS ({final_fit["rms"]:.3f})! Need to add additional spectra to ' + 'arxiv to improve fits\n' + '---------------------------------------------------' + ) self.bad_slits = np.append(self.bad_slits, slit) # Note this result in new_bad_slits, but store the solution since this might be the best possible @@ -2546,10 +2553,12 @@ def cross_match(self, good_fit, detections): rms_thresh = round(self._par['rms_thresh_frac_fwhm'] * fwhm, 3) if final_fit['rms'] > rms_thresh: - msgs.warning('---------------------------------------------------' + msgs.newline() + - 'Cross-match report for slit {0:d}/{1:d}:'.format(bs + 1, self._nslit) + msgs.newline() + - ' Poor RMS ({0:.3f})! Will try cross matching iteratively'.format(final_fit['rms']) + msgs.newline() + - '---------------------------------------------------') + msgs.warning( + '---------------------------------------------------\n' + f'Cross-match report for slit {bs+1}/{self._nslit}\n' + f' Poor RMS ({final_fit["rms"]:.3f})! Will try cross matching iteratively\n' + '---------------------------------------------------' + ) # Store this result in new_bad_slits, so the iteration can be performed, # but make sure to store the result, as this might be the best possible. 
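
An aside on the conversion pattern running through the hunks above: message fragments that were previously stitched together with msgs.newline() become a single (f-)string containing literal newlines. A minimal before/after sketch, with hypothetical message text and counts (only the msgs import and its warning method are taken from the patch itself):

    # Minimal sketch of the refactor pattern; the counts are hypothetical.
    from pypeit import msgs

    nrej, nlines = 3, 120  # hypothetical rejection statistics

    # Old style (removed by this patch): fragments glued with msgs.newline()
    #   msgs.warning('Too many lines rejected.' + msgs.newline()
    #                + 'Would reject {0}/{1} lines.'.format(nrej, nlines))

    # New style: one f-string with embedded '\n' characters
    msgs.warning(f'Too many lines rejected.\nWould reject {nrej}/{nlines} lines.')
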
new_bad_slits = np.append(new_bad_slits, bs) @@ -2563,213 +2572,6 @@ def cross_match(self, good_fit, detections): plt.show() return new_bad_slits - # This routine is commented out because it is not used. - # def cross_match_order(self, good_fit): - # """Using the solutions of all orders, identify the good solutions, and refit the bad ones! - # - # TODO: This function needs work... The first few lines of code successfully pick up the good orders, - # but we need a new routine that (based on an estimated central wavelength and dispersion) can successfully - # ID all of the lines. - # """ - # # DEPRECATED (NOT USED) - # - # # First determine the central wavelength and dispersion of every slit, using the known good solutions - # xplt = np.arange(self._nslit) - # yplt, dplt = np.zeros(self._nslit), np.zeros(self._nslit) - # imsk = np.ones(self._nslit, dtype=int) - # for slit in range(self._nslit): - # if good_fit[slit]: - # yplt[slit] = self._all_patt_dict[str(slit)]['bwv'] - # dplt[slit] = self._all_patt_dict[str(slit)]['bdisp'] - # imsk[slit] = 0 - # - # mask, fit = utils.robust_polyfit(xplt, yplt, 2, function='polynomial', sigma=2, - # initialmask=imsk, forceimask=True) - # good_fit[mask == 1] = False - # wavemodel = utils.func_val(fit, xplt, 'polynomial') - # disp = np.median(dplt[good_fit]) - # - # # TODO: maybe rethink the model at this point? Using the derived - # # central wavelength and dispersion identify liens in all orders? - # - # if self._debug: - # plt.subplot(211) - # plt.plot(xplt, wavemodel, 'r-') - # ww = np.where(mask==0) - # plt.plot(xplt[ww], yplt[ww], 'bx') - # ww = np.where(mask==1) - # plt.plot(xplt[ww], yplt[ww], 'rx') - # plt.subplot(212) - # plt.plot(xplt, dplt, 'bx') - # plt.show() - # #embed() - # - # fact_nl = 1.2 # Non linear factor - # new_good_fit = np.zeros(self._nslit, dtype=bool) - # for slit in range(self._nslit): - # wmin = wavemodel[slit] - fact_nl*disp*self._npix/2 - # wmax = wavemodel[slit] + fact_nl*disp*self._npix/2 - # ww = np.where((self._wvdata > wmin) & (self._wvdata < wmax)) - # wavedata = self._wvdata[ww] - # msgs.info('Brute force ID for slit {0:d}/{1:d}'.format(slit+1, self._nslit)) - # best_patt_dict, best_final_fit =\ - # self.run_brute_loop(slit, arrerr=self._det_weak[str(slit)], wavedata=wavedata) - # - # self._all_patt_dict[str(slit)] = copy.deepcopy(best_patt_dict) - # self._all_final_fit[str(slit)] = copy.deepcopy(best_final_fit) - # new_good_fit[slit] = self.report_prelim(slit, best_patt_dict, best_final_fit) - # return new_good_fit - # - # - # # Set some fitting parameters - # if self._n_final is None: - # order = 4 - # else: - # order = self._n_final - # - # ofit = [5, 3, 1, 0] - # lnpc = len(ofit) - 1 - # - # # Prepare the fitting coefficients - # xv = np.arange(self._npix)/(self._npix-1) - # ords = np.arange(self._nslit) - # xcen = xv[:, np.newaxis].repeat(self._nslit, axis=1) - # extrapord = ~good_fit - # maskord = np.where(extrapord)[0] - # - # coeffs = None - # waves = np.zeros(xcen.shape, dtype=float) - # for slit in range(self._nslit): - # if good_fit[slit]: - # func = self._all_final_fit[str(slit)]['function'] - # fmin = self._all_final_fit[str(slit)]['fmin'] - # fmax = self._all_final_fit[str(slit)]['fmax'] - # fitc = self._all_final_fit[str(slit)]['fitc'] - # if coeffs is None: - # coeffs = np.zeros((fitc.size, self._nslit)) - # coeffs[:, slit] = fitc.copy() - # waves[:, slit] = utils.func_val(fitc, xv, func, minx=fmin, maxx=fmax) - # - # msgs.info("Performing a PCA on the order wavelength solutions") - # #embed() - # 
pca_wave, outpar = pca.basis(xcen, waves, coeffs, lnpc, ofit, x0in=ords, mask=maskord, skipx0=False, function=func) - # - # # Report the QA - # # TODO: fix setup passing - # setup = "BLAH" - # pca.pca_plot(setup, outpar, ofit, "wave_cross_match", pcadesc="Wavelength calibration PCA") - # - # - # # Extrapolate the remaining orders requested - # #extrap_wave, outpar = pca.extrapolate(outpar, ords) - # - # # Determine if pixels correlate and anti-correlate with wavelength - # signs = np.zeros(self._nslit, dtype=int) - # for slit in range(self._nslit): - # wvval = pca_wave[:, slit] - # if wvval[wvval.size//2] > wvval[wvval.size//2-1]: - # signs[slit] = 1 - # else: - # signs[slit] = -1 - # sign = 1 - # if np.sum(signs) < 0: - # sign = -1 - # - # new_bad_slits = np.array([], dtype=int) - # # Using the first guesses at the wavelength solution, identify lines - # for slit in range(self._nslit): - # # Get the detections - # dets, _ = self.get_use_tcent(sign, self._det_weak[str(slit)]) - # lindex = np.array([], dtype=int) - # dindex = np.array([], dtype=int) - # # Calculate wavelengths for the gsdet detections - # wvval = pca_wave[:, slit] - # wvcen = wvval[wvval.size//2] - # disp = abs(wvval[wvval.size//2] - wvval[wvval.size//2-1]) - # for dd in range(dets.size): - # pdiff = np.abs(dets[dd] - xv) - # bstpx = np.argmin(pdiff) - # bstwv = np.abs(self._wvdata - wvval[bstpx]) - # if bstwv[np.argmin(bstwv)] > 10.0 * disp: - # # This is probably not a good match - # continue - # lindex = np.append(lindex, np.argmin(bstwv)) - # dindex = np.append(dindex, dd) - # - # # Finalize the best guess of each line - # # Initialise the patterns dictionary - # patt_dict = dict(acceptable=False, nmatch=0, ibest=-1, bwv=0., - # sigdetect=wvutils.parse_param(self._par, 'sigdetect', slit), - # mask=np.zeros(dets.size, dtype=bool)) - # patt_dict['sign'] = sign - # patt_dict['bwv'] = wvcen - # patt_dict['bdisp'] = disp - # - # patterns.solve_triangles(dets, self._wvdata, dindex, lindex, patt_dict) - # # Check if a solution was found - # if not patt_dict['acceptable']: - # new_bad_slits = np.append(new_bad_slits, slit) - # msgs.warning('---------------------------------------------------' + msgs.newline() + - # 'Cross-match report for slit {0:d}/{1:d}:'.format(slit, self._nslit-1) + msgs.newline() + - # ' Lines could not be identified! Will try cross matching iteratively' + msgs.newline() + - # '---------------------------------------------------') - # continue - # final_fit = self.fit_slit(slit, patt_dict, dets) - # if final_fit is None: - # # This pattern wasn't good enough - # new_bad_slits = np.append(new_bad_slits, slit) - # msgs.warning('---------------------------------------------------' + msgs.newline() + - # 'Cross-match report for slit {0:d}/{1:d}:'.format(slit, self._nslit-1) + msgs.newline() + - # ' Fit was not good enough! Will try cross matching iteratively' + msgs.newline() + - # '---------------------------------------------------') - # continue - # if final_fit['rms'] > rms_thresh: - # msgs.warning('---------------------------------------------------' + msgs.newline() + - # 'Cross-match report for slit {0:d}/{1:d}:'.format(slit, self._nslit-1) + msgs.newline() + - # ' Poor RMS ({0:.3f})! Will try cross matching iteratively'.format(final_fit['rms']) + msgs.newline() + - # '---------------------------------------------------') - # # Store this result in new_bad_slits, so the iteration can be performed, - # # but make sure to store the result, as this might be the best possible. 
- # new_bad_slits = np.append(new_bad_slits, slit) - # self._all_patt_dict[str(slit)] = copy.deepcopy(patt_dict) - # self._all_final_fit[str(slit)] = copy.deepcopy(final_fit) - # if self._debug: - # xplt = np.linspace(0.0, 1.0, self._npix) - # yplt = utils.func_val(final_fit['fitc'], xplt, 'legendre', minx=0.0, maxx=1.0) - # plt.plot(final_fit['pixel_fit'], final_fit['wave_fit'], 'bx') - # plt.plot(xplt, yplt, 'r-') - # plt.show() - # #embed() - # - # # debugging - # if self._debug: - # # First determine the central wavelength and dispersion of every slit, using the known good solutions - # xplt = np.arange(self._nslit) - # yplt, dplt = np.zeros(self._nslit), np.zeros(self._nslit) - # imsk = np.ones(self._nslit, dtype=int) - # for slit in range(self._nslit): - # if good_fit[slit]: - # yplt[slit] = self._all_patt_dict[str(slit)]['bwv'] - # dplt[slit] = self._all_patt_dict[str(slit)]['bdisp'] - # imsk[slit] = 0 - # - # mask, fit = utils.robust_polyfit(xplt, yplt, 2, function='polynomial', sigma=2, - # initialmask=imsk, forceimask=True) - # - # ymodel = utils.func_val(fit, xplt, 'polynomial') - # plt.subplot(211) - # plt.plot(xplt, ymodel, 'r-') - # ww = np.where(mask==0) - # plt.plot(xplt[ww], yplt[ww], 'bx') - # ww = np.where(mask==1) - # plt.plot(xplt[ww], yplt[ww], 'rx') - # plt.subplot(212) - # plt.plot(xplt, dplt, 'bx') - # plt.show() - # #embed() - # - # return new_bad_slits - def get_use_tcent_old(self, corr, cut=True, arr_err=None, weak=False): """ Grab the lines to use @@ -3152,25 +2954,26 @@ def solve_patterns(self, slit, bestlist, tcent_ecent): # Check that a solution has been found if patt_dict['nmatch'] == 0 and self._verbose: - msgs.info(msgs.newline() + - '---------------------------------------------------' + msgs.newline() + - 'Initial report:' + msgs.newline() + - ' No matches! Try another algorithm' + msgs.newline() + - '---------------------------------------------------') + msgs.info( + '\n---------------------------------------------------' + '\nInitial report:' + '\n No matches! 
Try another algorithm' + '\n---------------------------------------------------' + ) return None elif self._verbose: # Report - msgs.info(msgs.newline() + - '---------------------------------------------------' + msgs.newline() + - 'Initial report:' + msgs.newline() + - ' Pixels {:s} with wavelength'.format(signtxt) + msgs.newline() + - ' Number of lines recovered = {:d}'.format(self._all_tcent.size) + msgs.newline() + - ' Number of lines analyzed = {:d}'.format(use_tcent.size) + msgs.newline() + - ' Number of acceptable matches = {:d}'.format(patt_dict['nmatch']) + msgs.newline() + - ' Best central wavelength = {:g}A'.format(patt_dict['bwv']) + msgs.newline() + - ' Best dispersion = {:g}A/pix'.format(patt_dict['bdisp']) + msgs.newline() + - ' Best wave/disp = {:g}'.format(patt_dict['bwv']/patt_dict['bdisp']) + msgs.newline() + - '---------------------------------------------------') + msgs.info( + '\n---------------------------------------------------' + '\nInitial report:' + f'\n Pixels {signtxt} with wavelength' + f'\n Number of lines recovered = {self._all_tcent.size:d}' + f'\n Number of lines analyzed = {use_tcent.size}' + f'\n Number of acceptable matches = {patt_dict["nmatch"]}' + f'\n Best central wavelength = {patt_dict["bwv"]}A' + f'\n Best dispersion = {patt_dict["bdisp"]}A/pix' + f'\n Best wave/disp = {patt_dict["bwv"]/patt_dict["bdisp"]}' + '\n---------------------------------------------------') return patt_dict def finalize_fit(self, detections): @@ -3216,17 +3019,21 @@ def report_prelim(self, slit, best_patt_dict, best_final_fit): good_fit = False # Report on the best preliminary result if best_final_fit is None: - msgs.warning('---------------------------------------------------' + msgs.newline() + - 'Preliminary report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() + - ' No matches! Attempting to cross match.' + msgs.newline() + - '---------------------------------------------------') + msgs.warning( + '---------------------------------------------------' + f'\nPreliminary report for slit {slit+1}/{self._nslit}:' + '\n No matches! Attempting to cross match.' + '\n---------------------------------------------------' + ) self._all_patt_dict[str(slit)] = None self._all_final_fit[str(slit)] = None elif best_final_fit['rms'] > rms_thresh: - msgs.warning('---------------------------------------------------' + msgs.newline() + - 'Preliminary report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() + - ' Poor RMS ({0:.3f})! Attempting to cross match.'.format(best_final_fit['rms']) + msgs.newline() + - '---------------------------------------------------') + msgs.warning( + '---------------------------------------------------' + f'\nPreliminary report for slit {slit+1}/{self._nslit}:' + f'\n Poor RMS ({best_final_fit["rms"]:.3f})! Attempting to cross match.' 
+ '\n---------------------------------------------------' + ) self._all_patt_dict[str(slit)] = None self._all_final_fit[str(slit)] = None else: @@ -3236,18 +3043,20 @@ def report_prelim(self, slit, best_patt_dict, best_final_fit): else: signtxt = 'anitcorrelate' # Report - msgs.info('---------------------------------------------------' + msgs.newline() + - 'Preliminary report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() + - ' Pixels {:s} with wavelength'.format(signtxt) + msgs.newline() + - ' Number of weak lines = {:d}'.format(self._det_weak[str(slit)][0].size) + msgs.newline() + - ' Number of strong lines = {:d}'.format(self._det_stro[str(slit)][0].size) + msgs.newline() + - ' Number of lines analyzed = {:d}'.format(len(best_final_fit['pixel_fit'])) + msgs.newline() + - ' Number of pattern matches = {:d}'.format(best_patt_dict['nmatch']) + msgs.newline() + - ' Patt match cen wavelength = {:g}A'.format(best_patt_dict['bwv']) + msgs.newline() + - ' Patt match dispersion = {:g}A/pix'.format(best_patt_dict['bdisp']) + msgs.newline() + - ' Best patt match wave/disp = {:g}'.format(best_patt_dict['bwv']/best_patt_dict['bdisp']) + msgs.newline() + - ' Final RMS of fit = {:g}'.format(best_final_fit['rms']) + msgs.newline() + - '---------------------------------------------------') + msgs.info( + '---------------------------------------------------' + f'\nPreliminary report for slit {slit+1}/{self._nslit}:' + f'\n Pixels {signtxt} with wavelength' + f'\n Number of weak lines = {self._det_weak[str(slit)][0].size}' + f'\n Number of strong lines = {self._det_stro[str(slit)][0].size}' + f'\n Number of lines analyzed = {len(best_final_fit["pixel_fit"])}' + f'\n Number of pattern matches = {best_patt_dict["nmatch"]}' + f'\n Patt match cen wavelength = {best_patt_dict["bwv"]}A' + f'\n Patt match dispersion = {best_patt_dict["bdisp"]}A/pix' + f'\n Best patt match wave/disp = {best_patt_dict["bwv"]/best_patt_dict["bdisp"]}' + f'\n Final RMS of fit = {best_final_fit["rms"]}' + '\n---------------------------------------------------' + ) self._all_patt_dict[str(slit)] = copy.deepcopy(best_patt_dict) self._all_final_fit[str(slit)] = copy.deepcopy(best_final_fit) return good_fit @@ -3256,8 +3065,10 @@ def report_final(self): """Print out the final report of the wavelength calibration""" for slit in range(self._nslit): # Prepare a message for bad wavelength solutions - badmsg = '---------------------------------------------------' + msgs.newline() +\ - 'Final report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() + badmsg = ( + '---------------------------------------------------\n' + f'Final report for slit {slit+1}/{self._nslit}:\n' + ) if slit not in self._ok_mask: msgs.warning(badmsg + 'Masked slit ignored') continue @@ -3273,17 +3084,18 @@ def report_final(self): centwave = self._all_final_fit[st].pypeitfit.eval(0.5) tempwave = self._all_final_fit[st].pypeitfit.eval(0.5 + 1.0/self._npix) centdisp = abs(centwave-tempwave) - msgs.info(msgs.newline() + - '---------------------------------------------------' + msgs.newline() + - 'Final report for slit {0:d}/{1:d}:'.format(slit+1, self._nslit) + msgs.newline() + - ' Pixels {:s} with wavelength'.format(signtxt) + msgs.newline() + - ' Number of weak lines = {:d}'.format(self._det_weak[str(slit)][0].size) + msgs.newline() + - ' Number of strong lines = {:d}'.format(self._det_stro[str(slit)][0].size) + msgs.newline() + - ' Number of lines analyzed = {:d}'.format(len(self._all_final_fit[st]['pixel_fit'])) + msgs.newline() + 
- ' Central wavelength = {:g}A'.format(centwave) + msgs.newline() + - ' Central dispersion = {:g}A/pix'.format(centdisp) + msgs.newline() + - ' Central wave/disp = {:g}'.format(centwave/centdisp) + msgs.newline() + - ' Final RMS of fit = {:g}'.format(self._all_final_fit[st]['rms'])) + msgs.info( + '\n---------------------------------------------------' + f'\nFinal report for slit {slit+1}/{self._nslit}:' + f'\n Pixels {signtxt} with wavelength' + f'\n Number of weak lines = {self._det_weak[str(slit)][0].size}' + f'\n Number of strong lines = {self._det_stro[str(slit)][0].size}' + f'\n Number of lines analyzed = {len(self._all_final_fit[st]["pixel_fit"])}' + f'\n Central wavelength = {centwave}A' + f'\n Central dispersion = {centdisp}A/pix' + f'\n Central wave/disp = {centwave/centdisp}' + f'\n Final RMS of fit = {self._all_final_fit[st]["rms"]}' + ) return diff --git a/pypeit/core/wavecal/waveio.py b/pypeit/core/wavecal/waveio.py index ccc1771625..c9ca050b1b 100644 --- a/pypeit/core/wavecal/waveio.py +++ b/pypeit/core/wavecal/waveio.py @@ -298,8 +298,10 @@ def load_tree(polygon=4, numsearch=20): file_load = pickle.load(f_obj) index = np.load(fileindx) except FileNotFoundError: - msgs.info('The requested KDTree was not found on disk' + msgs.newline() + - 'please be patient while the ThAr KDTree is built and saved to disk.') + msgs.info( + 'The requested KDTree was not found on disk\nplease be patient while the ThAr KDTree ' + 'is built and saved to disk.' + ) from pypeit.core.wavecal import kdtree_generator file_load, index = kdtree_generator.main(polygon, numsearch=numsearch, verbose=True, ret_treeindx=True, outname=filename) diff --git a/pypeit/core/wavecal/wvutils.py b/pypeit/core/wavecal/wvutils.py index c335cc63d9..3303cca8aa 100644 --- a/pypeit/core/wavecal/wvutils.py +++ b/pypeit/core/wavecal/wvutils.py @@ -800,10 +800,12 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use if(corr_de < corr_cc): # Occasionally the differential evolution crapps out and returns a value worse that the CC value. In these cases just use the cc value - msgs.warning('Shift/Stretch optimizer performed worse than simple x-correlation.' + - 'Returning simple x-correlation shift and no stretch:' + msgs.newline() + - ' Optimizer: corr={:5.3f}, shift={:5.3f}, stretch={:7.5f}'.format(corr_de, shift_de,stretch_de) + msgs.newline() + - ' X-corr : corr={:5.3f}, shift={:5.3f}'.format(corr_cc,shift_cc)) + msgs.warning( + 'Shift/Stretch optimizer performed worse than simple x-correlation. 
' + 'Returning simple x-correlation shift and no stretch:\n' + f' Optimizer: corr={corr_de:5.3f}, shift={shift_de:5.3f}, stretch={stretch_de:7.5f}\n' + f' X-corr : corr={corr_cc:5.3f}, shift={shift_cc:5.3f}' + ) corr_out = corr_cc shift_out = shift_cc stretch_out = 1.0 @@ -956,15 +958,15 @@ def write_template(nwwv, nwspec, binspec, outpath, outroot, det_cut=None, # Also copy the file to the cache for direct use cache.write_file_to_cache(outroot, outroot, "arc_lines/reid_arxiv") - msgs.info(f"Your arxiv solution has also been cached.{msgs.newline()}" - f"To utilize this wavelength solution, insert the{msgs.newline()}" - f"following block in your PypeIt Reduction File:{msgs.newline()}" - f" [calibrations]{msgs.newline()}" - f" [[wavelengths]]{msgs.newline()}" - f" reid_arxiv = {outroot}{msgs.newline()}" + msgs.info(f"Your arxiv solution has also been cached.\n" + f"To utilize this wavelength solution, insert the\n" + f"following block in your PypeIt Reduction File:\n" + f" [calibrations]\n" + f" [[wavelengths]]\n" + f" reid_arxiv = {outroot}\n" f" method = full_template\n") print("") # Empty line for clarity - msgs.info(f"To use exactly the solutions created above {msgs.newline()}" + msgs.info(f"To use exactly the solutions created above\n" f"disable the 2d fitting by adding the keyword ech_2dfit = False") print("") # Empty line for clarity msgs.info("Please consider sharing your solution with the PypeIt Developers.") diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py index 7ccfd1f059..4103d0d774 100644 --- a/pypeit/edgetrace.py +++ b/pypeit/edgetrace.py @@ -783,7 +783,7 @@ def auto_trace(self, bpm=None, debug=0): 'Sync predict cannot use PCA because too few edges were found. If you are ' 'reducing multislit or echelle data, you may need a better trace image or ' 'change the mode used to predict traces (see below). If you are reducing ' - 'longslit data, make sure to set the "sync_predict" parameter to "nearest". + 'longslit data, make sure to set the "sync_predict" parameter to "nearest".' ) self.success = False else: diff --git a/pypeit/scripts/arxiv_solution.py b/pypeit/scripts/arxiv_solution.py index f4afc7f8fc..e1ea2d3b55 100644 --- a/pypeit/scripts/arxiv_solution.py +++ b/pypeit/scripts/arxiv_solution.py @@ -45,7 +45,7 @@ def main(args): if args.file is None: raise PypeItError('You must input a MasterWaveCalib file') elif not os.path.exists(args.file): - raise PypeItError("The following MasterWaveCalib file does not exist:" + msgs.newline() + args.file) + raise PypeItError(f"The following MasterWaveCalib file does not exist:\n{args.file}") # Load the wavelength calibration file wv_calib = WaveCalib.from_file(args.file, chk_version=chk_version) @@ -60,7 +60,7 @@ def main(args): if len(gd_slits) == 0: thismsg += "There are no good slits - the WaveCalib file is bad." 
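
One pitfall worth illustrating for format-to-f-string conversions like the one in this hunk: a brace expression is only interpolated when the literal carries the f prefix, so a converted message that loses the prefix prints its braces verbatim rather than raising an error. A small self-contained demonstration (the slit labels are hypothetical):

    # Interpolation requires the f prefix; the slit labels are hypothetical.
    gd_slits = ['3', '7']

    plain = "Try one of the following slits, instead:\n{', '.join(gd_slits)}"
    interp = f"Try one of the following slits, instead:\n{', '.join(gd_slits)}"

    print(plain)   # the braces come through literally
    print(interp)  # second line renders as: 3, 7
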
else: - thismsg += "Try one of the following slits, instead: " + msgs.newline() + ", ".join(gd_slits) + thismsg += f"Try one of the following slits, instead:\n{', '.join(gd_slits)}" raise PypeItError(thismsg) wave = wv_calib['wv_fits'][args.slit]['wave_soln'].flatten() spec = wv_calib['wv_fits'][args.slit]['spec'].flatten() diff --git a/pypeit/scripts/coadd_2dspec.py b/pypeit/scripts/coadd_2dspec.py index d7ae8bff52..d57edbda64 100644 --- a/pypeit/scripts/coadd_2dspec.py +++ b/pypeit/scripts/coadd_2dspec.py @@ -110,12 +110,12 @@ def main(args): find_negative = head2d['FINDOBJ'] == 'POS_NEG' # Print status message - msgs_string = f'Reducing target {basename}' + msgs.newline() - msgs_string += f"Coadding frame sky-subtracted with {head2d['SKYSUB']}" + msgs.newline() - msgs_string += f"Searching for objects that are {head2d['FINDOBJ']}" + msgs.newline() - msgs_string += 'Combining frames in 2d coadd:' + msgs.newline() + msgs_string = f'Reducing target {basename}\n' + msgs_string += f"Coadding frame sky-subtracted with {head2d['SKYSUB']}\n" + msgs_string += f"Searching for objects that are {head2d['FINDOBJ']}\n" + msgs_string += 'Combining frames in 2d coadd:\n' for f, file in enumerate(spec2d_files): - msgs_string += f'Exp {f}: {Path(file).name}' + msgs.newline() + msgs_string += f'Exp {f}: {Path(file).name}\n' msgs.info(msgs_string) # Instantiate the sci_dict diff --git a/pypeit/scripts/flux_calib.py b/pypeit/scripts/flux_calib.py index c2c13694f4..ec0a68393c 100644 --- a/pypeit/scripts/flux_calib.py +++ b/pypeit/scripts/flux_calib.py @@ -107,11 +107,13 @@ def main(args): sf_archive = SensFileArchive.get_instance(spectrograph.name) sensfiles = nspec*[sf_archive.get_archived_sensfile(fluxFile.filenames[0])] else: - raise PypeItError('Invalid format for .flux file.' + msgs.newline() + - 'You must specify a single sensfile on the first line of the flux block,' + msgs.newline() + - 'or specify a sensfile for every spec1dfile in the flux block,' + msgs.newline() + - 'or specify "use_archived_sens = True" to use an archived sensfile.' 
+ msgs.newline() + - 'Run pypeit_flux_calib --help for information on the format') + raise PypeItError( + 'Invalid format for .flux file.\n' + 'You must specify a single sensfile on the first line of the flux block,\n' + 'or specify a sensfile for every spec1dfile in the flux block,\n' + 'or specify "use_archived_sens = True" to use an archived sensfile.\n' + 'Run pypeit_flux_calib --help for information on the format' + ) # Instantiate fluxcalibrate.flux_calibrate(fluxFile.filenames, sensfiles, par=par['fluxcalib'], diff --git a/pypeit/scripts/print_bpm.py b/pypeit/scripts/print_bpm.py index 96dbc38598..829c27f736 100644 --- a/pypeit/scripts/print_bpm.py +++ b/pypeit/scripts/print_bpm.py @@ -56,7 +56,7 @@ def main(args): descr = bpm.descr else: # Read the spec2d file - msgs.info("Using the bad pixel mask from the following spec2d file:" + msgs.newline() + f"{args.file}.") + msgs.info(f"Using the bad pixel mask from the following spec2d file:\n{args.file}.") spec2d_file = args.file # Parse the detector name @@ -92,18 +92,17 @@ def main(args): descr = bpm.bitmask.descr # Print the description of the bad pixel mask value - outstr = f"The bad pixel mask value ({args.bit}) corresponds to the following:" \ - + msgs.newline() + msgs.newline() + outstr = f"The bad pixel mask value ({args.bit}) corresponds to the following:\n\n" bitkeys = list(bpm.bits.keys()) # Pad the bit keys with spaces so that they all have the same length bitlen = len(max(bitkeys, key=len)) for i in range(len(binvals)): if binvals[i] == 1: - outstr += f"* {bitkeys[i].ljust(bitlen)} : {descr[i]}" + msgs.newline() + outstr += f"* {bitkeys[i].ljust(bitlen)} : {descr[i]}\n" # Print the message to the user msgs.info(outstr) # Finally, print out a message to point users to the online documentation - msgs.info("Please see the following website for more information:" + msgs.newline() + + msgs.info("Please see the following website for more information:\n" "https://pypeit.readthedocs.io/en/release/out_masks.html") diff --git a/pypeit/scripts/ql.py b/pypeit/scripts/ql.py index bdc866b70c..9b85035077 100644 --- a/pypeit/scripts/ql.py +++ b/pypeit/scripts/ql.py @@ -1067,16 +1067,18 @@ def print_offset_report(fitstbl:Table, platescale:float): raise PypeItError('Script only supported for a single type of dither pattern.') # Print out a report on the offsets - msg_string = msgs.newline() + '*******************************************************' - msg_string += msgs.newline() + ' Summary of offsets for target {:s} with dither pattern: {:s}'.format(target, - dither_pattern[ - 0]) - msg_string += msgs.newline() + '*******************************************************' - msg_string += msgs.newline() + 'filename Position arcsec pixels ' - msg_string += msgs.newline() + '----------------------------------------------------' + msg_string = '\n*******************************************************' + msg_string += ( + f'\n Summary of offsets for target {target} with dither pattern: {dither_pattern[0]}' + ) + msg_string += '\n*******************************************************' + msg_string += '\n filename Position arcsec pixels ' + msg_string += '\n----------------------------------------------------' for iexp, file in enumerate(files): - msg_string += msgs.newline() + ' {:s} {:s} {:6.2f} {:6.2f}'.format( - file, dither_id[iexp], offset_arcsec[iexp], offset_arcsec[iexp] / platescale) - msg_string += msgs.newline() + '********************************************************' + msg_string += ( + f'\n {file} {dither_id[iexp]} 
{offset_arcsec[iexp]:6.2f} ' + f'{offset_arcsec[iexp] / platescale:6.2f}' + ) + msg_string += '\n********************************************************' msgs.info(msg_string) diff --git a/pypeit/scripts/setup_coadd2d.py b/pypeit/scripts/setup_coadd2d.py index 98892370e4..c5a947e1c0 100644 --- a/pypeit/scripts/setup_coadd2d.py +++ b/pypeit/scripts/setup_coadd2d.py @@ -125,9 +125,9 @@ def main(args): sci_dirs_exist = [sc.exists() for sc in sci_dirs] if not np.all(sci_dirs_exist): - msgs_string = 'The following science directories do not exist:' + msgs.newline() + msgs_string = 'The following science directories do not exist:\n' for s in np.array(sci_dirs)[np.logical_not(sci_dirs_exist)]: - msgs_string += f'{s}' + msgs.newline() + msgs_string += f'{s}\n' raise PypeItError(msgs_string) # Find all the spec2d files: diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index 2c25b60578..19f531f48d 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -289,8 +289,8 @@ def main(args): sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file, chk_version=False) else: sobjs = None - msgs.warning('Could not find spec1d file: {:s}'.format(spec1d_file) + msgs.newline() + - ' No objects were extracted.') + msgs.warning(f'Could not find spec1d file: {spec1d_file}\n' + 'No objects were extracted.') # TODO: This may be too restrictive, i.e. ignore BADFLTCALIB?? slit_gpm = slit_mask == 0 diff --git a/pypeit/specobj.py b/pypeit/specobj.py index 4aaaf2fac6..ae7213bc4f 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -638,7 +638,7 @@ def apply_helio(self, vel_corr, refframe): for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is not None: msgs.info( - f'Applying {refframe} correction to {attr} extraction for object:\n'{self.NAME}' + f'Applying {refframe} correction to {attr} extraction for object:\n{self.NAME}' ) self[attr+'_WAVE'] *= vel_corr # Record diff --git a/pypeit/spectrographs/aat_uhrf.py b/pypeit/spectrographs/aat_uhrf.py index b1b529b6b5..5a6ff39b18 100644 --- a/pypeit/spectrographs/aat_uhrf.py +++ b/pypeit/spectrographs/aat_uhrf.py @@ -251,8 +251,10 @@ def config_specific_par(self, scifile, inp_par=None): par = super().config_specific_par(scifile, inp_par=inp_par) if par['calibrations']['wavelengths']['reid_arxiv'] is None: - msgs.warning("Wavelength setup not supported!" + msgs.newline() + msgs.newline() + - "Please perform your own wavelength calibration, and provide the path+filename using:" + msgs.newline() + - msgs.pypeitpar_text(['calibrations', 'wavelengths', 'reid_arxiv = '])) + msgs.warning( + "Wavelength setup not supported!\n\n" + "Please perform your own wavelength calibration, and provide the path+filename " + "using the reid_arxiv parameter." 
+ ) # Return return par diff --git a/pypeit/spectrographs/ldt_deveny.py b/pypeit/spectrographs/ldt_deveny.py index ff0734c731..b60b70723d 100644 --- a/pypeit/spectrographs/ldt_deveny.py +++ b/pypeit/spectrographs/ldt_deveny.py @@ -182,7 +182,7 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: if (grating_kwd := headarr[0]['GRATING']) not in gratings: raise PypeItError(f"Grating value {grating_kwd} not recognized.") if grating_kwd == "UNKNOWN": - msgs.warning(f"Grating not selected in the LOUI; {msgs.newline()}" + msgs.warning("Grating not selected in the LOUI;\n" "Fix the header keyword GRATING before proceeding.") return f"{gratings[grating_kwd]} ({grating_kwd})" @@ -202,7 +202,7 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: # Extract lines/mm, catch 'UNKNOWN' grating if (grating_kwd := headarr[0]["GRATING"]) == "UNKNOWN": lpmm = np.inf - msgs.warning(f"Grating angle not selected in the LOUI; {msgs.newline()}" + msgs.warning("Grating angle not selected in the LOUI;\n" "Fix the header keyword GRANGLE before proceeding.") else: lpmm = float(grating_kwd.split("/")[0]) diff --git a/pypeit/spectrographs/p200_dbsp.py b/pypeit/spectrographs/p200_dbsp.py index b5d3369d58..03d6dfa54b 100644 --- a/pypeit/spectrographs/p200_dbsp.py +++ b/pypeit/spectrographs/p200_dbsp.py @@ -81,7 +81,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): try: return Angle(headarr[0]['ANGLE'].lower()).deg except Exception as e: - msgs.warning("Could not read dispangle from header:" + msgs.newline() + str(headarr[0]['ANGLE'])) + msgs.warning("Could not read dispangle from header:\n" + str(headarr[0]['ANGLE'])) raise e else: return None diff --git a/pypeit/spectrographs/vlt_fors.py b/pypeit/spectrographs/vlt_fors.py index f6a7ccf800..6f27614972 100644 --- a/pypeit/spectrographs/vlt_fors.py +++ b/pypeit/spectrographs/vlt_fors.py @@ -455,8 +455,12 @@ def parse_dither_pattern(self, file_list, ext=None): u_hat_this = np.array([ra_off.to('arcsec').value/separation, dec_off.to('arcsec').value/separation]) dot_product = np.dot(u_hat_slit, u_hat_this) if not np.isclose(np.abs(dot_product),1.0, atol=1e-2): - raise PypeItError('The slit appears misaligned with the angle between the coordinates: dot_product={:7.5f}'.format(dot_product) + msgs.newline() + - 'The position angle in the headers {:5.3f} differs from that computed from the coordinates {:5.3f}'.format(posang_this, posang_ref)) + raise PypeItError( + 'The slit appears misaligned with the angle between the coordinates: ' + f'dot_product={dot_product:7.5f}\n' + f'The position angle in the headers {posang_this:5.3f} differs from that ' + f'computed from the coordinates {posang_ref:5.3f}' + ) offset_arcsec[ifile] = separation*np.sign(dot_product) # dither_id.append(hdr['FRAMEID']) diff --git a/pypeit/specutils/pypeit_loaders.py b/pypeit/specutils/pypeit_loaders.py index 48150071b1..acb8e1caa5 100644 --- a/pypeit/specutils/pypeit_loaders.py +++ b/pypeit/specutils/pypeit_loaders.py @@ -327,5 +327,7 @@ def pypeit_spec1d_loader_nolist(filename, extract=None, fluxed=True, **kwargs): calibration hasn't been performed or ``fluxed=False``, the spectrum is returned in counts. """ - raise PypeItError(f'The spec1d file {filename.name} cannot be ingested into a Spectrum object.' - f'{msgs.newline()}Please use the SpectrumList object for spec1d files.') + raise PypeItError( + f'The spec1d file {filename.name} cannot be ingested into a Spectrum object.\n' + 'Please use the SpectrumList object for spec1d files.' 
+ ) From 9f559b40ccb24bf9bfffdc8b70523a957b169c7a Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 17:31:03 -0700 Subject: [PATCH 07/33] other msgs functions --- pypeit/alignframe.py | 2 +- pypeit/calibrations.py | 24 ++++++++++++------------ pypeit/coadd3d.py | 3 +-- pypeit/core/fitting.py | 4 ++-- pypeit/core/flexure.py | 6 +++--- pypeit/core/flux_calib.py | 8 ++++---- pypeit/core/gui/identify.py | 10 +++++----- pypeit/core/gui/object_find.py | 4 ++-- pypeit/core/gui/skysub_regions.py | 2 +- pypeit/core/procimg.py | 2 +- pypeit/find_objects.py | 2 +- pypeit/flatfield.py | 2 +- pypeit/pypeit.py | 8 -------- pypeit/scripts/run_pypeit.py | 1 - pypeit/scripts/run_to_calibstep.py | 1 - pypeit/scripts/skysub_regions.py | 2 +- pypeit/scripts/view_fits.py | 4 ++-- pypeit/setup_gui/model.py | 4 +++- pypeit/spectrographs/gemini_gnirs.py | 2 +- pypeit/spectrographs/gtc_osiris.py | 2 +- pypeit/spectrographs/spectrograph.py | 2 +- 21 files changed, 43 insertions(+), 52 deletions(-) diff --git a/pypeit/alignframe.py b/pypeit/alignframe.py index 7531fe77d6..08a2a58b2c 100644 --- a/pypeit/alignframe.py +++ b/pypeit/alignframe.py @@ -347,7 +347,7 @@ def __init__(self, traces, locations, tilts): Spectral tilts. """ # Perform some checks - msgs.work("Spatial flexure is not currently implemented for the astrometric alignment") + msgs.debug("Spatial flexure is not currently implemented for the astrometric alignment") if type(locations) is list: locations = np.array(locations) if locations.size != traces.shape[1]: diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py index 338fca61d8..4691e188c0 100644 --- a/pypeit/calibrations.py +++ b/pypeit/calibrations.py @@ -234,13 +234,13 @@ def check_calibrations(self, file_list, check_lamps=True): maxlmp = max([len("Lamp status")] + [len(x) for x in lampstat]) strout = "{0:" + str(maxlen) + "} {1:s}" # Print the messages - print(msgs.indent() + '-' * maxlen + " " + '-' * maxlmp) - print(msgs.indent() + strout.format("Filename", "Lamp status")) - print(msgs.indent() + '-' * maxlen + " " + '-' * maxlmp) + print(' ' + '-' * maxlen + " " + '-' * maxlmp) + print(' ' + strout.format("Filename", "Lamp status")) + print(' ' + '-' * maxlen + " " + '-' * maxlmp) for ff, file in enumerate(file_list): - print(msgs.indent() + print(' ' + strout.format(os.path.split(file)[1], " ".join(lampstat[ff].split("_")))) - print(msgs.indent() + '-' * maxlen + " " + '-' * maxlmp) + print(' ' + '-' * maxlen + " " + '-' * maxlmp) def find_calibrations(self, frametype, frameclass): """ @@ -685,7 +685,7 @@ def get_scattlight(self, force:str=None): # Need to build everything from scratch. Start with the trace image. 
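
The substitutions in this patch collapse PypeIt's nonstandard message severities onto the standard logging levels. The mapping below is reconstructed from the hunks in this patch and is a sketch rather than an official table; the short loop at the end mirrors the indented file listings adopted in calibrations.py:

    # Mapping inferred from the substitutions in this patch (may be incomplete):
    #
    #   msgs.work(...)    -> msgs.debug(...)      # "not yet implemented" notes
    #   msgs.bug(...)     -> msgs.debug(...)      # developer-facing diagnostics
    #   msgs.test(...)    -> msgs.debug(...)      # debugging printout
    #   msgs.prindent(s)  -> msgs.info(f'  {s}')  # indented listings
    #   msgs.indent()     -> a literal space prefix on plain print() calls

    from pathlib import Path
    from pypeit import msgs

    # e.g., listing calibration input files at INFO level (file names hypothetical):
    for f in ['/data/raw/b0001.fits', '/data/raw/b0002.fits']:
        msgs.info(f'  {Path(f).name}')
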
msgs.info('Creating scattered light calibration frame using files: ') for f in raw_scattlight_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') # Reset the BPM self.get_bpm(frame=raw_scattlight_files[0]) @@ -862,7 +862,7 @@ def get_flats(self, force:str=None): msgs.info('Creating pixel-flat calibration frame using files: ') for f in raw_pixel_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') pixel_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['pixelflatframe'], raw_pixel_files, dark=self.msdark, @@ -878,7 +878,7 @@ def get_flats(self, force:str=None): msgs.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') lampoff_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['lampoffflatsframe'], raw_lampoff_files, @@ -908,7 +908,7 @@ def get_flats(self, force:str=None): msgs.info('Creating slit-illumination flat calibration frame using files: ') for f in raw_illum_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') illum_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['illumflatframe'], raw_illum_files, @@ -917,7 +917,7 @@ def get_flats(self, force:str=None): if len(raw_lampoff_files) > 0: msgs.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') if lampoff_flat is None: # Perform a check on the files self.check_calibrations(raw_lampoff_files) @@ -1033,7 +1033,7 @@ def get_slits(self, force:str=None): # Need to build everything from scratch. Start with the trace image. msgs.info('Creating edge tracing calibration frame using files: ') for f in raw_trace_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') # Reset the BPM self.get_bpm(frame=raw_trace_files[0]) @@ -1052,7 +1052,7 @@ def get_slits(self, force:str=None): if len(raw_lampoff_files) > 0: msgs.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') # Reset the BPM self.get_bpm(frame=raw_trace_files[0]) diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index b49fd649c8..26a714a138 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -885,8 +885,7 @@ def run(self): Main entry routine to set the order of operations to coadd the data. For specific details of this procedure, see the child routines. 
""" - msgs.bug("This routine should be overridden by child classes.") - raise PypeItError("Cannot proceed without coding the run() routine.") + raise NotImplementedError('Base class run function must be overriden by deerived class.') class SlicerIFUCoAdd3D(CoAdd3D): diff --git a/pypeit/core/fitting.py b/pypeit/core/fitting.py index eb1a61c283..ba4037cc0d 100644 --- a/pypeit/core/fitting.py +++ b/pypeit/core/fitting.py @@ -830,9 +830,9 @@ def polyfit2d_general(x, y, z, deg, w=None, function='polynomial', if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: - msgs.bug("fitting.polyfit2d - Expected 1D vector for weights") + msgs.debug("fitting.polyfit2d - Expected 1D vector for weights") if len(x) != len(w) or len(y) != len(w) or len(x) != len(y): - msgs.bug("fitting.polyfit2d - Expected x, y and weights to have same length") + msgs.debug("fitting.polyfit2d - Expected x, y and weights to have same length") z = z * w vander = vander * w[:,np.newaxis] # Reshape diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index 7c2bd1b5ac..09a0708bd8 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -404,7 +404,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N obj_skyspec = obj_skyspec.rebin(keep_wave) # Deal with bad pixels - msgs.work("Need to mask bad pixels") + msgs.debug("Need to mask bad pixels") # Trim edges (rebinning is junk there) arx_skyspec.data['flux'][0,:2] = 0. arx_skyspec.data['flux'][0,-2:] = 0. @@ -453,7 +453,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N arx_sky_flux = np.clip(arx_sky_flux, arx_lower, arx_upper) # # # Consider sharpness filtering (e.g. LowRedux) - # msgs.work("Consider taking median first [5 pixel]") + # msgs.debug("Consider taking median first [5 pixel]") # Cross correlation of spectra corr = np.correlate(arx_sky_flux, obj_sky_flux, "same") @@ -880,7 +880,7 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco results of each slit. This is filled with a basically empty dict if the slit is skipped. """ - msgs.work("Consider doing 2 passes in flexure as in LowRedux") + msgs.debug("Consider doing 2 passes in flexure as in LowRedux") # Determine the method slit_cen = True if (specobjs is None) or (method == "slitcen") else False diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index ecbd380115..ef1233ec71 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -995,9 +995,9 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N # Compute an effective resolution for the standard. This could be improved # to setup an array of breakpoints based on the resolution. At the # moment we are using only one number - msgs.work("Should pull resolution from arc line analysis") - msgs.work("At the moment the resolution is taken as the PixelScale") - msgs.work("This needs to be changed!") + msgs.debug("Should pull resolution from arc line analysis") + msgs.debug("At the moment the resolution is taken as the PixelScale") + msgs.debug("This needs to be changed!") std_pix = np.median(np.abs(wave[zeropoint_data_gpm] - np.roll(wave[zeropoint_data_gpm], 1))) std_res = np.median(wave[zeropoint_data_gpm]/resolution) # median resolution in units of Angstrom. 
if (nresln * std_res) < std_pix: @@ -1006,7 +1006,7 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N nresln = std_res / std_pix # Output some helpful information for double-checking input params are correct - msgs.test(f" This is the passed-in R: {resolution}") + msgs.debug(f" This is the passed-in R: {resolution}") msgs.info(f" This is the standard pixel: {std_pix:.2f} Å") msgs.info(f" This is the standard resolution element: {std_res:.2f} Å") msgs.info(f" Breakpoint spacing: {std_res * nresln:.2f} pixels") diff --git a/pypeit/core/gui/identify.py b/pypeit/core/gui/identify.py index e9df262d41..0f50328c6f 100644 --- a/pypeit/core/gui/identify.py +++ b/pypeit/core/gui/identify.py @@ -1073,7 +1073,7 @@ def operations(self, key, axisID, event): self.update_infobox(message="WARNING: There are unsaved changes!!\nPress q again to exit", yesno=False) self._qconf = True else: - msgs.bug("Need to change this to kill and return the results to PypeIt") + msgs.debug("Need to change this to kill and return the results to PypeIt") plt.close() elif self._qconf: self.update_infobox(default=True) @@ -1089,7 +1089,7 @@ def operations(self, key, axisID, event): # Deal with the response if self._respreq[1] == "write": # First remove the old file, and save the new one - msgs.work("Not implemented yet!") + msgs.debug("Not implemented yet!") self.write() else: return @@ -1157,7 +1157,7 @@ def operations(self, key, axisID, event): elif self._fitr is None: msgs.info("You must select a fitting region first") else: - msgs.work("Feature not yet implemented") + msgs.debug("Feature not yet implemented") elif key == 's': self.save_IDs() elif key == 'w': @@ -1253,7 +1253,7 @@ def fitsol_value(self, xfit=None, idx=None): else: return np.polyval(self._fitdict["coeff"], xfit[idx] / self._fitdict["scale"]) else: - msgs.bug("Cannot predict wavelength value - no fit has been performed") + msgs.debug("Cannot predict wavelength value - no fit has been performed") return None def fitsol_deriv(self, xfit=None, idx=None): @@ -1279,7 +1279,7 @@ def fitsol_deriv(self, xfit=None, idx=None): else: return np.polyval(cder, xfit[idx] / self._fitdict["scale"]) / self._fitdict["scale"] else: - msgs.bug("Cannot predict wavelength value - no fit has been performed") + msgs.debug("Cannot predict wavelength value - no fit has been performed") return None def add_new_detection(self): diff --git a/pypeit/core/gui/object_find.py b/pypeit/core/gui/object_find.py index 54526c5a3c..1fe52e142f 100644 --- a/pypeit/core/gui/object_find.py +++ b/pypeit/core/gui/object_find.py @@ -562,7 +562,7 @@ def operations(self, key, axisID): self.update_infobox(message="WARNING: There are unsaved changes!!\nPress q again to exit", yesno=False) self._qconf = True else: - msgs.bug("Need to change this to kill and return the results to PypeIt") + msgs.debug("Need to change this to kill and return the results to PypeIt") plt.close() elif self._qconf: self.update_infobox(default=True) @@ -823,7 +823,7 @@ def get_specobjs(self): SpecObjs: SpecObjs Class """ if self._use_updates: - msgs.work("Have not updated SpecObjs yet") + msgs.debug("Have not updated SpecObjs yet") return self.specobjs else: return None diff --git a/pypeit/core/gui/skysub_regions.py b/pypeit/core/gui/skysub_regions.py index 67b58dfd24..0a6551cc00 100644 --- a/pypeit/core/gui/skysub_regions.py +++ b/pypeit/core/gui/skysub_regions.py @@ -525,7 +525,7 @@ def operations(self, key, axisID): 'again to exit', yesno=False) self._qconf = True else: - msgs.bug("Need to 
change this to kill and return the results to PypeIt") + msgs.debug("Need to change this to kill and return the results to PypeIt") plt.close() elif self._qconf: self.update_infobox(default=True) diff --git a/pypeit/core/procimg.py b/pypeit/core/procimg.py index c0401d7a13..d48f7a2cdd 100644 --- a/pypeit/core/procimg.py +++ b/pypeit/core/procimg.py @@ -284,7 +284,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m # Additional algorithms (not traditionally implemented by LA cosmic) to # remove some false positives. - #msgs.work("The following algorithm would be better on the rectified, tilts-corrected image") + #msgs.debug("The following algorithm would be better on the rectified, tilts-corrected image") filt = scipy.ndimage.sobel(sciframe, axis=1, mode='constant') _inv_mad = utils.inverse(np.sqrt(np.abs(sciframe))) # Avoid divisions by 0 filty = scipy.ndimage.sobel(filt * _inv_mad, axis=0, mode='constant') diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index bd4b07e9bc..3bcfd1051a 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -1236,7 +1236,7 @@ def calculate_flexure(self, global_sky): # Save QA # TODO :: Need to implement QA once the flexure code has been tidied up, and this routine has been moved # out of the find_objects() class. - msgs.work("QA is not currently implemented for the flexure correction") + msgs.debug("QA is not currently implemented for the flexure correction") if False:#flex_list is not None: basename = f'{self.basename}_global_{self.spectrograph.get_det_name(self.det)}' out_dir = os.path.join(self.par['rdx']['redux_path'], 'QA') diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index 6118b1f85d..7fe1950309 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -1864,7 +1864,7 @@ def make_slitless_pixflat(self, msbias=None, msdark=None, calib_dir=None, write_ msgs.info(f'Creating slitless pixel-flat calibration frame ' f'for {self.spectrograph.get_det_name(_det)} using files: ') for f in this_raw_files: - msgs.prindent(f'{Path(f).name}') + msgs.info(f' {Path(f).name}') # Reset the BPM msbpm = self.spectrograph.bpm(this_raw_files[0], _det, msbias=msbias if self.par['bpm_usebias'] else None) diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 41399abcd0..4527821863 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -1258,14 +1258,6 @@ def save_exposure(self, frame:int, all_spec2d:spec2dobj.AllSpec2DObj, update_det=update_det, slitspatnum=self.par['rdx']['slitspatnum']) -# def msgs_reset(self): -# """ -# Reset the msgs object -# """ -# # Reset the global logger -# msgs.reset(log=self.logname, verbosity=self.verbosity) -# msgs.pypeit_file = self.pypeit_file - def print_end_time(self): """ Print the elapsed time diff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py index a6784d5d20..25c6703ee3 100644 --- a/pypeit/scripts/run_pypeit.py +++ b/pypeit/scripts/run_pypeit.py @@ -106,7 +106,6 @@ def main(args): # QA HTML msgs.info('Generating QA HTML') pypeIt.build_qa() - msgs.close() return 0 diff --git a/pypeit/scripts/run_to_calibstep.py b/pypeit/scripts/run_to_calibstep.py index 0f02df17cf..7f925b190f 100644 --- a/pypeit/scripts/run_to_calibstep.py +++ b/pypeit/scripts/run_to_calibstep.py @@ -96,7 +96,6 @@ def main(args): # QA HTML msgs.info('Generating QA HTML') pypeIt.build_qa() - msgs.close() return 0 diff --git a/pypeit/scripts/skysub_regions.py b/pypeit/scripts/skysub_regions.py index 1b21bd64ad..b9de03b6df 100644 --- a/pypeit/scripts/skysub_regions.py +++ 
b/pypeit/scripts/skysub_regions.py @@ -79,7 +79,7 @@ def main(args): det = spec2DObj.detector.parse_name(detname) # Setup for PypeIt imports - msgs.reset(verbosity=args.verbosity) + msgs.init(level=msgs.level) # Grab the slit edges slits = spec2DObj.slits diff --git a/pypeit/scripts/view_fits.py b/pypeit/scripts/view_fits.py index 0301207d4e..f829391a8e 100644 --- a/pypeit/scripts/view_fits.py +++ b/pypeit/scripts/view_fits.py @@ -61,8 +61,8 @@ def main(args): print(hdu.info()) return - # Setup for PYPIT imports - msgs.reset(verbosity=2) + # TODO: Update verbosity + msgs.init(level=msgs.level) if args.proc and args.exten is not None: raise PypeItError('You cannot specify --proc and --exten, since --exten shows the raw image') diff --git a/pypeit/setup_gui/model.py b/pypeit/setup_gui/model.py index ccd5fb5d5e..c44a5aac79 100644 --- a/pypeit/setup_gui/model.py +++ b/pypeit/setup_gui/model.py @@ -1236,8 +1236,10 @@ def setup_logging(self, verbosity): else: logfile = None + # TODO: Need help from Dusty to update this self.log_buffer = LogBuffer(logfile,verbosity) - msgs.reset(verbosity=verbosity, log=self.log_buffer, log_to_stderr=False) + msgs.init(level=msgs.level, log_file=self.log_buffer) +# msgs.reset(verbosity=verbosity, log=self.log_buffer, log_to_stderr=False) msgs.info(f"QT Version: {qtpy.QT_VERSION}") msgs.info(f"PySide version: {qtpy.PYSIDE_VERSION}") msgs.info(f"PyQt version: {qtpy.PYQT_VERSION}") diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index 166cfd0133..defdcfb826 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -723,7 +723,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): # Get the pixel and slice scales pxscl = platescale * binspat / 3600.0 # Need to convert arcsec to degrees - msgs.work("NEED TO WORK OUT SLICER SCALE AND PIXEL SCALE") + msgs.debug("NEED TO WORK OUT SLICER SCALE AND PIXEL SCALE") slscl = self.get_meta_value([hdr], 'slitwid') if spatial_scale is not None: if pxscl > spatial_scale / 3600.0: diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index f554e56962..7eff8ddc8c 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -207,7 +207,7 @@ def compound_meta(self, headarr, meta_key): return 20.0 elif meta_key == 'parangle': try: - msgs.work("Parallactic angle is not available for MAAT - DAR correction may be incorrect") + msgs.debug("Parallactic angle is not available for MAAT - DAR correction may be incorrect") return headarr[0]['PARANG'] # Must be expressed in radians except KeyError: raise PypeItError("Parallactic angle is not in header") diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index 01cf5e4263..9c4b60415a 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -1747,7 +1747,7 @@ def vet_assigned_ftypes(self, type_bits, fitstbl): if np.any(none_coords): msgs.warning('The following frames have None coordinates. 
' 'They could be a twilight flat frame that was missed by the automatic identification') - [msgs.prindent(f) for f in fitstbl['filename'][none_coords]] + [msgs.warning(f' {f}') for f in fitstbl['filename'][none_coords]] # turn off the standard star flag for these frames type_bits[none_coords] = fitstbl.type_bitmask.turn_off(type_bits[none_coords], flag='standard') From ff13434109902c03e7a8520cdbed6c892b02771e Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 17:32:05 -0700 Subject: [PATCH 08/33] rm pypmsgs --- pypeit/pypmsgs.py | 410 ---------------------------------------------- 1 file changed, 410 deletions(-) delete mode 100644 pypeit/pypmsgs.py diff --git a/pypeit/pypmsgs.py b/pypeit/pypmsgs.py deleted file mode 100644 index 5f5589a3fe..0000000000 --- a/pypeit/pypmsgs.py +++ /dev/null @@ -1,410 +0,0 @@ -""" -Module for terminal and file logging. - -.. todo:: - Why not use pythons native logging package? - -""" -import datetime -import inspect -import io -import os -import sys - -# Imported for versioning -import astropy -import numpy -import scipy - -import pypeit -from pypeit.core.qa import close_qa -#from pypeit import pypeit_user - -#pypeit_logger = None - -# Alphabetical list of developers -developers = ['ema', 'joe', 'milvang', 'rcooke', 'thsyu', 'xavier'] - - -#class PypeItError(Exception): -# pass -# -#class PypeItBitMaskError(PypeItError): -# pass -# -#class PypeItDataModelError(PypeItError): -# pass -# -#class PypeItPathError(PypeItError): -# pass - - -class Messages: - """ - Create coloured text for messages printed to screen. - - For further details on colours see the following example: - http://ascii-table.com/ansi-escape-sequences.php - - Parameters - ---------- - log : str or file-like object,optional - Name of saved log file (no log will be saved if log==""). If None, no - log is saved. - verbosity : int - Level of verbosity. Options are - - 0 = No output - - 1 = Minimal output - - 2 = All output (default) - colors : bool - If true, the screen output will have colors, otherwise normal screen - output will be displayed - """ - def __init__(self, log=None, verbosity=None, colors=True): - - # Initialize other variables - self._defverb = 1 - - if pypeit_user in developers: - self._defverb = 2 - self._verbosity = self._defverb if verbosity is None else verbosity - - # TODO: Why are these two necessary? It would seem better to - # provide Messages with member functions that can operate on - # sciexp and pypeit_file instead of having them kept within the - # object itself... - self.sciexp = None - self.pypeit_file = None - self.qa_path = None - - # Initialize the log - self._log_to_stderr = self._verbosity != 0 - self._log = None - self._initialize_log_file(log=log) - - # Use colors? 
- self._start = None - self._end = None - self._black_CL = None - self._yellow_CL = None - self._blue_CL = None - self._green_CL = None - self._red_CL = None - self._white_RD = None - self._white_GR = None - self._white_BK = None - self._white_BL = None - self._black_YL = None - self._yellow_BK = None - - self.disablecolors() - if colors: - self.enablecolors() - - def _cleancolors(self, msg): - cols = [self._end, self._start, - self._black_CL, self._yellow_CL, self._blue_CL, self._green_CL, self._red_CL, - self._white_RD, self._white_GR, self._white_BK, self._white_BL, - self._black_YL, self._yellow_BK] - for i in cols: - msg = msg.replace(i, '') - return msg - - def _devmsg(self): - if self._verbosity == 2: - info = inspect.getouterframes(inspect.currentframe())[3] - devmsg = self._start + self._blue_CL + info[1].split('/')[-1] + ' ' + str(info[2]) \ - + ' ' + info[3] + '()' + self._end + ' - ' - else: - devmsg = '' - return devmsg - - def _print(self, premsg, msg, last=True, printDevMsg=True): - """ - Print to standard error and the log file - """ - devmsg = self._devmsg() if printDevMsg else '' - _msg = premsg+devmsg+msg - if self._log_to_stderr != 0: - print(_msg, file=sys.stderr) - if self._log: - clean_msg = self._cleancolors(_msg) - self._log.write(clean_msg+'\n' if last else clean_msg) - - def _initialize_log_file(self, log=None): - """ - Expects self._log is already None. - """ - - if log is None: - return - - self._log = log if isinstance(log, io.IOBase) else open(log, 'w') - - - self._log.write("------------------------------------------------------\n\n") - self._log.write("This log was generated with version {0:s} of PypeIt\n\n".format( - pypeit.__version__)) - self._log.write("You are using scipy version={:s}\n".format(scipy.__version__)) - self._log.write("You are using numpy version={:s}\n".format(numpy.__version__)) - self._log.write("You are using astropy version={:s}\n\n".format(astropy.__version__)) - self._log.write("------------------------------------------------------\n\n") - - def reset(self, log=None, verbosity=None, colors=True, log_to_stderr=None): - """ - Reinitialize the object. - - Needed so that there can be a default object for all modules, - but also a dynamically defined log file. 
- """ - # Initialize other variables - self._verbosity = self._defverb if verbosity is None else verbosity - if log_to_stderr is None: - self._log_to_stderr = self._verbosity != 0 - else: - self._log_to_stderr = log_to_stderr - - self.reset_log_file(log) - self.disablecolors() - if colors: - self.enablecolors() - - def reset_log_file(self, log): - if self._log: - self._log.close() - self._log = None - self._initialize_log_file(log=log) - - def close(self): - ''' - Close the log file before the code exits - ''' - close_qa(self.pypeit_file, self.qa_path) - return self.reset_log_file(None) - - def error(self, msg, cls='PypeItError'): - """ - Print an error message - """ - premsg = '\n'+self._start + self._white_RD + '[ERROR] ::' + self._end + ' ' - self._print(premsg, msg) - - # Close QA plots - close_qa(self.pypeit_file, self.qa_path) - - raise eval(cls)(msg) - - - def info(self, msg): - """ - Print an information message - """ - premsg = self._start + self._green_CL + '[INFO] ::' + self._end + ' ' - self._print(premsg, msg) - - def info_update(self, msg, last=False): - """ - Print an information message that needs to be updated - """ - premsg = '\r' + self._start + self._green_CL + '[INFO] ::' + self._end + ' ' - self._print(premsg, msg, last=last) - - def test(self, msg): - """ - Print a test message - """ - if self._verbosity == 2: - premsg = self._start + self._white_BL + '[TEST] ::' + self._end + ' ' - self._print(premsg, msg) - - def warn(self, msg): - """ - Print a warning message - """ - premsg = self._start + self._red_CL + '[WARNING] ::' + self._end + ' ' - self._print(premsg, msg) - - def bug(self, msg): - """ - Print a bug message - """ - premsg = self._start + self._white_BK + '[BUG] ::' + self._end + ' ' - self._print(premsg, msg) - - def work(self, msg): - """ - Print a work in progress message - """ - if self._verbosity == 2: - premsgp = self._start + self._black_CL + '[WORK IN ]::' + self._end + '\n' - premsgs = self._start + self._yellow_CL + '[PROGRESS]::' + self._end + ' ' - self._print(premsgp+premsgs, msg) - - def pypeitpar_text(self, msglist): - """ - Prepare a text string with the pypeit par formatting. - - Parameters - ---------- - msglist: list - A list containing the pypeit parameter strings. The last element of - the list must be the argument and the variable. For example, to - print: - - .. code-block:: ini - - [sensfunc] - [[UVIS]] - polycorrect = False - - you should set ``msglist = ['sensfunc', 'UVIS', 'polycorrect = False']``. - - Returns - ------- - parstring : str - The parameter string - """ - parstring = '\n' - premsg = ' ' - for ll, lin in enumerate(msglist): - thismsg = ll*' ' - if ll == len(msglist)-1: - thismsg += lin - else: - thismsg += (ll+1) * '[' + lin + (ll+1) * ']' - parstring += premsg + thismsg + '\n' - return parstring - - def pypeitpar(self, msglist): - """ - Print a message with the pypeit par formatting. - - Parameters - ---------- - msglist: list - A list containing the pypeit parameter strings. The last element of - the list must be the argument and the variable. For example, to - print: - - .. code-block:: ini - - [sensfunc] - [[UVIS]] - polycorrect = False - - you should set ``msglist = ['sensfunc', 'UVIS', 'polycorrect = False']``. 
- - """ - premsg = ' ' - for ll, lin in enumerate(msglist): - thismsg = ll*' ' - if ll == len(msglist)-1: - thismsg += lin - else: - thismsg += (ll+1) * '[' + lin + (ll+1) * ']' - self._print(premsg, thismsg, printDevMsg=False) - - def prindent(self, msg): - """ - Print an indent - """ - premsg = ' ' - self._print(premsg, msg) - - def input(self): - """ - Return a text string to be used to display input required from the user - """ - premsg = self._start + self._blue_CL + '[INPUT] ::' + self._end + ' ' - return premsg - - @staticmethod - def newline(): - """ - Return a text string containing a newline to be used with messages - """ - return '\n ' - - @staticmethod - def indent(): - """ - Return a text string containing an indent to be used with messages - """ - return ' ' - - # Set the colors - def enablecolors(self): - """ - Enable colored output text - """ - - # Start and end coloured text - self._start = '\x1B[' - self._end = '\x1B[' + '0m' - - # Clear Backgrounds - self._black_CL = '1;30m' - self._yellow_CL = '1;33m' - self._blue_CL = '1;34m' - self._green_CL = '1;32m' - self._red_CL = '1;31m' - - # Coloured Backgrounds - self._white_RD = '1;37;41m' - self._white_GR = '1;37;42m' - self._white_BK = '1;37;40m' - self._white_BL = '1;37;44m' - self._black_YL = '1;37;43m' - self._yellow_BK = '1;33;40m' - - def disablecolors(self): - """ - Disable colored output text - """ - - # Start and end coloured text - self._start = '' - self._end = '' - - # Clear Backgrounds - self._black_CL = '' - self._yellow_CL = '' - self._blue_CL = '' - self._green_CL = '' - self._red_CL = '' - - # Coloured Backgrounds - self._white_RD = '' - self._white_GR = '' - self._white_BK = '' - self._white_BL = '' - self._black_YL = '' - self._yellow_BK = '' - - def set_logfile_and_verbosity(self, scriptname, verbosity): - """ - Set the logfile name and verbosity level for a script run. - - PypeIt scripts (with the exception of run_pypeit) default to verbosity - level = 1. For certain scripts, having a more verbose output (with an - accompanying log file) would be helpful for debugging purposes. This - function provides the ability to set the ``msgs`` verbosity and create - a log file for those certain scripts. - - Log filenames have the form scriptname_YYYYMMDD_HHMM.log to differentiate - between different runs of the script. Timestamp is UT. - - Args: - scriptname (:obj:`str`, optional): - The name of the calling script for use in the logfile - verbosity (:obj:`int`, optional): - The requested verbosity, passed in from the argument parser. 
- Verbosity level between 0 [none] and 2 [all] - """ - # Create a UT timestamp (to the minute) for the log filename - timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y%m%d-%H%M") - # Create a logfile only if verbosity == 2 - logname = f"{scriptname}_{timestamp}.log" if verbosity == 2 else None - # Set the verbosity in msgs - self.reset(log=logname, verbosity=verbosity) - From bd77bbcea0ae41cecd7427e94210f43df0c7f5c5 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 8 Oct 2025 17:42:01 -0700 Subject: [PATCH 09/33] doc update --- doc/api/pypeit.exceptions.rst | 8 ++++ .../{pypeit.pypmsgs.rst => pypeit.logger.rst} | 6 +-- doc/api/pypeit.rst | 3 +- doc/help/run_pypeit.rst | 43 ++++++++++--------- pypeit/core/atmextinction.py | 2 +- pypeit/core/standard.py | 2 +- pypeit/spectrographs/aat_uhrf.py | 2 +- pypeit/spectrographs/apf_levy.py | 2 +- pypeit/spectrographs/bok_bc.py | 2 +- pypeit/spectrographs/gemini_flamingos.py | 4 +- pypeit/spectrographs/gemini_gmos.py | 2 +- pypeit/spectrographs/gemini_gnirs.py | 2 +- pypeit/spectrographs/gtc_osiris.py | 4 +- pypeit/spectrographs/jwst_nirspec.py | 2 +- pypeit/spectrographs/keck_deimos.py | 2 +- pypeit/spectrographs/keck_esi.py | 2 +- pypeit/spectrographs/keck_hires.py | 2 +- pypeit/spectrographs/keck_kcwi.py | 2 +- pypeit/spectrographs/keck_lris.py | 2 +- pypeit/spectrographs/keck_mosfire.py | 2 +- pypeit/spectrographs/keck_nirspec.py | 6 +-- pypeit/spectrographs/lbt_luci.py | 2 +- pypeit/spectrographs/lbt_mods.py | 2 +- pypeit/spectrographs/ldt_deveny.py | 2 +- pypeit/spectrographs/magellan_fire.py | 4 +- pypeit/spectrographs/mdm_modspec.py | 2 +- pypeit/spectrographs/mdm_osmos.py | 4 +- pypeit/spectrographs/mmt_binospec.py | 2 +- pypeit/spectrographs/mmt_bluechannel.py | 2 +- pypeit/spectrographs/mmt_mmirs.py | 2 +- pypeit/spectrographs/not_alfosc.py | 2 +- pypeit/spectrographs/ntt_efosc2.py | 2 +- pypeit/spectrographs/p200_dbsp.py | 2 +- pypeit/spectrographs/p200_ngps.py | 2 +- pypeit/spectrographs/shane_kast.py | 2 +- pypeit/spectrographs/soar_goodman.py | 2 +- pypeit/spectrographs/subaru_focas.py | 2 +- pypeit/spectrographs/tng_dolores.py | 2 +- pypeit/spectrographs/vlt_fors.py | 2 +- pypeit/spectrographs/vlt_sinfoni.py | 2 +- pypeit/spectrographs/vlt_xshooter.py | 4 +- pypeit/spectrographs/wht_isis.py | 4 +- pypeit/tests/test_atmext.py | 2 +- pypeit/tests/test_spectrum.py | 2 +- pypeit/tests/test_standard.py | 2 +- 45 files changed, 84 insertions(+), 74 deletions(-) create mode 100644 doc/api/pypeit.exceptions.rst rename doc/api/{pypeit.pypmsgs.rst => pypeit.logger.rst} (50%) diff --git a/doc/api/pypeit.exceptions.rst b/doc/api/pypeit.exceptions.rst new file mode 100644 index 0000000000..de33695d16 --- /dev/null +++ b/doc/api/pypeit.exceptions.rst @@ -0,0 +1,8 @@ +pypeit.exceptions module +======================== + +.. automodule:: pypeit.exceptions + :members: + :private-members: + :undoc-members: + :show-inheritance: diff --git a/doc/api/pypeit.pypmsgs.rst b/doc/api/pypeit.logger.rst similarity index 50% rename from doc/api/pypeit.pypmsgs.rst rename to doc/api/pypeit.logger.rst index 08117ebfce..3866c64c1a 100644 --- a/doc/api/pypeit.pypmsgs.rst +++ b/doc/api/pypeit.logger.rst @@ -1,7 +1,7 @@ -pypeit.pypmsgs module -===================== +pypeit.logger module +==================== -.. automodule:: pypeit.pypmsgs +.. 
automodule:: pypeit.logger :members: :private-members: :undoc-members: diff --git a/doc/api/pypeit.rst b/doc/api/pypeit.rst index 6d5d37f1bd..bd0295ee9a 100644 --- a/doc/api/pypeit.rst +++ b/doc/api/pypeit.rst @@ -35,6 +35,7 @@ Submodules pypeit.coadd3d pypeit.datamodel pypeit.edgetrace + pypeit.exceptions pypeit.extraction pypeit.find_objects pypeit.flatfield @@ -42,6 +43,7 @@ Submodules pypeit.history pypeit.inputfiles pypeit.io + pypeit.logger pypeit.manual_extract pypeit.metadata pypeit.onespec @@ -49,7 +51,6 @@ Submodules pypeit.pypeit pypeit.pypeitdata pypeit.pypeitsetup - pypeit.pypmsgs pypeit.sampling pypeit.scattlight pypeit.sensfilearchive diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index eb7940ac15..1d026912a5 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -4,27 +4,28 @@ usage: run_pypeit [-h] [-v VERBOSITY] [-r REDUX_PATH] [-m] [-s] [-o] [-c] pypeit_file - ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.18.2.dev173+gcdaaa7636 - ## - ## Available spectrographs include: - ## aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, - ## gemini_gmos_north_e2v, gemini_gmos_north_ham, - ## gemini_gmos_north_ham_ns, gemini_gmos_south_ham, gemini_gnirs_echelle, - ## gemini_gnirs_ifu, gtc_maat, gtc_osiris, gtc_osiris_plus, jwst_nircam, - ## jwst_nirspec, keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, - ## keck_lris_blue, keck_lris_blue_orig, keck_lris_red, - ## keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, keck_nires, - ## keck_nirspec_high, keck_nirspec_high_old, keck_nirspec_low, lbt_luci1, - ## lbt_luci2, lbt_mods1b, lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, - ## lbt_mods2b, lbt_mods2b_proc, lbt_mods2r, lbt_mods2r_proc, ldt_deveny, - ## magellan_fire, magellan_fire_long, magellan_mage, mdm_modspec, - ## mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec, mmt_bluechannel, - ## mmt_mmirs, not_alfosc, not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, - ## p200_dbsp_red, p200_ngps_i, p200_ngps_r, p200_tspec, shane_kast_blue, - ## shane_kast_red, shane_kast_red_ret, soar_goodman_blue, - ## soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2, vlt_sinfoni, - ## vlt_xshooter_nir, vlt_xshooter_uvb, vlt_xshooter_vis, wht_isis_blue, - ## wht_isis_red + PypeIt: The Python Spectroscopic Data Reduction Pipeline + Version 1.18.2.dev173+gcdaaa7636 + + Available spectrographs include: + aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, + gemini_gmos_north_e2v, gemini_gmos_north_ham, + gemini_gmos_north_ham_ns, gemini_gmos_south_ham, gemini_gnirs_echelle, + gemini_gnirs_ifu, gtc_maat, gtc_osiris, gtc_osiris_plus, jwst_nircam, + jwst_nirspec, keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, + keck_lris_blue, keck_lris_blue_orig, keck_lris_red, + keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, keck_nires, + keck_nirspec_high, keck_nirspec_high_old, keck_nirspec_low, lbt_luci1, + lbt_luci2, lbt_mods1b, lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, + lbt_mods2b, lbt_mods2b_proc, lbt_mods2r, lbt_mods2r_proc, ldt_deveny, + magellan_fire, magellan_fire_long, magellan_mage, mdm_modspec, + mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec, mmt_bluechannel, + mmt_mmirs, not_alfosc, not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, + p200_dbsp_red, p200_ngps_i, p200_ngps_r, p200_tspec, shane_kast_blue, + shane_kast_red, shane_kast_red_ret, soar_goodman_blue, + soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2, vlt_sinfoni, + vlt_xshooter_nir, vlt_xshooter_uvb, vlt_xshooter_vis, wht_isis_blue, + 
wht_isis_red positional arguments: pypeit_file PypeIt reduction file (must have .pypeit extension) diff --git a/pypeit/core/atmextinction.py b/pypeit/core/atmextinction.py index 966e4f4ab9..51681d57ba 100644 --- a/pypeit/core/atmextinction.py +++ b/pypeit/core/atmextinction.py @@ -16,7 +16,7 @@ from pypeit import msgs from pypeit import dataPaths -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit import utils diff --git a/pypeit/core/standard.py b/pypeit/core/standard.py index 2b1aefd54b..588fc58e7e 100644 --- a/pypeit/core/standard.py +++ b/pypeit/core/standard.py @@ -16,7 +16,7 @@ from pypeit import msgs from pypeit import dataPaths -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.core import spectrum from pypeit.core.meta import convert_radec from pypeit.core.wave import airtovac diff --git a/pypeit/spectrographs/aat_uhrf.py b/pypeit/spectrographs/aat_uhrf.py index 5a6ff39b18..d5138dd3a9 100644 --- a/pypeit/spectrographs/aat_uhrf.py +++ b/pypeit/spectrographs/aat_uhrf.py @@ -228,7 +228,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_specific_par(self, scifile, inp_par=None): diff --git a/pypeit/spectrographs/apf_levy.py b/pypeit/spectrographs/apf_levy.py index 22fcb7c564..4c2ed79ad6 100644 --- a/pypeit/spectrographs/apf_levy.py +++ b/pypeit/spectrographs/apf_levy.py @@ -329,7 +329,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['pinhole']: return good_exp & (fitstbl['idname'] == 'NarrowFlat') & (fitstbl['decker'] == 'Pinhole') - msgs.warning(f'Cannot determine if frames are of type {ftype}.') + msgs.debug(f'Cannot determine if frames are of type {ftype}.') return np.zeros(len(fitstbl), dtype=bool) def is_science(self, fitstbl): diff --git a/pypeit/spectrographs/bok_bc.py b/pypeit/spectrographs/bok_bc.py index f7ed518e7d..f89bf3618e 100644 --- a/pypeit/spectrographs/bok_bc.py +++ b/pypeit/spectrographs/bok_bc.py @@ -387,7 +387,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] != 'off') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/gemini_flamingos.py b/pypeit/spectrographs/gemini_flamingos.py index 94c3c97ed6..c7a960b2dd 100644 --- a/pypeit/spectrographs/gemini_flamingos.py +++ b/pypeit/spectrographs/gemini_flamingos.py @@ -207,7 +207,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'OBJECT') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'OBJECT') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -338,6 +338,6 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Arc') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type 
{0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/gemini_gmos.py b/pypeit/spectrographs/gemini_gmos.py index 643d02c71f..5427323730 100644 --- a/pypeit/spectrographs/gemini_gmos.py +++ b/pypeit/spectrographs/gemini_gmos.py @@ -284,7 +284,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'bias': return good_exp & (fitstbl['target'] == 'Bias')#& (fitstbl['idname'] == 'BIAS') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index defdcfb826..c51bc03e91 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -261,7 +261,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): elif '10/mmLBSX' in fitstbl['dispname'][0]: return good_exp & (fitstbl['idname'] == 'ARC') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 7eff8ddc8c..6fa4dec6c6 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -291,7 +291,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'bias': return good_exp & (np.char.lower(fitstbl['target']) == 'bias') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_independent_frames(self): @@ -1001,7 +1001,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'bias': return good_exp & (fitstbl['target'] == 'BIAS') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_independent_frames(self): diff --git a/pypeit/spectrographs/jwst_nirspec.py b/pypeit/spectrographs/jwst_nirspec.py index 03327f8eff..887ad39196 100644 --- a/pypeit/spectrographs/jwst_nirspec.py +++ b/pypeit/spectrographs/jwst_nirspec.py @@ -237,7 +237,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'science': return np.ones(len(fitstbl), dtype=bool) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/keck_deimos.py b/pypeit/spectrographs/keck_deimos.py index a0bef82679..443f3ce225 100644 --- a/pypeit/spectrographs/keck_deimos.py +++ b/pypeit/spectrographs/keck_deimos.py @@ -679,7 +679,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Line') & (fitstbl['hatch'] == 'closed') \ & (fitstbl['lampstat01'] != 'Off') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/keck_esi.py b/pypeit/spectrographs/keck_esi.py index 72a3e1d80d..a9da8515f6 100644 --- a/pypeit/spectrographs/keck_esi.py +++ 
b/pypeit/spectrographs/keck_esi.py @@ -282,7 +282,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Object') if ftype == 'standard': return good_exp & (fitstbl['idname'] == 'Object') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/keck_hires.py b/pypeit/spectrographs/keck_hires.py index c3870ac484..91bd246c73 100644 --- a/pypeit/spectrographs/keck_hires.py +++ b/pypeit/spectrographs/keck_hires.py @@ -427,7 +427,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # Arc and tilt frames are typed together return good_exp & (fitstbl['idname'] == 'Line') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def vet_assigned_ftypes(self, type_bits, fitstbl): diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 0200250d09..f1637fbe1d 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -423,7 +423,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # Don't type pinhole frames return np.zeros(len(fitstbl), dtype=bool) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): diff --git a/pypeit/spectrographs/keck_lris.py b/pypeit/spectrographs/keck_lris.py index 569d27fd78..8096115bad 100644 --- a/pypeit/spectrographs/keck_lris.py +++ b/pypeit/spectrographs/keck_lris.py @@ -403,7 +403,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arcs') & (fitstbl['hatch'] == 'closed') & no_img - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def vet_assigned_ftypes(self, type_bits, fitstbl): diff --git a/pypeit/spectrographs/keck_mosfire.py b/pypeit/spectrographs/keck_mosfire.py index a3c8de180f..ab5c8b3c67 100644 --- a/pypeit/spectrographs/keck_mosfire.py +++ b/pypeit/spectrographs/keck_mosfire.py @@ -717,7 +717,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_arc = fitstbl['idname'] == 'arclamp' is_obj = (fitstbl['lampstat01'] == 'off') & (fitstbl['idname'] == 'object') & ('long2pos_specphot' not in fitstbl['decker']) return good_exp & (is_arc | is_obj) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) # TODO: Is this supposed to be deprecated in favor of get_comb_group? 
diff --git a/pypeit/spectrographs/keck_nirspec.py b/pypeit/spectrographs/keck_nirspec.py index 19e0f8bf35..402c86b892 100644 --- a/pypeit/spectrographs/keck_nirspec.py +++ b/pypeit/spectrographs/keck_nirspec.py @@ -499,7 +499,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == 'Out') good_exp[is_obj] = fitstbl['exptime'].data[is_obj] > 60.0 return good_exp & (is_arc | is_obj) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -937,7 +937,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == '0') good_exp[is_obj] = fitstbl['exptime'].data[is_obj] > 60.0 return good_exp & (is_arc | is_obj) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -1367,7 +1367,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == 0) \ & (fitstbl['idname'] == 'object') return good_exp & (is_arc | is_obj) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): diff --git a/pypeit/spectrographs/lbt_luci.py b/pypeit/spectrographs/lbt_luci.py index a62567afc1..2c57330cdc 100644 --- a/pypeit/spectrographs/lbt_luci.py +++ b/pypeit/spectrographs/lbt_luci.py @@ -246,7 +246,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return (good_exp & ((fitstbl['idname'] == 'object') | (fitstbl['idname'] == 'arc'))) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) # Detector information from official LBT LUCI website diff --git a/pypeit/spectrographs/lbt_mods.py b/pypeit/spectrographs/lbt_mods.py index 82bbb93616..67b27dc0e3 100644 --- a/pypeit/spectrographs/lbt_mods.py +++ b/pypeit/spectrographs/lbt_mods.py @@ -183,7 +183,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'COMP') & (fitstbl['dispname'] != 'Flat') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/ldt_deveny.py b/pypeit/spectrographs/ldt_deveny.py index b60b70723d..ff4a613a5c 100644 --- a/pypeit/spectrographs/ldt_deveny.py +++ b/pypeit/spectrographs/ldt_deveny.py @@ -414,7 +414,7 @@ def check_frame_type(self, ftype:str, fitstbl:Table, exprng=None): if ftype in ['pinhole', 'align', 'sky', 'lampoffflats', 'scattlight']: # DeVeny doesn't have any of these types of frames return np.zeros(len(fitstbl), dtype=bool) - msgs.warning(f"Cannot determine if frames are of type {ftype}") + msgs.debug(f"Cannot determine if frames are of type {ftype}") return np.zeros(len(fitstbl), dtype=bool) def pypeit_file_keys(self): diff --git a/pypeit/spectrographs/magellan_fire.py b/pypeit/spectrographs/magellan_fire.py index 2d4f33a2d5..fc39ac8e4b 100644 --- 
a/pypeit/spectrographs/magellan_fire.py +++ b/pypeit/spectrographs/magellan_fire.py @@ -241,7 +241,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Science') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @property @@ -460,6 +460,6 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Arc') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/mdm_modspec.py b/pypeit/spectrographs/mdm_modspec.py index aa986be7af..a20233c476 100644 --- a/pypeit/spectrographs/mdm_modspec.py +++ b/pypeit/spectrographs/mdm_modspec.py @@ -286,7 +286,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['illumflat', 'trace']: # Twilight Flats return good_exp & (fitstbl['idname'] == 'Flat') & (fitstbl['mirror'] == 'OUT') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) \ No newline at end of file diff --git a/pypeit/spectrographs/mdm_osmos.py b/pypeit/spectrographs/mdm_osmos.py index c1c817a578..a38ccb0d25 100644 --- a/pypeit/spectrographs/mdm_osmos.py +++ b/pypeit/spectrographs/mdm_osmos.py @@ -232,7 +232,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & np.array([ilamp in ['Ar','Xe'] for ilamp in fitstbl['lampstat01']]) & (fitstbl['idname'] == 'COMP') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -325,7 +325,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & (fitstbl['idname'] == 'COMP') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod diff --git a/pypeit/spectrographs/mmt_binospec.py b/pypeit/spectrographs/mmt_binospec.py index 3c9d66488e..b734bd4ec1 100644 --- a/pypeit/spectrographs/mmt_binospec.py +++ b/pypeit/spectrographs/mmt_binospec.py @@ -347,7 +347,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['pixelflat', 'trace', 'illumflat']: return good_exp & (fitstbl['lampstat01'] == 'off') & (fitstbl['lampstat02'] == 'deployed') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/mmt_bluechannel.py b/pypeit/spectrographs/mmt_bluechannel.py index d071c08279..ed58006291 100644 --- a/pypeit/spectrographs/mmt_bluechannel.py +++ b/pypeit/spectrographs/mmt_bluechannel.py @@ -460,7 +460,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # i think the 
bright lamp, BC, is the only one ever used for this. imagetyp should always be set to flat. return good_exp & (fitstbl['lampstat01'] == 'off') & (fitstbl['target'] == 'skyflat') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/mmt_mmirs.py b/pypeit/spectrographs/mmt_mmirs.py index ff8dd2e3c0..515d6e1262 100644 --- a/pypeit/spectrographs/mmt_mmirs.py +++ b/pypeit/spectrographs/mmt_mmirs.py @@ -272,7 +272,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'object') if ftype == 'dark': return good_exp & (fitstbl['idname'] == 'dark') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/not_alfosc.py b/pypeit/spectrographs/not_alfosc.py index 3ac39ee04e..4eff1b8725 100644 --- a/pypeit/spectrographs/not_alfosc.py +++ b/pypeit/spectrographs/not_alfosc.py @@ -267,7 +267,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & (fitstbl['idname'] == 'WAVE,LAMP') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_specific_par(self, scifile, inp_par=None): diff --git a/pypeit/spectrographs/ntt_efosc2.py b/pypeit/spectrographs/ntt_efosc2.py index 00542e2dde..8bd1103632 100644 --- a/pypeit/spectrographs/ntt_efosc2.py +++ b/pypeit/spectrographs/ntt_efosc2.py @@ -352,7 +352,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & ((fitstbl['target'] == 'WAVE')) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/p200_dbsp.py b/pypeit/spectrographs/p200_dbsp.py index 03d6dfa54b..5b87a7e879 100644 --- a/pypeit/spectrographs/p200_dbsp.py +++ b/pypeit/spectrographs/p200_dbsp.py @@ -164,7 +164,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] != '0000000') & (fitstbl['idname'] == 'cal') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): diff --git a/pypeit/spectrographs/p200_ngps.py b/pypeit/spectrographs/p200_ngps.py index 8425165812..d6d193de57 100644 --- a/pypeit/spectrographs/p200_ngps.py +++ b/pypeit/spectrographs/p200_ngps.py @@ -147,7 +147,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'THAR') # Temporary fix, do not use FEAR arcs - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git 
a/pypeit/spectrographs/shane_kast.py b/pypeit/spectrographs/shane_kast.py index c2a30a6d58..51d152a0e0 100644 --- a/pypeit/spectrographs/shane_kast.py +++ b/pypeit/spectrographs/shane_kast.py @@ -154,7 +154,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arcs')# & (fitstbl['target'] == 'Arcs') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): diff --git a/pypeit/spectrographs/soar_goodman.py b/pypeit/spectrographs/soar_goodman.py index f073db5141..a581e13be7 100644 --- a/pypeit/spectrographs/soar_goodman.py +++ b/pypeit/spectrographs/soar_goodman.py @@ -205,7 +205,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arc') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/subaru_focas.py b/pypeit/spectrographs/subaru_focas.py index 074677d790..571f847242 100644 --- a/pypeit/spectrographs/subaru_focas.py +++ b/pypeit/spectrographs/subaru_focas.py @@ -177,7 +177,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'COMPARISON') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/tng_dolores.py b/pypeit/spectrographs/tng_dolores.py index fb35daef82..4f61ea5d8d 100644 --- a/pypeit/spectrographs/tng_dolores.py +++ b/pypeit/spectrographs/tng_dolores.py @@ -249,7 +249,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'CALIB') & ( (fitstbl['lampstat01'] == 'Ne+Hg') | (fitstbl['lampstat01'] == 'Helium') ) \ & (fitstbl['dispname'] != 'OPEN') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/vlt_fors.py b/pypeit/spectrographs/vlt_fors.py index 6f27614972..768b47d374 100644 --- a/pypeit/spectrographs/vlt_fors.py +++ b/pypeit/spectrographs/vlt_fors.py @@ -204,7 +204,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & ((fitstbl['target'] == 'LAMP,WAVE') | (fitstbl['target'] == 'WAVE,LAMP')) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/vlt_sinfoni.py b/pypeit/spectrographs/vlt_sinfoni.py index 24fcedf94c..8a5717cc87 100644 --- a/pypeit/spectrographs/vlt_sinfoni.py +++ b/pypeit/spectrographs/vlt_sinfoni.py @@ -314,7 +314,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['sky']: return good_exp & (fitstbl['idname'] == 'SINFONI_IFS_SKY') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git 
a/pypeit/spectrographs/vlt_xshooter.py b/pypeit/spectrographs/vlt_xshooter.py index 0bf708cd78..461292b524 100644 --- a/pypeit/spectrographs/vlt_xshooter.py +++ b/pypeit/spectrographs/vlt_xshooter.py @@ -194,7 +194,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['target'] == 'LAMP,WAVE') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -427,7 +427,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & ((fitstbl['target'] == 'LAMP,WAVE') | (fitstbl['target'] == 'SCIENCE')) - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/wht_isis.py b/pypeit/spectrographs/wht_isis.py index 89c832c8bc..7292356a34 100644 --- a/pypeit/spectrographs/wht_isis.py +++ b/pypeit/spectrographs/wht_isis.py @@ -263,7 +263,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -409,7 +409,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc') - msgs.warning('Cannot determine if frames are of type {0}.'.format(ftype)) + msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/tests/test_atmext.py b/pypeit/tests/test_atmext.py index b9e9eda172..34f9d7cbce 100644 --- a/pypeit/tests/test_atmext.py +++ b/pypeit/tests/test_atmext.py @@ -13,7 +13,7 @@ from pypeit.par.pypeitpar import Coadd1DPar -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError from pypeit.core.atmextinction import AtmosphericExtinction diff --git a/pypeit/tests/test_spectrum.py b/pypeit/tests/test_spectrum.py index 2d7455694f..7dcdc6cdf7 100644 --- a/pypeit/tests/test_spectrum.py +++ b/pypeit/tests/test_spectrum.py @@ -6,7 +6,7 @@ from scipy import special from pypeit.core import spectrum -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError def pixelated_gaussian(x, c=0.0, s=1.0): diff --git a/pypeit/tests/test_standard.py b/pypeit/tests/test_standard.py index 436dd6b01c..7fba3153d0 100644 --- a/pypeit/tests/test_standard.py +++ b/pypeit/tests/test_standard.py @@ -8,7 +8,7 @@ from astropy import coordinates from pypeit.core import standard -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError def test_mab_to_cgs(): wave = 5000. 
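
Note on the exception refactor in the patch above: imports of the form
"from pypeit.pypmsgs import PypeItError" become "from pypeit import PypeItError",
backed by the new pypeit.exceptions module documented in
doc/api/pypeit.exceptions.rst and re-exported at the package level
("from pypeit.exceptions import *" in pypeit/__init__.py, visible in the next
patch). A minimal sketch of the resulting usage, assuming that re-export; the
parallactic_angle helper is hypothetical and only mirrors the gtc_osiris.py
hunk shown earlier:

    # Old import, removed along with pypeit/pypmsgs.py:
    #     from pypeit.pypmsgs import PypeItError
    # New import, re-exported from the package root:
    from pypeit import PypeItError

    def parallactic_angle(header):
        # Hypothetical helper mirroring the gtc_osiris.py pattern above; the
        # package-level PypeItError replaces the old pypmsgs exception.
        try:
            return header['PARANG']  # must be expressed in radians
        except KeyError:
            raise PypeItError('Parallactic angle is not in header')
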
From 81807beff76b093596effe8dba3a9af225ea6c0b Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Thu, 9 Oct 2025 07:31:21 -0700 Subject: [PATCH 10/33] msgs -> log --- pypeit/__init__.py | 24 +- pypeit/alignframe.py | 12 +- pypeit/archive.py | 4 +- pypeit/cache.py | 12 +- pypeit/calibframe.py | 8 +- pypeit/calibrations.py | 110 ++++---- pypeit/coadd1d.py | 10 +- pypeit/coadd2d.py | 82 +++--- pypeit/coadd3d.py | 92 +++---- pypeit/core/arc.py | 28 +- pypeit/core/atmextinction.py | 18 +- pypeit/core/coadd.py | 56 ++-- pypeit/core/collate.py | 2 +- pypeit/core/combine.py | 6 +- pypeit/core/datacube.py | 78 +++--- pypeit/core/extract.py | 64 ++--- pypeit/core/findobj_skymask.py | 62 ++--- pypeit/core/fitting.py | 44 +-- pypeit/core/flat.py | 30 +-- pypeit/core/flexure.py | 104 ++++---- pypeit/core/flux_calib.py | 76 +++--- pypeit/core/framematch.py | 4 +- pypeit/core/gui/edge_inspector.py | 10 +- pypeit/core/gui/identify.py | 68 ++--- pypeit/core/gui/object_find.py | 8 +- pypeit/core/gui/skysub_regions.py | 10 +- pypeit/core/mosaic.py | 6 +- pypeit/core/parse.py | 4 +- pypeit/core/pca.py | 6 +- pypeit/core/pixels.py | 12 +- pypeit/core/procimg.py | 50 ++-- pypeit/core/pydl.py | 8 +- pypeit/core/qa.py | 6 +- pypeit/core/scattlight.py | 12 +- pypeit/core/skysub.py | 76 +++--- pypeit/core/slitdesign_matching.py | 24 +- pypeit/core/spectrum.py | 18 +- pypeit/core/standard.py | 24 +- pypeit/core/telluric.py | 22 +- pypeit/core/trace.py | 34 +-- pypeit/core/tracewave.py | 40 +-- pypeit/core/transform.py | 2 +- pypeit/core/wave.py | 2 +- pypeit/core/wavecal/autoid.py | 162 ++++++------ pypeit/core/wavecal/echelle.py | 10 +- pypeit/core/wavecal/templates.py | 4 +- pypeit/core/wavecal/waveio.py | 6 +- pypeit/core/wavecal/wv_fitting.py | 16 +- pypeit/core/wavecal/wvutils.py | 30 +-- pypeit/datamodel.py | 8 +- pypeit/display/display.py | 6 +- pypeit/edgetrace.py | 324 +++++++++++------------ pypeit/extraction.py | 34 +-- pypeit/find_objects.py | 52 ++-- pypeit/flatfield.py | 122 ++++----- pypeit/fluxcalibrate.py | 2 +- pypeit/images/bitmaskarray.py | 2 +- pypeit/images/buildimage.py | 2 +- pypeit/images/combineimage.py | 10 +- pypeit/images/detector_container.py | 2 +- pypeit/images/mosaic.py | 4 +- pypeit/images/pypeitimage.py | 4 +- pypeit/images/rawimage.py | 48 ++-- pypeit/inputfiles.py | 20 +- pypeit/io.py | 8 +- pypeit/manual_extract.py | 2 +- pypeit/metadata.py | 56 ++-- pypeit/par/parset.py | 10 +- pypeit/par/pypeitpar.py | 4 +- pypeit/par/util.py | 2 +- pypeit/pypeit.py | 92 +++---- pypeit/pypeitdata.py | 6 +- pypeit/pypeitsetup.py | 6 +- pypeit/scattlight.py | 6 +- pypeit/scripts/arxiv_solution.py | 4 +- pypeit/scripts/cache_github_data.py | 14 +- pypeit/scripts/chk_edges.py | 6 +- pypeit/scripts/chk_flexure.py | 4 +- pypeit/scripts/chk_for_calibs.py | 22 +- pypeit/scripts/chk_noise_1dspec.py | 4 +- pypeit/scripts/chk_noise_2dspec.py | 6 +- pypeit/scripts/chk_plugins.py | 4 +- pypeit/scripts/chk_scattlight.py | 4 +- pypeit/scripts/chk_wavecalib.py | 2 +- pypeit/scripts/clean_cache.py | 12 +- pypeit/scripts/coadd_1dspec.py | 6 +- pypeit/scripts/coadd_2dspec.py | 30 +-- pypeit/scripts/coadd_datacube.py | 8 +- pypeit/scripts/collate_1d.py | 118 ++++----- pypeit/scripts/compile_wvarxiv.py | 6 +- pypeit/scripts/extract_datacube.py | 6 +- pypeit/scripts/flux_calib.py | 6 +- pypeit/scripts/flux_setup.py | 8 +- pypeit/scripts/identify.py | 8 +- pypeit/scripts/install_extinctfile.py | 6 +- pypeit/scripts/install_linelist.py | 6 +- pypeit/scripts/install_wvarxiv.py | 4 +- 
pypeit/scripts/multislit_flexure.py | 14 +- pypeit/scripts/parse_slits.py | 2 +- pypeit/scripts/print_bpm.py | 14 +- pypeit/scripts/ql.py | 26 +- pypeit/scripts/run_pypeit.py | 6 +- pypeit/scripts/run_to_calibstep.py | 6 +- pypeit/scripts/sensfunc.py | 6 +- pypeit/scripts/setup.py | 6 +- pypeit/scripts/setup_coadd2d.py | 10 +- pypeit/scripts/show_1dspec.py | 2 +- pypeit/scripts/show_2dspec.py | 18 +- pypeit/scripts/show_pixflat.py | 4 +- pypeit/scripts/skysub_regions.py | 4 +- pypeit/scripts/tellfit.py | 12 +- pypeit/scripts/trace_edges.py | 10 +- pypeit/scripts/view_fits.py | 6 +- pypeit/sensfilearchive.py | 4 +- pypeit/sensfunc.py | 14 +- pypeit/setup_gui/controller.py | 50 ++-- pypeit/setup_gui/dialog_helpers.py | 4 +- pypeit/setup_gui/model.py | 68 ++--- pypeit/setup_gui/text_viewer.py | 4 +- pypeit/setup_gui/view.py | 98 +++---- pypeit/slittrace.py | 66 ++--- pypeit/spec2dobj.py | 12 +- pypeit/specobj.py | 16 +- pypeit/specobjs.py | 12 +- pypeit/spectrographs/aat_uhrf.py | 6 +- pypeit/spectrographs/apf_levy.py | 4 +- pypeit/spectrographs/bok_bc.py | 6 +- pypeit/spectrographs/gemini_flamingos.py | 6 +- pypeit/spectrographs/gemini_gmos.py | 16 +- pypeit/spectrographs/gemini_gnirs.py | 30 +-- pypeit/spectrographs/gtc_osiris.py | 42 +-- pypeit/spectrographs/jwst_nircam.py | 2 +- pypeit/spectrographs/jwst_nirspec.py | 6 +- pypeit/spectrographs/keck_deimos.py | 38 +-- pypeit/spectrographs/keck_esi.py | 8 +- pypeit/spectrographs/keck_hires.py | 12 +- pypeit/spectrographs/keck_kcwi.py | 42 +-- pypeit/spectrographs/keck_lris.py | 22 +- pypeit/spectrographs/keck_mosfire.py | 30 +-- pypeit/spectrographs/keck_nires.py | 4 +- pypeit/spectrographs/keck_nirspec.py | 20 +- pypeit/spectrographs/lbt_luci.py | 4 +- pypeit/spectrographs/lbt_mods.py | 14 +- pypeit/spectrographs/ldt_deveny.py | 8 +- pypeit/spectrographs/magellan_fire.py | 6 +- pypeit/spectrographs/magellan_mage.py | 4 +- pypeit/spectrographs/mdm_modspec.py | 4 +- pypeit/spectrographs/mdm_osmos.py | 6 +- pypeit/spectrographs/mmt_binospec.py | 10 +- pypeit/spectrographs/mmt_bluechannel.py | 8 +- pypeit/spectrographs/mmt_mmirs.py | 8 +- pypeit/spectrographs/not_alfosc.py | 6 +- pypeit/spectrographs/ntt_efosc2.py | 6 +- pypeit/spectrographs/opticalmodel.py | 4 +- pypeit/spectrographs/p200_dbsp.py | 20 +- pypeit/spectrographs/p200_ngps.py | 4 +- pypeit/spectrographs/p200_tspec.py | 4 +- pypeit/spectrographs/shane_kast.py | 6 +- pypeit/spectrographs/soar_goodman.py | 8 +- pypeit/spectrographs/spectrograph.py | 36 +-- pypeit/spectrographs/subaru_focas.py | 6 +- pypeit/spectrographs/tng_dolores.py | 6 +- pypeit/spectrographs/util.py | 2 +- pypeit/spectrographs/vlt_fors.py | 6 +- pypeit/spectrographs/vlt_sinfoni.py | 4 +- pypeit/spectrographs/vlt_xshooter.py | 6 +- pypeit/spectrographs/wht_isis.py | 6 +- pypeit/specutils/pypeit_loaders.py | 8 +- pypeit/tests/test_collate_1d.py | 20 +- pypeit/tests/test_qa.py | 2 +- pypeit/tests/test_specobj.py | 2 +- pypeit/tests/test_utils.py | 2 +- pypeit/tracepca.py | 4 +- pypeit/utils.py | 20 +- pypeit/wavecalib.py | 34 +-- pypeit/wavemodel.py | 94 +++---- pypeit/wavetilts.py | 34 +-- 177 files changed, 1966 insertions(+), 1966 deletions(-) diff --git a/pypeit/__init__.py b/pypeit/__init__.py index c2c4c6cd69..85467b2501 100644 --- a/pypeit/__init__.py +++ b/pypeit/__init__.py @@ -21,23 +21,23 @@ import logging from pypeit import logger -msgs = logger.get_logger(level=logging.DEBUG) +log = logger.get_logger(level=logging.DEBUG) # Import all the exceptions from pypeit.exceptions import * # Import and instantiate 
the data path parser -# NOTE: This *MUST* come after msgs and __version__ are defined above +# NOTE: This *MUST* come after log and __version__ are defined above from pypeit import pypeitdata dataPaths = pypeitdata.PypeItDataPaths() -# Send all signals to messages to be dealt with (i.e. someone hits ctrl+c) -def signal_handler(signalnum, handler): - """ - Handle signals sent by the keyboard during code execution - """ - if signalnum == 2: - msgs.info('Ctrl+C was pressed. Ending processes...') - sys.exit() - -signal.signal(signal.SIGINT, signal_handler) +## Send all signals to messages to be dealt with (i.e. someone hits ctrl+c) +#def signal_handler(signalnum, handler): +# """ +# Handle signals sent by the keyboard during code execution +# """ +# if signalnum == 2: +# log.info('Ctrl+C was pressed. Ending processes...') +# sys.exit() +# +#signal.signal(signal.SIGINT, signal_handler) diff --git a/pypeit/alignframe.py b/pypeit/alignframe.py index 08a2a58b2c..265f45b1ad 100644 --- a/pypeit/alignframe.py +++ b/pypeit/alignframe.py @@ -13,7 +13,7 @@ from pypeit.core import findobj_skymask from pypeit import datamodel from pypeit import calibframe -from pypeit import msgs +from pypeit import log from pypeit import PypeItError @@ -189,13 +189,13 @@ def build_traces(self, show_peaks=False, debug=False): # Go through the slits for slit_idx, slit_spat in enumerate(self.slits.spat_id): if self.slit_bpm[slit_idx]: - msgs.info(f'Skipping bad slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})') + log.info(f'Skipping bad slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADALIGNCALIB') continue specobj_dict = {'SLITID': slit_idx, 'DET': self.rawalignimg.detector.name, 'OBJTYPE': "align_profile", 'PYPELINE': self.spectrograph.pypeline} - msgs.info("Fitting alignment traces in slit {0:d}/{1:d}".format(slit_idx+1, self.slits.nslits)) + log.info("Fitting alignment traces in slit {0:d}/{1:d}".format(slit_idx+1, self.slits.nslits)) align_traces = findobj_skymask.objs_in_slit( self.rawalignimg.image, self.rawalignimg.ivar, slitid_img_init == slit_spat, left[:, slit_idx], right[:, slit_idx], @@ -237,7 +237,7 @@ def generate_traces(self, align_prof): # Go through the slits for slit_idx, slit_spat in enumerate(self.slits.spat_id): if self.slit_bpm[slit_idx]: - msgs.info(f'Skipping bad slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})') + log.info(f'Skipping bad slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})') self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADALIGNCALIB') continue sls = '{0:d}'.format(slit_idx) @@ -347,7 +347,7 @@ def __init__(self, traces, locations, tilts): Spectral tilts. 
""" # Perform some checks - msgs.debug("Spatial flexure is not currently implemented for the astrometric alignment") + log.debug("Spatial flexure is not currently implemented for the astrometric alignment") if type(locations) is list: locations = np.array(locations) if locations.size != traces.shape[1]: @@ -372,7 +372,7 @@ def build_splines(self): spldict = dict(kind='linear', bounds_error=False, fill_value="extrapolate") ycoord = np.arange(self.nspec) for sl in range(self.nslit): - msgs.info("Calculating astrometric transform of slit {0:d}/{1:d}".format(sl+1, self.nslit)) + log.info("Calculating astrometric transform of slit {0:d}/{1:d}".format(sl+1, self.nslit)) xlr, tlr = np.zeros((self.nspec, 2)), np.zeros((self.nspec, 2)) eval_trim = 2 # This evaluates the slit length inside the actual slit edges, due to edge effects. for sp in range(self.nspec): diff --git a/pypeit/archive.py b/pypeit/archive.py index 9f69ff4eec..a4f9b286e8 100644 --- a/pypeit/archive.py +++ b/pypeit/archive.py @@ -58,7 +58,7 @@ def get_target_metadata(file_info): from astropy.io import ascii from astropy.table import Table -from pypeit import msgs +from pypeit import log from pypeit import PypeItError @@ -230,7 +230,7 @@ def _archive_file(self, orig_file, dest_file): full_dest_path = os.path.join(self.archive_root, dest_file) os.makedirs(os.path.dirname(full_dest_path), exist_ok=True) - msgs.info(f'Copying {orig_file} to archive root {self.archive_root}') + log.info(f'Copying {orig_file} to archive root {self.archive_root}') try: shutil.copy2(orig_file, full_dest_path) except: diff --git a/pypeit/cache.py b/pypeit/cache.py index c66fdd5cff..4cdff5cedd 100644 --- a/pypeit/cache.py +++ b/pypeit/cache.py @@ -56,7 +56,7 @@ # NOTE: To avoid circular imports, avoid (if possible) importing anything from # pypeit into this module! Objects created or available in pypeit/__init__.py # are the exceptions, for now. 
-from pypeit import msgs +from pypeit import log from pypeit import PypeItError, PypeItPathError from pypeit import __version__ @@ -179,7 +179,7 @@ def git_most_recent_tag(): tags = [packaging.version.parse(ref.split('/')[-1]) \ for ref in repo.references if 'refs/tags' in ref] if len(tags) == 0: - msgs.warning('Unable to find any tags in pypeit repository.') + log.warning('Unable to find any tags in pypeit repository.') return __version__, None latest_version = str(sorted(tags)[-1]) timestamp = repo.resolve_refish(f'refs/tags/{latest_version}')[0].author.time @@ -239,7 +239,7 @@ def fetch_remote_file( if remote_host == "s3_cloud" and not install_script: # Display a warning that this may take a while, and the user may wish to # download use an install script - msgs.warning(f'Note: If this file takes a while to download, you may wish to used one of ' + log.warning(f'Note: If this file takes a while to download, you may wish to use one of ' 'the install scripts (e.g., pypeit_install_telluric) to install the file ' 'independent of this processing script.') @@ -388,7 +388,7 @@ def remove_from_cache(cache_url=None, pattern=None, allow_multiple=False): if cache_url is None: _url = search_cache(pattern, path_only=False) if len(_url) == 0: - msgs.warning(f'Cache does not include a file matching the pattern {pattern}.') + log.warning(f'Cache does not include a file matching the pattern {pattern}.') return _url = list(_url.keys()) elif not isinstance(cache_url, list): @@ -397,7 +397,7 @@ def remove_from_cache(cache_url=None, pattern=None, allow_multiple=False): _url = cache_url if len(_url) > 1 and not allow_multiple: - msgs.warning('Function found or was provided with multiple entries to be removed. Either ' + log.warning('Function found or was provided with multiple entries to be removed. Either ' 'set allow_multiple=True, or try again with a single url or more specific ' 'pattern. 
URLs passed/found are:\n' + '\n'.join(_url)) return @@ -452,7 +452,7 @@ def parse_cache_url(url): return 's3_cloud', None, None, str(sub_path.parent), sub_path.name # Unknown host - msgs.warning(f'URL not recognized as a pypeit cache url:\n\t{url}') + log.warning(f'URL not recognized as a pypeit cache url:\n\t{url}') return None, None, None, None, None diff --git a/pypeit/calibframe.py b/pypeit/calibframe.py index add316e4a6..316673f139 100644 --- a/pypeit/calibframe.py +++ b/pypeit/calibframe.py @@ -13,7 +13,7 @@ from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import datamodel from pypeit import io @@ -197,11 +197,11 @@ def calib_keys_from_header(self, hdr): try: self.calib_key, self.calib_dir = CalibFrame.parse_key_dir(hdr) except PypeItError as e: - msgs.warning(f'{e}') + log.warning(f'{e}') if 'CALIBID' in hdr: self.calib_id = self.ingest_calib_id(hdr['CALIBID']) else: - msgs.warning('Header does not have CALIBID card; cannot parse calibration IDs.') + log.warning('Header does not have CALIBID card; cannot parse calibration IDs.') @staticmethod def parse_key_dir(inp, from_filename=False): @@ -284,7 +284,7 @@ def ingest_calib_id(calib_id): _calib_id = [calib_id] _calib_id = np.unique(np.concatenate([str(c).split(',') for c in _calib_id])) if 'all' in _calib_id and len(_calib_id) != 1: - msgs.warning(f'Calibration groups set to {_calib_id}, resetting to simply "all".') + log.warning(f'Calibration groups set to {_calib_id}, resetting to simply "all".') _calib_id = np.array(['all']) for c in _calib_id: if c == 'all': diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py index 4691e188c0..c466158924 100644 --- a/pypeit/calibrations.py +++ b/pypeit/calibrations.py @@ -15,7 +15,7 @@ import yaml from pypeit import __version__ -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import alignframe from pypeit import flatfield @@ -228,7 +228,7 @@ def check_calibrations(self, file_list, check_lamps=True): # Check that the lamps being combined are all the same if check_lamps: if not lampstat[1:] == lampstat[:-1]: - msgs.warning("The following files contain different lamp status") + log.warning("The following files contain different lamp status") # Get the longest strings maxlen = max([len("Filename")] + [len(os.path.split(x)[1]) for x in file_list]) maxlmp = max([len("Lamp status")] + [len(x) for x in lampstat]) @@ -359,7 +359,7 @@ def get_arc(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.msarc = None return self.msarc @@ -377,7 +377,7 @@ def get_arc(self, force:str=None): self.check_calibrations(raw_files) # Otherwise, create the processed file. 
- msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') + log.info(f'Preparing a {frame["class"].calib_type} calibration frame.') self.msarc = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['arcframe'], raw_files, bias=self.msbias, bpm=self.msbpm, @@ -411,7 +411,7 @@ def get_tiltimg(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.mstilt = None return self.mstilt @@ -429,7 +429,7 @@ def get_tiltimg(self, force:str=None): self.check_calibrations(raw_files) # Otherwise, create the processed file. - msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') + log.info(f'Preparing a {frame["class"].calib_type} calibration frame.') self.mstilt = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['tiltframe'], raw_files, bias=self.msbias, bpm=self.msbpm, @@ -468,7 +468,7 @@ def get_align(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.alignments = None return self.alignments @@ -489,7 +489,7 @@ def get_align(self, force:str=None): self.check_calibrations(raw_files) # Otherwise, create the processed file. - msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') + log.info(f'Preparing a {frame["class"].calib_type} calibration frame.') msalign = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['alignframe'], raw_files, bias=self.msbias, bpm=self.msbpm, @@ -531,7 +531,7 @@ def get_bias(self, force:str=None): # If no raw files are available and no processed calibration frame if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing without a bias...') self.msbias = None return self.msbias @@ -546,7 +546,7 @@ def get_bias(self, force:str=None): self.check_calibrations(raw_files) # Otherwise, create the processed file. - msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') + log.info(f'Preparing a {frame["class"].calib_type} calibration frame.') self.msbias = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['biasframe'], raw_files, calib_dir=self.calib_dir, setup=setup, @@ -579,7 +579,7 @@ def get_dark(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.msdark = None return self.msdark @@ -657,7 +657,7 @@ def get_scattlight(self, force:str=None): """ # Check for existing data if not self._chk_objs(['msbpm', 'slits']): - msgs.warning('Must have the bpm and the slits defined to make a scattered light image! 
' + log.warning('Must have the bpm and the slits defined to make a scattered light image! ' 'Skipping and may crash down the line') return self.msscattlight @@ -671,7 +671,7 @@ scatt_idx = self.fitstbl.find_frames(frame['type'], calib_ID=self.calib_ID, index=True) if len(raw_scattlight_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') return self.msscattlight @@ -683,9 +683,9 @@ # Scattered light model does not exist or we're not reusing it. # Need to build everything from scratch. Start with the trace image. - msgs.info('Creating scattered light calibration frame using files: ') + log.info('Creating scattered light calibration frame using files: ') for f in raw_scattlight_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') # Reset the BPM self.get_bpm(frame=raw_scattlight_files[0]) @@ -713,7 +713,7 @@ if not success: # Something went awry - msgs.warning('Scattered light modelling failed. Continuing, but likely to fail soon...') + log.warning('Scattered light modelling failed. Continuing, but likely to fail soon...') self.success = False return self.msscattlight @@ -757,7 +757,7 @@ def get_flats(self, force:str=None): """ # Check for existing data if not self._chk_objs(['msarc', 'msbpm', 'slits', 'wv_calib']): - msgs.warning('Must have the arc, bpm, slits, and wv_calib defined to make flats! ' + log.warning('Must have the arc, bpm, slits, and wv_calib defined to make flats! ' 'Skipping and may crash down the line') # TODO: Why was this an empty object and not None? self.flatimages = None #flatfield.FlatImages() @@ -766,7 +766,7 @@ # Slit and tilt traces are required to flat-field the data if not self._chk_objs(['slits', 'wavetilts']): # TODO: Why doesn't this fault? - msgs.warning('Flats were requested, but there are quantities missing necessary to ' + log.warning('Flats were requested, but there are missing quantities necessary to ' 'create flats. Proceeding without flat fielding....') # TODO: Why was this an empty object and not None? self.flatimages = None #flatfield.FlatImages() @@ -807,7 +807,7 @@ and len(raw_illum_files) == 0 and illum_cal_file is None: # if no calibration frames are found, check if the user has provided a pixel flat file if self.par['flatfield']['pixelflat_file'] is not None: - msgs.warning(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found but a ' + log.warning(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found but a ' 'user-defined pixel flat file was provided. Using that file.') self.flatimages = flatfield.FlatImages(PYP_SPEC=self.spectrograph.name, spat_id=self.slits.spat_id) self.flatimages.calib_key = flatfield.FlatImages.construct_calib_key(self.fitstbl['setup'][self.frame], @@ -816,7 +816,7 @@ self.det, self.flatimages, calib_dir=self.calib_dir, chk_version=self.chk_version) else: - msgs.warning(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found and ' + log.warning(f'No raw {pixel_frame["type"]} or {illum_frame["type"]} frames found and ' 'unable to identify a relevant processed calibration frame. 
Continuing...') self.flatimages = None return self.flatimages @@ -826,7 +826,7 @@ def get_flats(self, force:str=None): # issued if both are present and not the same. if illum_cal_file is not None and pixel_cal_file is not None \ and pixel_cal_file != illum_cal_file: - msgs.warning('Processed calibration frames were found for both pixel and ' + log.warning('Processed calibration frames were found for both pixel and ' 'slit-illumination flats, and the files are not the same. Ignoring the ' 'slit-illumination flat.') cal_file = illum_cal_file if pixel_cal_file is None else pixel_cal_file @@ -860,9 +860,9 @@ def get_flats(self, force:str=None): # Perform a check on the files self.check_calibrations(raw_pixel_files) - msgs.info('Creating pixel-flat calibration frame using files: ') + log.info('Creating pixel-flat calibration frame using files: ') for f in raw_pixel_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') pixel_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['pixelflatframe'], raw_pixel_files, dark=self.msdark, @@ -876,9 +876,9 @@ def get_flats(self, force:str=None): # Perform a check on the files self.check_calibrations(raw_lampoff_files) - msgs.info('Subtracting lamp off flats using files: ') + log.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') lampoff_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['lampoffflatsframe'], raw_lampoff_files, @@ -906,18 +906,18 @@ def get_flats(self, force:str=None): # Perform a check on the files self.check_calibrations(raw_illum_files) - msgs.info('Creating slit-illumination flat calibration frame using files: ') + log.info('Creating slit-illumination flat calibration frame using files: ') for f in raw_illum_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') illum_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['illumflatframe'], raw_illum_files, dark=self.msdark, bias=self.msbias, scattlight=self.msscattlight, slits=self.slits, flatimages=self.flatimages, bpm=self.msbpm) if len(raw_lampoff_files) > 0: - msgs.info('Subtracting lamp off flats using files: ') + log.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') if lampoff_flat is None: # Perform a check on the files self.check_calibrations(raw_lampoff_files) @@ -999,7 +999,7 @@ def get_slits(self, force:str=None): raw_lampoff_files = self.fitstbl.find_frame_files('lampoffflats', calib_ID=self.calib_ID) if len(raw_trace_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.slits = None return self.slits @@ -1031,9 +1031,9 @@ def get_slits(self, force:str=None): return self.slits # Need to build everything from scratch. Start with the trace image. 
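The calibrations.py hunks above and below this point all edit instances of one find-or-build flow. A condensed, hedged sketch of that flow (`find_calibrations` appears in the diff itself; the unpacking, the early-return behavior shown, and `build_from_raw` are illustrative stand-ins for details these hunks do not show):

    def get_calib(self, frame):
        # Locate raw frames and/or an existing processed calibration.
        raw_files, cal_file = self.find_calibrations(frame['type'], frame['class'])
        if len(raw_files) == 0 and cal_file is None:
            # Nothing to work with: warn and let the reduction continue.
            log.warning(f'No raw {frame["type"]} frames found and unable to identify '
                        'a relevant processed calibration frame. Continuing...')
            return None
        if cal_file is not None:
            # Reuse the previously processed calibration frame.
            return frame['class'].from_file(cal_file)
        # Otherwise, build the processed frame from the raw files.
        log.info(f'Preparing a {frame["class"].calib_type} calibration frame.')
        return self.build_from_raw(raw_files)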
- msgs.info('Creating edge tracing calibration frame using files: ') + log.info('Creating edge tracing calibration frame using files: ') for f in raw_trace_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') # Reset the BPM self.get_bpm(frame=raw_trace_files[0]) @@ -1050,9 +1050,9 @@ def get_slits(self, force:str=None): dark=self.msdark, calib_dir=self.calib_dir, setup=setup, calib_id=calib_id) if len(raw_lampoff_files) > 0: - msgs.info('Subtracting lamp off flats using files: ') + log.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: - msgs.info(f' {Path(f).name}') + log.info(f' {Path(f).name}') # Reset the BPM self.get_bpm(frame=raw_trace_files[0]) @@ -1070,7 +1070,7 @@ def get_slits(self, force:str=None): qa_path=self.qa_path, auto=True) if not edges.success: # Something went amiss - msgs.warning('Edge tracing failed. Continuing, but likely to fail soon...') + log.warning('Edge tracing failed. Continuing, but likely to fail soon...') traceImage = None edges = None self.success = False @@ -1110,7 +1110,7 @@ def get_wv_calib(self, force:str=None): """ # No wavelength calibration requested if self.par['wavelengths']['reference'] == 'pixel': - msgs.info('Wavelength "reference" parameter set to "pixel"; no wavelength ' + log.info('Wavelength "reference" parameter set to "pixel"; no wavelength ' 'calibration will be performed.') self.wv_calib = None return self.wv_calib @@ -1118,7 +1118,7 @@ def get_wv_calib(self, force:str=None): # Check for existing data req_objs = ['msarc', 'msbpm', 'slits'] if not self._chk_objs(req_objs): - msgs.warning('Not enough information to load/generate the wavelength calibration. ' + log.warning('Not enough information to load/generate the wavelength calibration. ' 'Skipping and may crash down the line') return None @@ -1131,7 +1131,7 @@ def get_wv_calib(self, force:str=None): = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.wv_calib = None return self.wv_calib @@ -1145,7 +1145,7 @@ def get_wv_calib(self, force:str=None): self.wv_calib.chk_synced(self.slits) self.slits.mask_wvcalib(self.wv_calib) if self.par['wavelengths']['method'] == 'echelle': - msgs.info('Method set to Echelle -- checking wv_calib for 2dfits') + log.info('Method set to Echelle -- checking wv_calib for 2dfits') if not hasattr(self.wv_calib, 'wv_fit2d'): raise PypeItError('There is no 2d fit in this Echelle wavelength ' 'calibration! Please generate a new one with a 2d fit.') @@ -1170,7 +1170,7 @@ def get_wv_calib(self, force:str=None): # TODO: (Added 30 Mar 2023) The need for the meta_dict is for echelle # wavelength calibration. Create EchelleCalibrations and # EchelleBuildWaveCalib subclasses instead.. - msgs.info(f'Preparing a {wavecalib.WaveCalib.calib_type} calibration frame.') + log.info(f'Preparing a {wavecalib.WaveCalib.calib_type} calibration frame.') waveCalib = wavecalib.BuildWaveCalib(self.msarc, self.slits, self.spectrograph, self.par['wavelengths'], lamps, meta_dict=meta_dict, det=self.det, qa_path=self.qa_path) @@ -1204,7 +1204,7 @@ def get_tilts(self, force:str=None): # Check for existing data # TODO: add mstilt_inmask to this list when it gets implemented. 
if not self._chk_objs(['mstilt', 'msbpm', 'slits', 'wv_calib']): - msgs.warning('Do not have all the necessary objects for tilts. Skipping and may crash ' + log.warning('Do not have all the necessary objects for tilts. Skipping and may crash ' 'down the line.') return None @@ -1217,7 +1217,7 @@ = self.find_calibrations(frame['type'], frame['class']) if len(raw_files) == 0 and cal_file is None: - msgs.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' + log.warning(f'No raw {frame["type"]} frames found and unable to identify a relevant ' 'processed calibration frame. Continuing...') self.wavetilts = None return self.wavetilts @@ -1287,7 +1287,7 @@ def process_load_selection(self, frame, cal_file, force): return None _cal_file = Path(cal_file).absolute() if force == 'reload' and not _cal_file.exists(): - msgs.warning(f"{_cal_file} does not exist; cannot reload " + log.warning(f"{_cal_file} does not exist; cannot reload " f"{frame['class'].__name__} calibration.") self.success = False return None @@ -1302,7 +1302,7 @@ def run_the_steps(self, stop_at_step:str=None): for step in self.steps: if stop_at_step is not None and step == stop_at_step: force = 'remake' - msgs.info(f"Calibrations will stop at {stop_at_step}") + log.info(f"Calibrations will stop at {stop_at_step}") else: force = None getattr(self, f'get_{step}')(force=force) @@ -1310,10 +1310,10 @@ self.failed_step = f'get_{step}' return if stop_at_step is not None and step == stop_at_step: - msgs.info(f"Calibrations stopping at {stop_at_step}") + log.info(f"Calibrations stopping at {stop_at_step}") return - msgs.info("Calibration complete and/or fully loaded!") - msgs.info("#######################################################################") + log.info("Calibration complete and/or fully loaded!") + log.info("#######################################################################") def _chk_set(self, items): """ @@ -1344,8 +1344,8 @@ def _chk_objs(self, items): if getattr(self, obj) is None: # Strip ms iobj = obj[2:] if obj[0:2] == 'ms' else obj - msgs.warning("You need to generate {:s} prior to this calibration..".format(obj)) - msgs.warning("Use get_{:s}".format(iobj)) + log.warning("You need to generate {:s} prior to this calibration.".format(obj)) + log.warning("Use get_{:s}".format(iobj)) return False return True @@ -1409,7 +1409,7 @@ def get_association(fitstbl, spectrograph, caldir, setup, calib_ID, det, must_ex raise PypeItError('Calibration groups have not been defined!') if include_science and proc_only: - msgs.warning('Requested to include the science/standard frames and to only return the ' + log.warning('Requested to include the science/standard frames and to only return the ' 'processed calibration frames. Ignoring former request.') # Set the calibrations path @@ -1436,7 +1436,7 @@ asn = {} setups = fitstbl.unique_configurations(copy=True, rm_none=True) if setup not in setups: - msgs.warning(f'Requested setup {setup} is invalid. Choose from {",".join(setups)}.') + log.warning(f'Requested setup {setup} is invalid. 
Choose from {",".join(setups)}.') return asn # Subset to output @@ -1582,7 +1582,7 @@ def association_summary(ofile, fitstbl, spectrograph, caldir, subset=None, det=N if det is None: ff.write(f'# NOTE: {detname} is a placeholder for the reduced detectors/mosaics\n') ff.write(yaml.dump(utils.yamlify(asn))) - msgs.info(f'Calibration association file written to: {_ofile}') + log.info(f'Calibration association file written to: {_ofile}') @staticmethod def default_steps(): @@ -1686,7 +1686,7 @@ def check_for_calibs(par, fitstbl, raise_error=True, cut_cfg=None): if raise_error: raise PypeItError(msg) else: - msgs.warning(msg) + log.warning(msg) # Explore science frame for key, ftype in zip(['use_biasimage', 'use_darkimage', 'use_pixelflat', @@ -1710,10 +1710,10 @@ def check_for_calibs(par, fitstbl, raise_error=True, cut_cfg=None): if raise_error: raise PypeItError(msg) else: - msgs.warning(msg) + log.warning(msg) if pass_calib: - msgs.info("Congrats!! You passed the calibrations inspection!!") + log.info("Congrats!! You passed the calibrations inspection!!") return pass_calib diff --git a/pypeit/coadd1d.py b/pypeit/coadd1d.py index d617686c0c..30dc329268 100644 --- a/pypeit/coadd1d.py +++ b/pypeit/coadd1d.py @@ -20,7 +20,7 @@ from pypeit import utils from pypeit import sensfunc from pypeit import specobjs -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit.core import coadd, flux_calib from pypeit.history import History @@ -287,8 +287,8 @@ def check_exposures(self): # check if there are exposures that are completely masked out, i.e., gpms = False for all spectral pixels masked_exps = [np.all(np.logical_not(gpm)) for gpm in _gpms] if np.any(masked_exps): - msgs.warning(f'The following exposure(s) is/are completely masked out. It/They will not be coadded.') - [msgs.warning(f"Exposure {i}: {fname.split('/')[-1]} {obj}") + log.warning(f'The following exposure(s) is/are completely masked out. It/They will not be coadded.') + [log.warning(f"Exposure {i}: {fname.split('/')[-1]} {obj}") for i, (fname, obj, masked_exp) in enumerate(zip(_spec1dfiles, _objids, masked_exps)) if masked_exp] # remove masked out exposure _waves = [wave for (wave, masked_exp) in zip(_waves, masked_exps) if not masked_exp] @@ -319,8 +319,8 @@ def check_exposures(self): f'({_sigrej} sigma above the median S/N in the stack).' if self.par['sigrej_exp'] is not None: warn_msg += ' It/They WILL NOT BE COADDED.' - msgs.warning(warn_msg) - [msgs.warning(f"Exposure {i}: {fname.split('/')[-1]} {obj}") + log.warning(warn_msg) + [log.warning(f"Exposure {i}: {fname.split('/')[-1]} {obj}") for i, (fname, obj, bad_exp) in enumerate(zip(_spec1dfiles, _objids, bad_exps)) if bad_exp] if self.par['sigrej_exp'] is not None: # remove bad exposure diff --git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index 1654985c06..849303b7b4 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -16,7 +16,7 @@ from astropy.table import Table, vstack from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit import specobjs @@ -257,7 +257,7 @@ def default_par(spectrograph, inp_cfg=None, det=None, only_slits=None, exclude_s if inp_cfg is not None: cfg = utils.recursive_update(cfg, dict(inp_cfg)) if only_slits is not None and det is not None: - msgs.warning('only_slits and det are mutually exclusive. Ignoring det.') + log.warning('only_slits and det are mutually exclusive. 
Ignoring det.') _det = None else: _det = det @@ -266,7 +266,7 @@ cfg['rdx']['detnum'] = _det if only_slits is not None and exclude_slits is not None: - msgs.warning('only_slits and exclude_slits are mutually exclusive. Ignoring exclude_slits.') + log.warning('only_slits and exclude_slits are mutually exclusive. Ignoring exclude_slits.') _exclude_slits = None else: _exclude_slits = exclude_slits @@ -384,7 +384,7 @@ def good_slitindx(self, only_slits=None, exclude_slits=None): """ if exclude_slits is not None and only_slits is not None: - msgs.warning('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. ' + log.warning('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. ' 'Using `only_slits` and ignoring `exclude_slits`') _exclude_slits = None else: @@ -414,13 +414,13 @@ def good_slitindx(self, only_slits=None, exclude_slits=None): _only_slits = np.atleast_1d(only_slits) # create an array of slit index that are selected by the user and are also good slits good_onlyslits = np.array([], dtype=int) - msgs.info('Coadding only the following slits:') + log.info('Coadding only the following slits:') for islit in _only_slits: if islit not in slits0.slitord_id[good_slitindx]: # Warnings for the slits that are selected by the user but NOT good slits - msgs.warning('Slit {} cannot be coadd because masked'.format(islit)) + log.warning('Slit {} cannot be coadded because it is masked'.format(islit)) else: - msgs.info(f'Slit {islit}') + log.info(f'Slit {islit}') indx = np.where(slits0.slitord_id[good_slitindx] == islit)[0] good_onlyslits = np.append(good_onlyslits, good_slitindx[indx]) return good_onlyslits @@ -430,10 +430,10 @@ def good_slitindx(self, only_slits=None, exclude_slits=None): _exclude_slits = np.atleast_1d(_exclude_slits) # create an array of slit index that are excluded by the user exclude_slitindx = np.array([], dtype=int) - msgs.info('Excluding the following slits:') + log.info('Excluding the following slits:') for islit in _exclude_slits: if islit in slits0.slitord_id[good_slitindx]: - msgs.info(f'Slit {islit}') + log.info(f'Slit {islit}') exclude_slitindx = np.append(exclude_slitindx, np.where(slits0.slitord_id[good_slitindx] == islit)[0][0]) # these are the good slit index excluding the slits that are selected by the user @@ -519,7 +519,7 @@ def optimal_weights(self, uniq_obj_id, order=None, weight_method='auto'): fluxes.append(flux_iexp) ivars.append(ivar_iexp) gpms.append(gpm_iexp) - msgs.warning(f'Optimal extraction not available for object ' + log.warning(f'Optimal extraction not available for object ' f'{uniq_obj_id[iexp]} {order_str} in exp {iexp}. 
Using box extraction.') else: raise PypeItError(f'Optimal weights cannot be determined because ' @@ -554,7 +554,7 @@ def coadd(self, interp_dspat=True): coadd_list = [] for slit_idx in self.good_slits: _slitord_id = self.stack_dict['slits_list'][0].slitord_id - msgs.info(f'Performing 2D coadd for slit/order {_slitord_id[slit_idx]} ({slit_idx + 1}/{self.nslits_single})') + log.info(f'Performing 2D coadd for slit/order {_slitord_id[slit_idx]} ({slit_idx + 1}/{self.nslits_single})') # mask identifying the current slit in each exposure thismask_stack = [np.abs(slitmask - self.spat_ids[slit_idx]) <= self.par['coadd2d']['spat_toler'] @@ -562,7 +562,7 @@ # check if the slit is found in every exposure if not np.all([np.any(thismask) for thismask in thismask_stack]): - msgs.warning(f'Slit/order {_slitord_id[slit_idx]} was not found in every exposures. ' + log.warning(f'Slit/order {_slitord_id[slit_idx]} was not found in every exposure. ' f'2D coadd cannot be performed on this slit. Try increasing the parameter spat_toler') continue @@ -856,7 +856,7 @@ def offsets_report(offsets, pixscale, offsets_method): for iexp, off in enumerate(offsets): msg_string += ' {:2d} {:6.2f} {:6.3f}'.format(iexp, off, off*pixscale) msg_string += '---------------------------------------------------------------------------------' - msgs.info(msg_string) + log.info(msg_string) def offset_slit_cen(self, slitid, offsets): """ @@ -1005,7 +1005,7 @@ def load_coadd2d_stacks(self, spec2d, chk_version=False): exptime_coadd = np.percentile(exptime_stack, 50., method='higher') isclose_exptime = np.isclose(exptime_stack, exptime_coadd, atol=1.) if not np.all(isclose_exptime): - msgs.warning('Exposure time is not consistent (within 1 sec) for all frames being coadded! ' + log.warning('Exposure time is not consistent (within 1 sec) for all frames being coadded! ' f'Scaling each image by the median exposure time ({exptime_coadd} s) before coadding.') exp_scale = exptime_coadd / exptime_stack for iexp in range(nfiles): @@ -1054,12 +1054,12 @@ def compute_offsets(self): This is partially overloaded by the child methods. """ - msgs.info('Get Offsets') + log.info('Get Offsets') # binned pixel scale of the frames to be coadded pixscale = parse.parse_binning(self.stack_dict['detectors'][0].binning)[1]*self.stack_dict['detectors'][0].platescale # 1) offsets are provided in the header of the spec2d files if self.par['coadd2d']['offsets'] == 'header': - msgs.info('Using offsets from header') + log.info('Using offsets from header') dithoffs = [self.spectrograph.get_meta_value(f, 'dithoff') for f in self.spec2d] if None in dithoffs: raise PypeItError('Dither offsets keyword not found for one or more spec2d files. ' @@ -1074,7 +1074,7 @@ # 2) a list of offsets is provided by the user (no matter if we have a bright object or not) elif isinstance(self.par['coadd2d']['offsets'], (list, np.ndarray)): - msgs.info('Using user input offsets') + log.info('Using user input offsets') # use them self.offsets = self.check_input(self.par['coadd2d']['offsets'], 'offsets') self.offsets_report(self.offsets, pixscale, 'user input') @@ -1087,7 +1087,7 @@ raise PypeItError('maskdef_offsets are not recoded in the SlitTraceSet ' 'for one or more exposures. 
They cannot be used.') # the offsets computed during the main reduction (`run_pypeit`) are used - msgs.info('Determining offsets using maskdef_offset recoded in SlitTraceSet') + log.info('Determining offsets using maskdef_offset recorded in SlitTraceSet') self.offsets = self.maskdef_offset[0] - self.maskdef_offset self.offsets_report(self.offsets, pixscale, 'maskdef_offset') @@ -1106,13 +1106,13 @@ def compute_weights(self): This method sets the internal :attr:`use_weights`. Documentation on the form of self.use_weights needs to be written. """ - msgs.info('Get Weights') + log.info('Get Weights') # 1) User input weight if isinstance(self.par['coadd2d']['weights'], (list, np.ndarray)): # use those inputs self.use_weights = self.check_input(self.par['coadd2d']['weights'], 'weights') - msgs.info('Using user input weights') + log.info('Using user input weights') # 2) No bright object and parset `weights` is 'auto' or 'uniform', # or Yes bright object but the user wants to still use uniform weights @@ -1124,10 +1124,10 @@ # and they might miss the warning. Its debatable though. # warn if the user had put `auto` in the parset - msgs.warning('Weights cannot be computed because no unique reference object ' + log.warning('Weights cannot be computed because no unique reference object ' 'with the highest S/N was found. Using uniform weights instead.') elif self.par['coadd2d']['weights'] == 'uniform': - msgs.info('Using uniform weights') + log.info('Using uniform weights') # use uniform weights self.use_weights = (np.ones(self.nexp) / float(self.nexp)).tolist() @@ -1185,10 +1185,10 @@ def unpack_specobj(spec, spatord_id=None): # check if BOX_COUNTS is available elif spec.has_box_ext() and np.any(spec.BOX_MASK): _, flux, ivar, gpm = spec.get_box_ext() - msgs.warning(f'Optimal extraction not available for obj_id {objid} ' + log.warning(f'Optimal extraction not available for obj_id {objid} ' f'in slit/order {spatord_id}. 
Using box extraction.') else: - msgs.warning(f'Optimal and Boxcar extraction not available for obj_id {objid} in slit/order {spatord_id}.') + log.warning(f'Optimal and Boxcar extraction not available for obj_id {objid} in slit/order {spatord_id}.') _, flux, ivar, gpm = None, None, None, None return flux, ivar, gpm @@ -1387,7 +1387,7 @@ def compute_offsets(self): else: offsets_method = 'brightest object found on slit: {:d} with avg SNR={:5.2f}'.format(self.spatid_bri,np.mean(self.snr_bar_bri)) - msgs.info(f'Determining offsets using {offsets_method}') + log.info(f'Determining offsets using {offsets_method}') thismask_stack = [np.abs(slitmask - self.spatid_bri) <= self.par['coadd2d']['spat_toler'] for slitmask in self.stack_dict['slitmask_stack']] slitidx_bri = np.where(np.abs(self.spat_ids - self.spatid_bri) <= self.par['coadd2d']['spat_toler'])[0][0] @@ -1404,7 +1404,7 @@ def compute_offsets(self): sci_list = [[sciimg - skymodel for sciimg, skymodel in zip(self.stack_dict['sciimg_stack'], self.stack_dict['skymodel_stack'])]] var_list = [[utils.inverse(sciivar) for sciivar in self.stack_dict['sciivar_stack']]] - msgs.info('Rebinning Images') + log.info('Rebinning Images') mask_stack = [mask == 0 for mask in self.stack_dict['mask_stack']] sci_list_rebin, var_list_rebin, norm_rebin_stack, nsmp_rebin_stack = coadd.rebin2d( wave_bins, dspat_bins, self.stack_dict['waveimg_stack'], dspat_stack, thismask_stack, mask_stack, sci_list, var_list) @@ -1457,7 +1457,7 @@ def compute_offsets(self): traces_rect[:, iexp] = sobjs_exp.TRACE_SPAT if self.par['coadd2d']['user_obj_ids'] is not None: - msgs.info(f'The median distance between the original traces and those in the ' + log.info(f'The median distance between the original traces and those in the ' f'rebinned image for the user_obj_ids is {np.median(user_obj_dspats):.2f} pixels') # Now deterimine the offsets. Arbitrarily set the zeroth trace to the reference @@ -1500,9 +1500,9 @@ def compute_weights(self): # TODO add a parset for weight_method in optimal_weights. 
The default is currently 'auto' _, self.use_weights = self.optimal_weights(self.obj_id_bri) if self.par['coadd2d']['user_obj_ids'] is not None: - msgs.info(f'Weights computed using a unique reference object in slit={self.spatid_bri} provided by the user') + log.info(f'Weights computed using a unique reference object in slit={self.spatid_bri} provided by the user') else: - msgs.info(f'Weights computed using a unique reference object in slit={self.spatid_bri} with the highest S/N') + log.info(f'Weights computed using a unique reference object in slit={self.spatid_bri} with the highest S/N') self.snr_report(self.spatid_bri, self.spat_pixpos_bri, self.snr_bar_bri) @@ -1529,7 +1529,7 @@ def get_brightest_obj(self, specobjs_list, slit_spat_ids): The SPAT_ID for the slit that the highest S/N ratio object is on - snr_bar: ndarray, float, shape (len(list),): RMS S/N computed for this brightest reference object in each exposure """ - msgs.info('Finding brightest object') + log.info('Finding brightest object') nexp = len(specobjs_list) nslits = slit_spat_ids.size @@ -1539,7 +1539,7 @@ def get_brightest_obj(self, specobjs_list, slit_spat_ids): spat_pixpos_max = np.zeros((nslits, nexp), dtype=float) # Loop over each exposure, slit, find the brightest object on that slit for every exposure for iexp, sobjs in enumerate(specobjs_list): - msgs.info("Working on exposure {}".format(iexp)) + log.info("Working on exposure {}".format(iexp)) for islit, spat_id in enumerate(slit_spat_ids): if len(sobjs) == 0: continue @@ -1573,7 +1573,7 @@ def get_brightest_obj(self, specobjs_list, slit_spat_ids): # Find the highest snr object among all the slits if np.all(bpm): - msgs.warning('You do not appear to have a unique reference object that was traced as the highest S/N ' + log.warning('You do not appear to have a unique reference object that was traced as the highest S/N ' 'ratio on the same slit of every exposure. 
Try increasing the parameter `spat_toler`') return None, None, None, None else: @@ -1616,7 +1616,7 @@ def snr_report(self, slitid, spat_pixpos, snr_bar): for iexp, (spat,snr) in enumerate(zip(spat_pixpos, snr_bar)): msg_string += ' {:2d} {:7.1f} {:5.2f}'.format(iexp, spat, snr) msg_string += '-------------------------------------' - msgs.info(msg_string) + log.info(msg_string) # TODO add an option here to actually use the reference trace for cases where they are on the same slit and it is @@ -1822,9 +1822,9 @@ def compute_offsets(self): # a reference trace (this is done in coadd using method `reference_trace_stack`) self.offsets = None if self.par['coadd2d']['user_obj_ids'] is not None: - msgs.info('Reference trace about which 2d coadd is performed is computed using user object') + log.info('Reference trace about which 2d coadd is performed is computed using user object') else: - msgs.info('Reference trace about which 2d coadd is performed is computed using the brightest object') + log.info('Reference trace about which 2d coadd is performed is computed using the brightest object') def compute_weights(self): """ @@ -1848,10 +1848,10 @@ def compute_weights(self): _, iweights = self.optimal_weights(self.obj_id_bri, order=order) self.use_weights.append(iweights) if self.par['coadd2d']['user_obj_ids'] is not None: - msgs.info('Weights computed using a unique reference object provided by the user') + log.info('Weights computed using a unique reference object provided by the user') # TODO: implement something here to print out the snr_report else: - msgs.info('Weights computed using a unique reference object with the highest S/N') + log.info('Weights computed using a unique reference object with the highest S/N') self.snr_report(self.snr_bar_bri) def _get_weights(self, indx=None): @@ -1894,13 +1894,13 @@ def get_brightest_obj(self, specobjs_list, orders): - snr_bar: ndarray, float, shape (len(list),): Average S/N over all the orders for this object """ - msgs.info('Finding brightest object') + log.info('Finding brightest object') nexp = len(specobjs_list) fracpos_id = np.zeros(nexp, dtype=int) snr_bar = np.zeros(nexp) for iexp, sobjs in enumerate(specobjs_list): - msgs.info("Working on exposure {}".format(iexp)) + log.info("Working on exposure {}".format(iexp)) uni_fracpos_id = np.unique(sobjs.ECH_FRACPOS_ID) nobjs = len(uni_fracpos_id) order_snr = np.zeros((orders.size, nobjs), dtype=float) @@ -1930,7 +1930,7 @@ def get_brightest_obj(self, specobjs_list, orders): fracpos_id[iexp] = uni_fracpos_id[snr_bar_vec.argmax()] snr_bar[iexp] = snr_bar_vec[snr_bar_vec.argmax()] if 0 in snr_bar: - msgs.warning('You do not appear to have a unique reference object that was traced as the highest S/N ' + log.warning('You do not appear to have a unique reference object that was traced as the highest S/N ' 'ratio for every exposure') return None, None return fracpos_id, snr_bar @@ -1956,7 +1956,7 @@ def snr_report(self, snr_bar): msg_string += '\n {:d} {:5.2f}'.format(iexp, snr) msg_string += '\n-------------------------------------' - msgs.info(msg_string) + log.info(msg_string) def reference_trace_stack(self, slitid, offsets=None, uniq_obj_id=None): diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index 26a714a138..62c67e14d7 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -14,7 +14,7 @@ from scipy.interpolate import interp1d import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import alignframe, datamodel, flatfield, io, sensfunc, spec2dobj, 
utils from pypeit.core.flexure import calculate_image_phase @@ -275,7 +275,7 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, w wave_ref (:obj:`float`, optional): Reference wavelength (The DAR correction will be performed relative to this wavelength) """ - msgs.info("Preparing the parameters for the DAR correction") + log.info("Preparing the parameters for the DAR correction") # Get DAR parameters self.airmass = airmass # unitless @@ -291,7 +291,7 @@ def __init__(self, airmass, parangle, pressure, temperature, humidity, cosdec, w self.humidity, self.wave_ref.to_value(units.micron)) # Print out the DAR parameters - msgs.info( + log.info( "DAR correction parameters:\n" f" Airmass = {self.airmass:.2f}\n" f" Pressure = {self.pressure.to_value(units.mbar):.2f} mbar\n" @@ -481,20 +481,20 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. if self.numfiles == 1 and self.cubepar["reference_image"] is None: if self.align: - msgs.warning("Parameter 'align' should be False when there is only one frame and no reference image") - msgs.info("Setting 'align' to False") + log.warning("Parameter 'align' should be False when there is only one frame and no reference image") + log.info("Setting 'align' to False") self.align = False if self.ra_offsets is not None: if not self.align: - msgs.warning("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.") - msgs.info("Setting 'align' to True") + log.warning("When 'ra_offset' and 'dec_offset' are set, 'align' must be True.") + log.info("Setting 'align' to True") self.align = True # If no ra_offsets or dec_offsets have been provided, initialise the lists self.user_alignment = True if self.ra_offsets is None and self.dec_offsets is None: - msgs.info("No RA or Dec offsets have been provided.") + log.info("No RA or Dec offsets have been provided.") if self.align: - msgs.info("An automatic alignment will be performed using WCS information from the headers.") + log.info("An automatic alignment will be performed using WCS information from the headers.") # User offsets are not provided, so turn off the user_alignment self.user_alignment = False # Initialise the lists of ra_offsets and dec_offsets @@ -533,14 +533,14 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_cor if self.method == "subpixel": self.spec_subpixel, self.spat_subpixel, self.slice_subpixel = self.cubepar['spec_subpixel'], self.cubepar['spat_subpixel'], self.cubepar['slice_subpixel'] self.skip_subpix_weights = False - msgs.info( + log.info( "Adopting the subpixel algorithm to generate the datacube, with subpixellation scales:\n" f" Spectral: {self.spec_subpixel}\n" f" Spatial: {self.spat_subpixel}\n" f" Slices: {self.slice_subpixel}" ) elif self.method == "ngp": - msgs.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.") + log.info("Adopting the nearest grid point (NGP) algorithm to generate the datacube.") self.skip_subpix_weights = True else: raise PypeItError(f"The following datacube method is not allowed: {self.method}") @@ -632,10 +632,10 @@ def set_default_scalecorr(self): """ if self.cubepar['scale_corr'] is not None: if self.cubepar['scale_corr'] == "image": - msgs.info("The default relative spectral illumination correction will use the science image") + log.info("The default relative spectral illumination correction will use the science image") 
self.scalecorr_default = "image" else: - msgs.info( + log.info( "Loading default scale image for relative spectral illumination correction:\n" +self.cubepar['scale_corr'] ) @@ -644,8 +644,8 @@ def set_default_scalecorr(self): self.detname, chk_version=self.chk_version) except Exception as e: - msgs.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') - msgs.warning( + log.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') + log.warning( "Could not load scaleimg from spec2d file:\n" + self.cubepar['scale_corr'] + "\nscale correction will not be performed unless you have specified the " @@ -701,7 +701,7 @@ def get_current_scalecorr(self, spec2DObj, scalecorr=None): this_scalecorr = "none" # Don't do relative spectral illumination scaling else: # Load a user specified frame for sky subtraction - msgs.info( + log.info( "Loading the following frame for the relative spectral illumination " "correction:\n" + scalecorr ) @@ -709,15 +709,15 @@ def get_current_scalecorr(self, spec2DObj, scalecorr=None): spec2DObj_scl = spec2dobj.Spec2DObj.from_file(scalecorr, self.detname, chk_version=self.chk_version) except Exception as e: - msgs.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') + log.warning(f'Loading spec2d file raised {type(e).__name__}:\n{str(e)}') raise PypeItError("Could not load skysub image from spec2d file:\n" + scalecorr) else: relScaleImg = spec2DObj_scl.scaleimg this_scalecorr = scalecorr if this_scalecorr == "none": - msgs.info("Relative spectral illumination correction will not be performed.") + log.info("Relative spectral illumination correction will not be performed.") else: - msgs.info( + log.info( "Using the following frame for the relative spectral illumination correction:\n" + this_scalecorr ) @@ -733,11 +733,11 @@ def set_default_skysub(self): self.skyImgDef = np.array([0.0]) # Do not perform sky subtraction self.skySclDef = np.array([0.0]) # Do not perform sky subtraction elif self.cubepar['skysub_frame'] == "image": - msgs.info("The sky model in the spec2d science frames will be used for sky " + log.info("The sky model in the spec2d science frames will be used for sky " "subtraction\n(unless specific skysub frames have been specified)") self.skysub_default = "image" else: - msgs.info("Loading default image for sky subtraction:\n" + log.info("Loading default image for sky subtraction:\n" + self.cubepar['skysub_frame']) try: spec2DObj = spec2dobj.Spec2DObj.from_file(self.cubepar['skysub_frame'], @@ -811,7 +811,7 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): this_skysub = "none" # Don't do sky subtraction else: # Load a user specified frame for sky subtraction - msgs.info("Loading skysub frame:\n" + opts_skysub) + log.info("Loading skysub frame:\n" + opts_skysub) try: spec2DObj_sky = spec2dobj.Spec2DObj.from_file(opts_skysub, self.detname, chk_version=self.chk_version) @@ -824,9 +824,9 @@ def get_current_skysub(self, spec2DObj, exptime, opts_skysub=None): skyScl = spec2DObj_sky.scaleimg this_skysub = opts_skysub # User specified spec2d for sky subtraction if this_skysub == "none": - msgs.info("Sky subtraction will not be performed.") + log.info("Sky subtraction will not be performed.") else: - msgs.info("Using the following frame for sky subtraction:\n" + this_skysub) + log.info("Using the following frame for sky subtraction:\n" + this_skysub) # Return the skysub params for this frame return this_skysub, skyImg, skyScl @@ -850,12 +850,12 @@ def add_grating_corr(self, flatfile, waveimg, slits, 
spat_flexure=None): """ # Check if the Flat file exists if not os.path.exists(flatfile): - msgs.warning( + log.warning( "Grating correction requested, but the following file does not exist:\n" + flatfile ) return if flatfile not in self.flat_splines.keys(): - msgs.info("Calculating relative sensitivity for grating correction") + log.info("Calculating relative sensitivity for grating correction") # Load the Flat file flatimages = flatfield.FlatImages.from_file(flatfile, chk_version=self.chk_version) total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', spat_flexure=spat_flexure) * \ @@ -869,7 +869,7 @@ def add_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): flexure=spat_flexure, smooth_npix=self.flatpar['slit_illum_smooth_npix']) else: - msgs.info("Using relative spectral illumination from FlatImages") + log.info("Using relative spectral illumination from FlatImages") scale_model = flatimages.pixelflat_spec_illum # Extract a quick spectrum of the flatfield wave_spl, spec_spl = extract.extract_hist_spectrum(waveimg, flatframe*utils.inverse(scale_model), @@ -959,14 +959,14 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): if key in spec2DObj.calibs: alignfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) if os.path.exists(alignfile) and self.cubepar['astrometric']: - msgs.info("Loading alignments") + log.info("Loading alignments") alignments = alignframe.Alignments.from_file(alignfile, chk_version=self.chk_version) else: - msgs.warning(f'Processed alignment frame not recorded or not found!') - msgs.info("Using slit edges for astrometric transform") + log.warning(f'Processed alignment frame not recorded or not found!') + log.info("Using slit edges for astrometric transform") else: - msgs.info("Using slit edges for astrometric transform") + log.info("Using slit edges for astrometric transform") # If nothing better was provided, use the slit edges if alignments is None: left, right, _ = slits.select_edges(flexure=spat_flexure) @@ -975,7 +975,7 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): else: locations = self.par['calibrations']['alignment']['locations'] traces = alignments.traces - msgs.info("Generating alignment splines") + log.info("Generating alignment splines") return alignframe.AlignmentSplines(traces, locations, spec2DObj.tilts) def load(self): @@ -1016,7 +1016,7 @@ def load(self): # Load all spec2d files and prepare the data for making a datacube for ff, fil in enumerate(self.spec2d): # Load it up - msgs.info(f"Loading PypeIt spec2d frame ({ff+1}/{len(self.spec2d)}):\n" + fil) + log.info(f"Loading PypeIt spec2d frame ({ff+1}/{len(self.spec2d)}):\n" + fil) spec2DObj = spec2dobj.Spec2DObj.from_file(fil, self.detname, chk_version=self.chk_version) detector = spec2DObj.detector @@ -1032,7 +1032,7 @@ def load(self): exptime = self.spec.compound_meta([hdr0], 'exptime') # Initialise the slit edges - msgs.info("Constructing slit image") + log.info("Constructing slit image") slits = spec2DObj.slits slitid_img = slits.slit_img(pad=0, flexure=spat_flexure) slits_left, slits_right, _ = slits.select_edges(flexure=spat_flexure) @@ -1087,7 +1087,7 @@ def load(self): dwaveimg[-1, :] = np.abs(waveimg[-1, :] - waveimp[-1, :]) dwv = np.median(dwaveimg[dwaveimg != 0.0]) if self.cubepar['wave_delta'] is None else self.cubepar['wave_delta'] - msgs.info("Using wavelength solution: wave0={0:.3f}, dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv)) + log.info("Using wavelength solution: wave0={0:.3f}, 
dispersion={1:.3f} Angstrom/pixel".format(wave0, dwv)) # Obtain the minimum and maximum wavelength of all slits if self.mnmx_wv is None: @@ -1108,10 +1108,10 @@ def load(self): # If the spatial scale has been set by the user, check that it doesn't exceed the pixel or slicer scales if self._dspat is not None: if pxscl > self._dspat: - msgs.warning("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( + log.warning("Spatial scale requested ({0:f} arcsec) is less than the pixel scale ({1:f} arcsec)".format( 3600.0 * self._dspat, 3600.0 * pxscl)) if slscl > self._dspat: - msgs.warning("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( + log.warning("Spatial scale requested ({0:f} arcsec) is less than the slicer scale ({1:f} arcsec)".format( 3600.0 * self._dspat, 3600.0 * slscl)) # Construct a good pixel mask @@ -1163,7 +1163,7 @@ def load(self): extcorr_sort = 1.0 if False: # Compute the extinction correction - msgs.info("Applying extinction correction") + log.info("Applying extinction correction") atmext = self.spec.get_atmospheric_extinction(self.senspar['UVIS']['extinct_file']) extcorr_sort = atmext.correction_factor(wave_sort, airmass=airmass) ## TODO :: Change the ['UVIS']['extinct_file'] here when the sensitivity function calculation is unified. @@ -1189,7 +1189,7 @@ def load(self): # scaled by the exposure time and the wavelength sampling sens_sort = 1.0/(exptime * dwav_sort) # If no sensitivity function is provided if self.fluxcal: - msgs.info("Calculating the sensitivity function") + log.info("Calculating the sensitivity function") # Load the sensitivity function sens = sensfunc.SensFunc.from_file(self.sensfile[ff], chk_version=self.par['rdx']['chk_version']) # Interpolate the sensitivity function onto the wavelength grid of the data @@ -1260,7 +1260,7 @@ def load(self): else: hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") # Write out the datacube - msgs.info("Saving datacube as: {0:s}".format(outfile)) + log.info("Saving datacube as: {0:s}".format(outfile)) final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, sensfunc=None, fluxed=self.fluxcal) final_cube.to_file(outfile, primary_hdr=self.all_header[ff], hdr=hdr, overwrite=self.overwrite) @@ -1307,7 +1307,7 @@ def run_align(self): # Iterate over white light image generation and spatial shifting numiter = 2 for dd in range(numiter): - msgs.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") + log.info(f"Iterating on spatial translation - ITERATION #{dd+1}/{numiter}") # Generate the WCS image_wcs, voxedge, reference_image = \ datacube.create_wcs(self.all_ra, self.all_dec, self.all_wave, slitid_img_gpm, self._dspat, wavediff, @@ -1331,9 +1331,9 @@ def run_align(self): # ref_idx will be the index of the cube with the highest S/N ref_idx = np.argmax(self.weights) reference_image = wl_imgs[:, :, ref_idx].copy() - msgs.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1)) + log.info("Calculating spatial translation of each cube relative to cube #{0:d})".format(ref_idx+1)) else: - msgs.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'") + log.info("Calculating the spatial translation of each cube relative to user-defined 'reference_image'") # Calculate the image offsets relative to the reference image for ff in range(self.numfiles): @@ -1342,7 +1342,7 @@ def run_align(self): # Convert 
pixel shift to degrees shift ra_shift *= self._dspat/cosdec dec_shift *= self._dspat - msgs.info(f"Spatial shift of cube #{ff + 1:d}:\n" + log.info(f"Spatial shift of cube #{ff + 1:d}:\n" f"RA, DEC (arcsec) = {ra_shift*3600.0:+0.3f} E, {dec_shift*3600.0:+0.3f} N" ) # Store the shift in the RA and DEC offsets in degrees @@ -1470,7 +1470,7 @@ def run(self): else: hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") # Write out the datacube - msgs.info("Saving datacube as: {0:s}".format(outfile)) + log.info("Saving datacube as: {0:s}".format(outfile)) final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, sensfunc=sensfunc, fluxed=self.fluxcal) # Note, we only store in the primary header the first spec2d file @@ -1498,7 +1498,7 @@ def run(self): else: hdr['FLUXUNIT'] = (1, "Flux units -- counts/s/Angstrom/arcsec^2") # Write out the datacube - msgs.info("Saving datacube as: {0:s}".format(outfile)) + log.info("Saving datacube as: {0:s}".format(outfile)) final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, sensfunc=sensfunc, fluxed=self.fluxcal) final_cube.to_file(outfile, primary_hdr=self.all_header[ff], hdr=hdr, overwrite=self.overwrite) diff --git a/pypeit/core/arc.py b/pypeit/core/arc.py index 8d62d2dda8..1dc7770ab2 100644 --- a/pypeit/core/arc.py +++ b/pypeit/core/arc.py @@ -16,7 +16,7 @@ import scipy from astropy import stats -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit.core import fitting @@ -69,7 +69,7 @@ def fit2darc(all_wv,all_pix,all_orders,nspec, nspec_coeff=4,norder_coeff=4,sigre # set some plotting parameters utils.pyplot_rcparams() plt.figure(figsize=(7,5)) - msgs.info("Plot identified lines") + log.info("Plot identified lines") cm = plt.get_cmap('RdYlBu_r') sc = plt.scatter(all_orders, all_pix,c=all_wv/10000., cmap=cm) cbar = plt.colorbar(sc) @@ -89,7 +89,7 @@ def fit2darc(all_wv,all_pix,all_orders,nspec, nspec_coeff=4,norder_coeff=4,sigre # Report the RMS fin_rms = pypeitFit.calc_fit_rms(x2=all_orders, apply_mask=True) - msgs.info("RMS: {0:.5f} Ang*Order#".format(fin_rms)) + log.info("RMS: {0:.5f} Ang*Order#".format(fin_rms)) if debug: fit2darc_global_qa(pypeitFit, nspec) @@ -113,7 +113,7 @@ def fit2darc_global_qa(pypeitFit, nspec, outfile=None): Name of the outfile to write to disk. If not provided, show to screen. """ - msgs.info("Creating QA for 2D wavelength solution") + log.info("Creating QA for 2D wavelength solution") utils.pyplot_rcparams() @@ -206,7 +206,7 @@ def fit2darc_orders_qa(pypeitFit, nspec, outfile=None): """ - msgs.info("Creating QA for 2D wavelength solution") + log.info("Creating QA for 2D wavelength solution") utils.pyplot_rcparams() @@ -346,7 +346,7 @@ def resize_mask2arc(shape_arc, slitmask_orig): if ((nspec_orig > nspec) & (nspec_orig % nspec != 0)) | ((nspec > nspec_orig) & (nspec % nspec_orig != 0)): raise PypeItError('Problem with images sizes. arcimg size and calibration size need to be integer multiples of each other') else: - msgs.info('Calibration images have different binning than the arcimg. Resizing calibs for arc spectrum extraction.') + log.info('Calibration images have different binning than the arcimg. 
Resizing calibs for arc spectrum extraction.')
             slitmask = utils.rebin_slice(slitmask_orig, (nspec, nspat))
     else:
         slitmask = slitmask_orig

@@ -376,7 +376,7 @@ def resize_slits2arc(shape_arc, shape_orig, trace_orig):
     # be a different size
     (nspec_orig,nspat_orig) = shape_orig
     if nspec_orig != nspec:
-        msgs.info('Calibration images have different binning than the arcimg. Resizing calibs for arc spectrum extraction.')
+        log.info('Calibration images have different binning than the arcimg. Resizing calibs for arc spectrum extraction.')
         spec_vec_orig = np.arange(nspec_orig)/float(nspec_orig - 1)
         spec_vec = np.arange(nspec)/float(nspec - 1)
         spat_ratio = float(nspat)/float(nspat_orig)
@@ -483,12 +483,12 @@ def get_censpec(slit_cen, slitmask, arcimg, gpm=None, box_rad=3.0,
             continue
         # Check if this slit is masked
         if slit_bpm is not None and slit_bpm[islit]:
-            msgs.info('Ignoring masked slit {}'.format(islit+1))
+            log.info('Ignoring masked slit {}'.format(islit+1))
             # TODO -- Avoid using NaNs
             arc_spec[:,islit] = np.nan
             continue
         if verbose:
-            msgs.info(f'Extracting approximate arc spectrum of slit {islit+1}/{nslits}')
+            log.info(f'Extracting approximate arc spectrum of slit {islit+1}/{nslits}')
         # Create a mask for the pixels that will contribue to the arc
         arcmask = _gpm & (np.absolute(spat[None,:] - slit_cen[:,islit,None]) < box_rad)
         # Trimming the image makes this much faster
@@ -804,7 +804,7 @@ def iter_continuum(spec, gpm=None, fwhm=4.0, sigthresh = 2.0, sigrej=3.0, niter_
         #frac_mask = np.sum(np.invert(cont_mask))/float(nspec)
         nmask = np.sum(np.invert(peak_mask[gpm]))
         if nmask > max_nmask:
-            msgs.warning('Too many pixels {:d} masked in spectrum continuum definiton: frac_mask = {:5.3f} > {:5.3f} which is '
+            log.warning('Too many pixels {:d} masked in spectrum continuum definition: frac_mask = {:5.3f} > {:5.3f} which is '
                      'max allowed. Only masking the {:d} largest values....'.format(nmask, nmask/nspec_available, max_mask_frac, max_nmask))
             # Old
             #cont_mask = np.ones_like(cont_mask) & gpm
@@ -819,7 +819,7 @@ def iter_continuum(spec, gpm=None, fwhm=4.0, sigthresh = 2.0, sigrej=3.0, niter_

     ngood = np.sum(cont_mask)
     if ngood == 0:
-        msgs.warning("All pixels rejected for continuum. Returning a 0 array")
+        log.warning("All pixels rejected for continuum. Returning a 0 array")
         return np.zeros_like(spec), cont_mask

     samp_width = np.ceil(ngood/cont_samp).astype(int)
@@ -974,7 +974,7 @@ def detect_lines(censpec, sigdetect=5.0, fwhm=4.0, fit_frac_fwhm=1.25, input_thr

     # Detect the location of the arc lines
     if verbose:
-        msgs.info("Detecting lines...isolating the strongest, nonsaturated lines")
+        log.info("Detecting lines...isolating the strongest, nonsaturated lines")

     # TODO: Why is this here? Can't the calling function be required to
     # pass a single spectrum? This is not reflected in the docstring.
@@ -1003,7 +1003,7 @@ def detect_lines(censpec, sigdetect=5.0, fwhm=4.0, fit_frac_fwhm=1.25, input_thr
                                                    sigma_lower=3.0, sigma_upper=3.0,
                                           cenfunc= np.nanmedian, stdfunc = np.nanstd)
         if stddev == 0.0:
-            msgs.warning('stddev = 0.0, so resetting to 0.1')
+            log.warning('stddev = 0.0, so resetting to 0.1')
             stddev = 0.1
         thresh = med + sigdetect * stddev
     else:
@@ -1049,7 +1049,7 @@ def detect_lines(censpec, sigdetect=5.0, fwhm=4.0, fit_frac_fwhm=1.25, input_thr
     # requested, then grab and return only these lines
     if nfind is not None:
         if nfind > len(nsig):
-            msgs.warning('Requested {0} peaks but only found {1}. '.format(nfind, len(tampl)) +
+            log.warning('Requested {0} peaks but only found {1}. '.format(nfind, len(tampl)) +
                      ' Returning all the peaks found.')
         else:
             ikeep = (nsig.argsort()[::-1])[0:nfind]
diff --git a/pypeit/core/atmextinction.py b/pypeit/core/atmextinction.py
index 51681d57ba..84f32dd0c1 100644
--- a/pypeit/core/atmextinction.py
+++ b/pypeit/core/atmextinction.py
@@ -14,7 +14,7 @@
 from astropy import coordinates
 from astropy import table
 
-from pypeit import msgs
+from pypeit import log
 from pypeit import dataPaths
 from pypeit import PypeItError
 from pypeit import utils
@@ -46,13 +46,13 @@ class AtmosphericExtinction:
     def __init__(self, wave, mag_ext, assume_sorted=True, file=None):
 
         if len(wave) != len(mag_ext):
-            msgs.error('Wavelength and extinction vectors must have the same length.')
+            log.error('Wavelength and extinction vectors must have the same length.')
 
         self.wave = np.asarray(wave, dtype=float)
         self.mag_ext = np.asarray(mag_ext, dtype=float)
 
         if self.wave.ndim != 1 or self.mag_ext.ndim != 1:
-            msgs.error('Atmospheric extinction must be 1D.')
+            log.error('Atmospheric extinction must be 1D.')
 
         if not assume_sorted:
             srt = np.argsort(self.wave)
@@ -95,7 +95,7 @@ def closest_extinction_file(longitude, latitude, toler=5.):
         return extinct_files[int(idx)]['File']
 
     # Crash with a helpful error message
-    msgs.error(
+    log.error(
         f'No atmospheric extinction file was found within {toler} degrees of observation at '
         f'lon = {longitude:.1f} lat = {latitude:.1f}.'
     )
@@ -118,14 +118,14 @@ def from_coordinates(cls, longitude, latitude, toler=5.):
         try:
             extinct_file = cls.closest_extinction_file(longitude, latitude, toler=toler)
         except PypeItError as e:
-            msgs.error(
+            log.error(
                 f'{e} You may select a specific extinction file (e.g., KPNO) for use by adding '
                 'an ``extinct_file`` to your pypeit_sensfunc or pypeit_fluxcalib input file. '
                 'See instructions at'
                 'https://pypeit.readthedocs.io/en/latest/fluxing.html#extinction-correction.'
             )
 
-        msgs.info(f'Using {extinct_file} for extinction corrections.')
+        log.info(f'Using {extinct_file} for extinction corrections.')
         return cls.from_file(extinct_file)
 
     @classmethod
@@ -166,7 +166,7 @@ def correction_factor(self, wave, airmass=1.):
         """
         # Warn if extrapolation is necessary
         if np.amin(wave) < np.amin(self.wave) or np.amax(wave) > np.amax(self.wave):
-            msgs.warn(
+            log.warning(
                 'Spectral regions outside of the bounds of the atmospheric extinction curve are '
                 'set to the nearest value.'
            )
@@ -205,14 +205,14 @@ def correct(flux, factor, ivar=None):
     _flux = np.asarray(flux)
     _factor = np.asarray(factor)
     if _flux.size != _factor.size:
-        msgs.error('Flux and correction factor arrays must have the same size.')
+        log.error('Flux and correction factor arrays must have the same size.')
 
     if ivar is None:
         return _flux * _factor
 
     _ivar = np.asarray(ivar)
     if _ivar.size != _flux.size:
-        msgs.error('Inverse variance and flux arrays must have the same size.')
+        log.error('Inverse variance and flux arrays must have the same size.')
 
     return _flux * _factor, _ivar * utils.inverse(_factor**2)
 
diff --git a/pypeit/core/coadd.py b/pypeit/core/coadd.py
index 426cb40f6a..cb83c66320 100644
--- a/pypeit/core/coadd.py
+++ b/pypeit/core/coadd.py
@@ -22,7 +22,7 @@
 from astropy import stats
 from astropy import convolution
 
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import dataPaths
 from pypeit import utils
@@ -72,10 +72,10 @@ def renormalize_errors_qa(chi, maskchi, sigma_corr, sig_range = 6.0,
     plt.title(title, fontsize=16, color='red')
     if qafile is not None:
         if len(qafile.split('.'))==1:
-            msgs.info("No fomat given for the qafile, save to PDF format.")
+            log.info("No format given for the qafile; saving to PDF format.")
             qafile = qafile+'.pdf'
         plt.savefig(qafile,dpi=300)
-        msgs.info("Wrote QA: {:s}".format(qafile))
+        log.info("Wrote QA: {:s}".format(qafile))
     plt.show()
     plt.close()
 
@@ -117,13 +117,13 @@ def renormalize_errors(chi, mask, clip=6.0, max_corr=5.0, title = '', debug=Fals
         chi2_sigrej = np.percentile(chi2[maskchi], 100.0*gauss_prob)
         sigma_corr = np.sqrt(chi2_sigrej)
         if sigma_corr < 1.0:
-            msgs.warning(
+            log.warning(
                 f"Error renormalization found correction factor sigma_corr = {sigma_corr} < 1.\n"
                 "Errors are overestimated so not applying correction."
             )
             sigma_corr = 1.0
         if sigma_corr > max_corr:
-            msgs.warning(
+            log.warning(
                 f"Error renormalization found sigma_corr/sigma = {sigma_corr} > {max_corr}.\n"
                 "Errors are severely underestimated.\nSetting correction to sigma_corr = "
                 f"{max_corr:4.2f}"
@@ -134,7 +134,7 @@ def renormalize_errors(chi, mask, clip=6.0, max_corr=5.0, title = '', debug=Fals
             renormalize_errors_qa(chi, maskchi, sigma_corr, title=title)
 
     else:
-        msgs.warning('No good pixels in error_renormalize. There are probably issues with your data')
+        log.warning('No good pixels in error_renormalize. There are probably issues with your data')
         sigma_corr = 1.0
 
     return sigma_corr, maskchi
 
@@ -865,7 +865,7 @@ def sn_weights(fluxes, ivars, gpms, sn_smooth_npix=None, weight_method='auto', v
 
     # Check if relative weights input
     if verbose:
-        msgs.info('Computing weights with weight_method={:s}'.format(weight_method))
+        log.info('Computing weights with weight_method={:s}'.format(weight_method))
 
     weights = []
 
@@ -874,7 +874,7 @@ def sn_weights(fluxes, ivars, gpms, sn_smooth_npix=None, weight_method='auto', v
         # Relative weights are requested, use the highest S/N spectrum as a reference
         ref_spec = np.argmax(sn2)
         if verbose:
-            msgs.info(
+            log.info(
                 "The reference spectrum (ref_spec={0:d}) has a typical S/N = {1:.3f}".format(ref_spec, sn2[ref_spec]))
         # Adjust the arrays to be relative
         refscale = utils.inverse(sn_val[ref_spec])
@@ -909,7 +909,7 @@ def sn_weights(fluxes, ivars, gpms, sn_smooth_npix=None, weight_method='auto', v
 
     if verbose:
         for iexp in range(nexp):
-            msgs.info('Using {:s} weights for coadding, S/N '.format(weight_method_used[iexp]) +
+            log.info('Using {:s} weights for coadding, S/N '.format(weight_method_used[iexp]) +
                       '= {:4.2f}, weight = {:4.2f} for {:}th exposure'.format(rms_sn[iexp], np.mean(weights[iexp]), iexp))
 
     # Finish
@@ -1021,20 +1021,20 @@ def robust_median_ratio(
             flux_dat_median = np.median(flux[new_mask])
 
             if (flux_ref_median < 0.0) or (flux_dat_median < 0.0):
-                msgs.warning('Negative median flux found. Not rescaling')
+                log.warning('Negative median flux found. Not rescaling')
                 ratio = 1.0
             else:
                 if verbose:
-                    msgs.info(f'Used {np.sum(new_mask)} good pixels for computing median flux ratio')
+                    log.info(f'Used {np.sum(new_mask)} good pixels for computing median flux ratio')
                 ratio = np.fmax(np.fmin(flux_ref_median/flux_dat_median, max_factor), 1.0/max_factor)
     else:
         if (np.sum(calc_mask) <= min_good*nspec):
-            msgs.warning(
+            log.warning(
                 f'Found only {np.sum(calc_mask)} good pixels for computing median flux ratio.\n'
                 'No median rescaling applied'
             )
         if (snr_resc_med <= snr_do_not_rescale):
-            msgs.warning(
+            log.warning(
                 f'Median flux ratio of pixels in reference spectrum {snr_resc_med} <= '
                 f'snr_do_not_rescale = {snr_do_not_rescale}.\n' +
                 'No median rescaling applied'
@@ -1503,10 +1503,10 @@ def coadd_iexp_qa(wave, flux, rejivar, mask, wave_stack, flux_stack, ivar_stack,
     spec_plot.set_title(title, fontsize=16, color='red')
     if qafile is not None:
         if len(qafile.split('.'))==1:
-            msgs.info("No fomat given for the qafile, save to PDF format.")
+            log.info("No format given for the qafile; saving to PDF format.")
             qafile = qafile+'.pdf'
         plt.savefig(qafile,dpi=300)
-        msgs.info("Wrote QA: {:s}".format(qafile))
+        log.info("Wrote QA: {:s}".format(qafile))
     plt.show()
 
 def weights_qa(waves, weights, gpms, title='', colors=None):
@@ -1623,10 +1623,10 @@ def coadd_qa(wave, flux, ivar, nused, gpm=None, tell=None,
 
     if qafile is not None:
         if len(qafile.split('.'))==1:
-            msgs.info("No fomat given for the qafile, save to PDF format.")
+            log.info("No format given for the qafile; saving to PDF format.")
             qafile = qafile+'.pdf'
         plt.savefig(qafile,dpi=300)
-        msgs.info("Wrote QA: {:s}".format(qafile))
+        log.info("Wrote QA: {:s}".format(qafile))
     plt.show()
 
 def update_errors(fluxes, ivars, masks, fluxes_stack, ivars_stack, masks_stack,
@@ -1876,7 +1876,7 @@ def spec_reject_comb(wave_grid, wave_grid_mid, waves_list, fluxes_list, ivars_li
         iter += 1
 
     if (iter == maxiter_reject) & (maxiter_reject != 0):
-        msgs.warning('Maximum number of iterations maxiter={:}'.format(maxiter_reject) + ' reached in spec_reject_comb')
+        
log.warning('Maximum number of iterations maxiter={:}'.format(maxiter_reject) + ' reached in spec_reject_comb') out_gpms = np.copy(this_gpms) out_gpms_list = utils.array_to_explist(out_gpms, nspec_list=nspec_list) @@ -1888,7 +1888,7 @@ def spec_reject_comb(wave_grid, wave_grid_mid, waves_list, fluxes_list, ivars_li if verbose: for iexp in range(nexp): # nrej = pixels that are now masked that were previously good - msgs.info("Rejected {:d} pixels in exposure {:d}/{:d}".format(nrej[iexp], iexp, nexp)) + log.info("Rejected {:d} pixels in exposure {:d}/{:d}".format(nrej[iexp], iexp, nexp)) # Compute the final stack using this outmask wave_stack, flux_stack, ivar_stack, gpm_stack, nused = compute_stack( @@ -2338,7 +2338,7 @@ def multi_combspec(waves, fluxes, ivars, masks, sn_smooth_npix=None, # This is the effective good number of spectral pixels in the stack nspec_eff = np.sum([np.sum(wave > 1.0) for wave in waves]) / nexp sn_smooth_npix = int(np.round(0.1*nspec_eff)) - msgs.info('Using a sn_smooth_npix={:d} to decide how to scale and weight your spectra'.format(sn_smooth_npix)) + log.info('Using a sn_smooth_npix={:d} to decide how to scale and weight your spectra'.format(sn_smooth_npix)) wave_grid_mid, wave_stack, flux_stack, ivar_stack, mask_stack = combspec( waves, fluxes,ivars, masks, wave_method=wave_method, dwave=dwave, dv=dv, dloglam=dloglam, @@ -2563,7 +2563,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se # data shape nsetups=len(waves_arr_setup) - msgs.info(f'Number of setups to cycle through is: {nsetups}') + log.info(f'Number of setups to cycle through is: {nsetups}') if setup_ids is None: setup_ids = list(string.ascii_uppercase[:nsetups]) @@ -2595,7 +2595,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se # ngood.append(norder*nexp) # nspec_eff = np.sum(nspec_good)/np.sum(ngood) # sn_smooth_npix = int(np.round(0.1 * nspec_eff)) - # msgs.info('Using a sn_smooth_pix={:d} to decide how to scale and weight your spectra'.format(sn_smooth_npix)) + # log.info('Using a sn_smooth_pix={:d} to decide how to scale and weight your spectra'.format(sn_smooth_npix)) # Create the setup lists waves_setup_list = [utils.echarr_to_echlist(wave)[0] for wave in waves_arr_setup] @@ -2613,7 +2613,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se wave_grid_min=wave_grid_min, wave_grid_max=wave_grid_max, dwave=dwave, dv=dv, dloglam=dloglam, spec_samp_fact=spec_samp_fact) - msgs.info(f'The shape of the giant wave grid here is: {np.shape(wave_grid)}') + log.info(f'The shape of the giant wave grid here is: {np.shape(wave_grid)}') # Evaluate the sn_weights. This is done once at the beginning weights = [] rms_sn_setup_list = [] @@ -2745,7 +2745,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se # if the wavelength grid is non-monotonic, resample onto a loglam grid wave_grid_diff_ord = np.diff(wave_grid_ord) if np.any(wave_grid_diff_ord < 0): - msgs.warning(f'This order ({iord}) has a non-monotonic wavelength solution. Resampling now: ') + log.warning(f'This order ({iord}) has a non-monotonic wavelength solution. 
Resampling now: ') wave_grid_ord = np.linspace(np.min(wave_grid_ord), np.max(wave_grid_ord), len(wave_grid_ord)) wave_grid_diff_ord = np.diff(wave_grid_ord) @@ -2810,7 +2810,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se # QA for individual exposures for iexp in range(nexps[isetup]): # plot the residual distribution - msgs.info('QA plots for exposure {:} with new_sigma = {:}'.format(iexp, sigma_corrs_2d_exps[iexp])) + log.info('QA plots for exposure {:} with new_sigma = {:}'.format(iexp, sigma_corrs_2d_exps[iexp])) # plot the residual distribution for each exposure title_renorm = 'ech_combspec: Error distribution about stack for exposure {:d}/{:d} for setup={:s}'.format(iexp, nexps[isetup], setup_ids[isetup]) renormalize_errors_qa(outchi_2d_exps[:, iexp], gpm_chi_2d_exps[:, iexp], sigma_corrs_2d_exps[iexp], @@ -2883,14 +2883,14 @@ def get_wave_ind(wave_grid, wave_min, wave_max): diff[diff > 0] = np.inf if not np.any(diff < 0): ind_lower = 0 - msgs.warning('Your wave grid does not extend blue enough. Taking bluest point') + log.warning('Your wave grid does not extend blue enough. Taking bluest point') else: ind_lower = np.argmin(np.abs(diff)) diff = wave_max - wave_grid diff[diff > 0] = np.inf if not np.any(diff < 0): ind_upper = wave_grid.size-1 - msgs.warning('Your wave grid does not extend red enough. Taking reddest point') + log.warning('Your wave grid does not extend red enough. Taking reddest point') else: ind_upper = np.argmin(np.abs(diff)) @@ -3137,7 +3137,7 @@ def compute_coadd2d(ref_trace_stack, sciimg_stack, sciivar_stack, skymodel_stack nimgs =len(sciimg_stack) if weights is None: - msgs.info('No weights were provided. Using uniform weights.') + log.info('No weights were provided. Using uniform weights.') weights = (np.ones(nimgs)/float(nimgs)).tolist() shape_list = [sciimg.shape for sciimg in sciimg_stack] diff --git a/pypeit/core/collate.py b/pypeit/core/collate.py index ffb6dc3dd0..1acfebc507 100644 --- a/pypeit/core/collate.py +++ b/pypeit/core/collate.py @@ -20,7 +20,7 @@ from pypeit import specobjs from pypeit.spectrographs.util import load_spectrograph -from pypeit import msgs +from pypeit import log from pypeit import PypeItError diff --git a/pypeit/core/combine.py b/pypeit/core/combine.py index f58066ff64..44a79d0d39 100644 --- a/pypeit/core/combine.py +++ b/pypeit/core/combine.py @@ -6,7 +6,7 @@ from astropy import stats -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils @@ -109,7 +109,7 @@ def weighted_combine(weights, sci_list, var_list, inmask_stack, if nimgs == 1: # If only one image is passed in, simply return the input lists of images, but reshaped # to be (nspec, nspat) - msgs.warning('Cannot combine a single image. Returning input images') + log.warning('Cannot combine a single image. Returning input images') sci_list_out = [] for sci_stack in sci_list: sci_list_out.append(sci_stack.reshape(img_shape)) @@ -145,7 +145,7 @@ def weighted_combine(weights, sci_list, var_list, inmask_stack, mask_stack = np.logical_not(data_clipped.mask) # mask_stack = True are good values else: if sigma_clip and nimgs < 3: - msgs.warning('Sigma clipping requested, but you cannot sigma clip with less than 3 ' + log.warning('Sigma clipping requested, but you cannot sigma clip with less than 3 ' 'images. 
Proceeding without sigma clipping') mask_stack = inmask_stack # mask_stack = True are good values diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 3e849b95d5..42db790f60 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -14,7 +14,7 @@ from scipy.interpolate import interp1d import numpy as np -from pypeit import msgs, utils, specobj, specobjs +from pypeit import log, utils, specobj, specobjs from pypeit import PypeItError from pypeit.core import coadd, extract, flux_calib @@ -176,7 +176,7 @@ def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, ord Returns: `numpy.ndarray`_: The grating correction to apply """ - msgs.info("Calculating the grating correction") + log.info("Calculating the grating correction") # Calculate the grating correction grat_corr_tmp = spl_curr(wave_eval) / spl_ref(wave_eval) # Determine the useful overlapping wavelength range @@ -237,7 +237,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, whitelight_range = [np.min(wave), np.max(wave)] # Generate a spec1d object to hold the extracted spectrum - msgs.info("Initialising a PypeIt SpecObj spec1d file") + log.info("Initialising a PypeIt SpecObj spec1d file") sobj = specobj.SpecObj(pypeline, "DET01", SLITID=0) sobj.RA = wcscube.wcs.crval[0] sobj.DEC = wcscube.wcs.crval[1] @@ -264,7 +264,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, _varcube = utils.inverse(_ivarcube) # Generate a whitelight image, and fit a 2D Gaussian to estimate centroid and width - msgs.info("Making white light image") + log.info("Making white light image") wl_img = make_whitelight_fromcube(_flxcube, bpmcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) popt, pcov, model = fitGaussian2D(wl_img, norm=True) if boxcar_radius is None: @@ -274,7 +274,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, # Set the user-defined radius wid = boxcar_radius / np.sqrt(arcsecSQ) # Set the radius of the extraction boxcar for the sky determination - msgs.info("Using a boxcar radius of {:0.2f} arcsec".format(wid*np.sqrt(arcsecSQ))) + log.info("Using a boxcar radius of {:0.2f} arcsec".format(wid*np.sqrt(arcsecSQ))) widsky = 2 * wid # Setup the coordinates of the mask @@ -283,7 +283,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, xx, yy = np.meshgrid(x, y, indexing='ij') # Generate a mask - msgs.info("Generating an object mask") + log.info("Generating an object mask") newshape = (numxx * subpixel, numyy * subpixel) mask = np.zeros(newshape) ww = np.where((np.sqrt((xx - popt[1]) ** 2 + (yy - popt[2]) ** 2) < wid)) @@ -291,7 +291,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, mask = utils.rebinND(mask, (numxx, numyy)).reshape(numxx, numyy, 1) # Generate a sky mask - msgs.info("Generating a sky mask") + log.info("Generating a sky mask") newshape = (numxx * subpixel, numyy * subpixel) smask = np.zeros(newshape) ww = np.where((np.sqrt((xx - popt[1]) ** 2 + (yy - popt[2]) ** 2) < widsky)) @@ -300,7 +300,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, # Subtract off the object mask region, so that we just have an annulus around the object smask -= mask - msgs.info("Subtracting the residual sky") + log.info("Subtracting the residual sky") # Subtract the residual sky from the datacube skymask = np.logical_not(bpmcube) * smask skycube = _flxcube * skymask @@ -312,7 +312,7 @@ def 
extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, sky_val = np.sum(wl_img[:, :, np.newaxis] * smask) / np.sum(smask) wl_img -= sky_val - msgs.info("Extracting a boxcar spectrum of datacube") + log.info("Extracting a boxcar spectrum of datacube") # Construct an image that contains the fraction of flux included in the # boxcar extraction at each wavelength interval norm_flux = wl_img[:,:,np.newaxis] * mask @@ -343,7 +343,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, sobj.S2N = np.median(box_flux * np.sqrt(utils.inverse(box_var))) # Now do the OPTIMAL extraction - msgs.info("Extracting an optimal spectrum of datacube") + log.info("Extracting an optimal spectrum of datacube") # First, we need to rearrange the datacube and inverse variance cube into a 2D array. # The 3D -> 2D conversion is done so that there is a spectral and spatial dimension, # and the brightest white light pixel is transformed to be at the centre column of the 2D @@ -352,7 +352,7 @@ def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, # can be applied. optkern = wl_img if optfwhm is not None: - msgs.info("Generating a 2D Gaussian kernel for the optimal extraction, with FWHM = {:.2f} pixels".format(optfwhm)) + log.info("Generating a 2D Gaussian kernel for the optimal extraction, with FWHM = {:.2f} pixels".format(optfwhm)) x = np.linspace(0, wl_img.shape[0] - 1, wl_img.shape[0]) y = np.linspace(0, wl_img.shape[1] - 1, wl_img.shape[1]) xx, yy = np.meshgrid(x, y, indexing='ij') @@ -433,7 +433,7 @@ def make_good_skymask(slitimg, tilts): Returns: `numpy.ndarray`_: A mask of the good sky pixels (True = good) """ - msgs.info("Masking edge pixels where the sky model is poor") + log.info("Masking edge pixels where the sky model is poor") # Initialise the GPM gpm = np.zeros(slitimg.shape, dtype=bool) # Find unique slits @@ -546,7 +546,7 @@ def get_whitelight_pixels(all_wave, all_slitid, min_wl, max_wl): ww = np.where((_all_wave[ff] > min_wl) & (_all_wave[ff] < max_wl)) out_slitid[ff][ww] = _all_slitid[ff][ww] else: - msgs.warning("Datacubes do not completely overlap in wavelength.") + log.warning("Datacubes do not completely overlap in wavelength.") out_slitid = _all_slitid min_wl, max_wl = None, None for ff in range(numframes): @@ -589,7 +589,7 @@ def get_whitelight_range(wavemin, wavemax, wl_range): wlrng = [wavemin, wavemax] if wl_range[0] is not None: if wl_range[0] < wavemin: - msgs.warning( + log.warning( f"The user-specified minimum wavelength ({wl_range[0]:.2f}) to use for the white " f"light\nimages is lower than the recommended value ({wavemin:.2f}),\n" "which ensures that all spaxels cover the same wavelength range." @@ -597,13 +597,13 @@ def get_whitelight_range(wavemin, wavemax, wl_range): wlrng[0] = wl_range[0] if wl_range[1] is not None: if wl_range[1] > wavemax: - msgs.warning( + log.warning( f"The user-specified maximum wavelength ({wl_range[1]:.2f}) to use for the white " "light\nimages is greater than the recommended value ({wavemax:.2f}),\n" "which ensures that all spaxels cover the same wavelength range." 
)
             wlrng[1] = wl_range[1]
-    msgs.info("The white light images will cover the wavelength range: {0:.2f}A - {1:.2f}A".format(wlrng[0], wlrng[1]))
+    log.info("The white light images will cover the wavelength range: {0:.2f}A - {1:.2f}A".format(wlrng[0], wlrng[1]))
     return wlrng
 
@@ -718,7 +718,7 @@ def align_user_offsets(ifu_ra, ifu_dec, ra_offset, dec_offset):
         # Apply the shift
         out_ra_offsets[ff] = ref_shift_ra[ff] + ra_offset[ff]
         out_dec_offsets[ff] = ref_shift_dec[ff] + dec_offset[ff]
-        msgs.info(
+        log.info(
             f"Spatial shift of cube #{ff + 1}:\nRA, DEC (arcsec) = {ra_offset[ff]*3600.0:+0.3f} "
             f"E, {dec_offset[ff]*3600.0:+0.3f} N"
         )
@@ -755,28 +755,28 @@ def set_voxel_sampling(spatscale, specscale, dspat=None, dwv=None):
     # Make sure all frames have consistent pixel scales
     ratio = (spatscale[:, 0] - spatscale[0, 0]) / spatscale[0, 0]
     if np.any(np.abs(ratio) > 1E-4):
-        msgs.warning("The pixel scales of all input frames are not the same!")
+        log.warning("The pixel scales of all input frames are not the same!")
         spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,0]*3600.0])
-        msgs.info("Pixel scales of all input frames:\n" + spatstr + "arcseconds")
+        log.info("Pixel scales of all input frames:\n" + spatstr + " arcseconds")
     # Make sure all frames have consistent slicer scales
     ratio = (spatscale[:, 1] - spatscale[0, 1]) / spatscale[0, 1]
     if np.any(np.abs(ratio) > 1E-4):
-        msgs.warning("The slicer scales of all input frames are not the same!")
+        log.warning("The slicer scales of all input frames are not the same!")
         spatstr = ", ".join(["{0:.6f}".format(ss) for ss in spatscale[:,1]*3600.0])
-        msgs.info("Slicer scales of all input frames:\n" + spatstr + "arcseconds")
+        log.info("Slicer scales of all input frames:\n" + spatstr + " arcseconds")
     # Make sure all frames have consistent wavelength sampling
     ratio = (specscale - specscale[0]) / specscale[0]
     if np.any(np.abs(ratio) > 1E-2):
-        msgs.warning("The wavelength samplings of the input frames are not the same!")
+        log.warning("The wavelength samplings of the input frames are not the same!")
         specstr = ", ".join(["{0:.6f}".format(ss) for ss in specscale])
-        msgs.info("Wavelength samplings of all input frames:\n" + specstr + "Angstrom")
+        log.info("Wavelength samplings of all input frames:\n" + specstr + " Angstrom")
 
     # If the user has not specified the spatial scale, then set it appropriately now to the largest spatial scale
     _dspat = np.max(spatscale) if dspat is None else dspat
-    msgs.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * _dspat))
+    log.info("Adopting a square pixel spatial scale of {0:f} arcsec".format(3600.0 * _dspat))
     # If the user has not specified the spectral sampling, then set it now to the largest value
     _dwv = np.max(specscale) if dwv is None else dwv
-    msgs.info("Adopting a wavelength sampling of {0:f} Angstrom".format(_dwv))
+    log.info("Adopting a wavelength sampling of {0:f} Angstrom".format(_dwv))
     return _dspat, _dwv
 
@@ -1005,7 +1005,7 @@ def create_wcs(raImg, decImg, waveImg, slitid_img_gpm, dspat, dwave,
         numra, numdec = reference_image.shape
     cubewcs = generate_WCS(coord_min, coord_dlt, numra, equinox=equinox, name=specname)
-    msgs.info(
+    log.info(
         f'\n{"-"*40}'
         "\nParameters of the WCS:"
         f"\nRA min = {coord_min[0]}"
@@ -1046,7 +1046,7 @@ def generate_WCS(crval, cdelt, numra, equinox=2000.0, name="PYP_SPEC"):
         `astropy.wcs.WCS`_ : astropy WCS to be used for the combined cube
     """
     # Create a new WCS object. 
- msgs.info("Generating WCS") + log.info("Generating WCS") w = wcs.WCS(naxis=3) w.wcs.equinox = equinox w.wcs.name = name @@ -1301,7 +1301,7 @@ def compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, containing the optimal weights of each pixel for all frames, with shape (nspec, nspat). """ - msgs.info("Calculating the optimal weights of each pixel") + log.info("Calculating the optimal weights of each pixel") # Check the inputs for combinations of lists or not, and then determine the number of frames _raImg, _decImg, _waveImg, _sciImg, _ivarImg, _slitidImg, \ _all_wcs, _all_tilts, _all_slits, _all_align, _all_dar, _ra_offsets, _dec_offsets = \ @@ -1311,7 +1311,7 @@ def compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, # If there's only one frame, use uniform weighting if numframes == 1: - msgs.warning("Only one frame provided. Using uniform weighting.") + log.warning("Only one frame provided. Using uniform weighting.") return np.ones_like(sciImg) # Check the WCS bounds @@ -1326,7 +1326,7 @@ def compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, # their are hot pixels in the white light image, which there are plenty of since the edges of the slits are very # poorly behaved. #idx_max = np.unravel_index(np.argmax(whitelight_img), whitelight_img.shape) - msgs.info("Highest S/N object located at spaxel (x, y) = {0:d}, {1:d}".format(idx_max[0], idx_max[1])) + log.info("Highest S/N object located at spaxel (x, y) = {0:d}, {1:d}".format(idx_max[0], idx_max[1])) # Make the bin edges to be at +/- 1 pixels around the maximum (i.e. summing 9 pixels total) numwav = int((_wave_max - _wave_min) / dwv) @@ -1350,7 +1350,7 @@ def compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, flux_stack = np.zeros((numwav, numframes)) ivar_stack = np.zeros((numwav, numframes)) for ff in range(numframes): - msgs.info("Extracting spectrum of highest S/N detection from frame {0:d}/{1:d}".format(ff + 1, numframes)) + log.info("Extracting spectrum of highest S/N detection from frame {0:d}/{1:d}".format(ff + 1, numframes)) flxcube, sigcube, bpmcube, wave = \ generate_cube_subpixel(whitelightWCS, bins, _sciImg[ff], _ivarImg[ff], _waveImg[ff], _slitidImg[ff], np.ones(_sciImg[ff].shape), _all_wcs[ff], @@ -1381,7 +1381,7 @@ def compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, ww = (slitidImg[ff] > 0) all_wghts[ff][ww] = interp1d(wave_spec, weights[ff], kind='cubic', bounds_error=False, fill_value="extrapolate")(waveImg[ff][ww]) - msgs.info("Optimal weighting complete") + log.info("Optimal weighting complete") return all_wghts @@ -1482,7 +1482,7 @@ def generate_image_subpixel(image_wcs, bins, sciImg, ivarImg, waveImg, slitid_im all_wl_imgs = np.zeros((numra, numdec, numframes)) # Loop through all frames and generate white light images for fr in range(numframes): - msgs.info(f"Creating image {fr + 1}/{numframes}") + log.info(f"Creating image {fr + 1}/{numframes}") # Subpixellate img, _, _ = subpixellate(image_wcs, bins, _sciImg[fr], _ivarImg[fr], _waveImg[fr], _slitid_img_gpm[fr], _wghtImg[fr], _all_wcs[fr], _tilts[fr], _slits[fr], _astrom_trans[fr], _all_dar[fr], _ra_offset[fr], _dec_offset[fr], @@ -1622,12 +1622,12 @@ def generate_cube_subpixel(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_im whitelight_range[0] = wave[0] if whitelight_range[1] is None: whitelight_range[1] = wave[-1] - msgs.info("White light image covers the wavelength range {0:.2f} A - {1:.2f} A".format( + log.info("White light image covers the wavelength range {0:.2f} A 
- {1:.2f} A".format( whitelight_range[0], whitelight_range[1])) # Get the output filename for the white light image out_whitelight = get_output_whitelight_filename(outfile) whitelight_img = make_whitelight_fromcube(flxcube, bpmcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) - msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) + log.info("Saving white light image as: {0:s}".format(out_whitelight)) img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) img_hdu.writeto(out_whitelight, overwrite=overwrite) @@ -1767,9 +1767,9 @@ def subpixellate(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_img_gpm, wgh # Loop through all slits for sl, spatid in enumerate(this_slits.spat_id): if numframes == 1: - msgs.info(f"Resampling slit {sl + 1}/{this_slits.nslits}") + log.info(f"Resampling slit {sl + 1}/{this_slits.nslits}") else: - msgs.info(f"Resampling slit {sl + 1}/{this_slits.nslits} of frame {fr + 1}/{numframes}") + log.info(f"Resampling slit {sl + 1}/{this_slits.nslits} of frame {fr + 1}/{numframes}") # Find the pixels on this slit this_sl = np.where(this_spatid == spatid) wpix = (this_specpos[this_sl], this_spatpos[this_sl]) @@ -1801,7 +1801,7 @@ def subpixellate(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_img_gpm, wgh for ss in range(slice_subpixel): if slice_subpixel > 1: # Only print this if there are multiple subslices - msgs.info(f"Resampling subslice {ss+1}/{slice_subpixel}") + log.info(f"Resampling subslice {ss+1}/{slice_subpixel}") # Generate an RA/Dec image for this subslice raimg, decimg, minmax = this_slits.get_radec_image(this_wcs, this_astrom_trans, this_tilts, slit_compute=sl, slice_offset=slice_offs[ss]) @@ -1826,7 +1826,7 @@ def subpixellate(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_img_gpm, wgh if num_all_subpixels == 1 or skip_subpix_weights: subpix_wght = 1.0 else: - msgs.info("Preparing subpixel weights") + log.info("Preparing subpixel weights") vox_index = np.floor(outshape * (vox_coord - binrng[:,0].reshape((1, 1, 3))) / (binrng[:,1] - binrng[:,0]).reshape((1, 1, 3))).astype(int) # Convert to a unique index diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 9b1e3bfcb6..1eb7567440 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -14,7 +14,7 @@ from IPython import embed -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit import bspline @@ -133,7 +133,7 @@ def extract_optimal(imgminsky, ivar, mask, waveimg, skyimg, thismask, oprof, # Exit gracefully if we have no positive object profiles, since that means something was wrong with object fitting if not np.any(oprof > 0.0): - msgs.warning('Object profile is zero everywhere. This aperture is junk.') + log.warning('Object profile is zero everywhere. This aperture is junk.') return mincol = np.min(ispat) @@ -756,11 +756,11 @@ def return_gaussian(sigma_x, norm_obj, fwhm, med_sn2, obj_string, profile_model = np.exp(-0.5*sigma_x**2)/np.sqrt(2.0 * np.pi)*(sigma_x ** 2 < 25.) info_string = "FWHM=" + "{:6.2f}".format(fwhm) + ", S/N=" + "{:8.3f}".format(np.sqrt(med_sn2)) title_string = obj_string + ', ' + info_string - msgs.info(title_string) + log.info(title_string) inf = np.isfinite(profile_model) == False ninf = np.sum(inf) if ninf != 0: - msgs.warning("Nan pixel values in object profile... setting them to zero") + log.warning("Nan pixel values in object profile... 
setting them to zero") profile_model[inf] = 0.0 if show_profile: qa_fit_profile(sigma_x, norm_obj, profile_model, title = title_string, l_limit = l_limit, r_limit = r_limit, @@ -893,7 +893,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, eligible_pixels = np.sum((wave >= wave_min) & (wave <= wave_max)) good_pix_frac = 0.05 if (np.sum(indsp) < good_pix_frac*eligible_pixels) or (eligible_pixels == 0): - msgs.warning( + log.warning( 'There are no pixels eligible to be fit for the object profile.\nThere is likely an ' f'issue in local_skysub_extract. Returning a Gassuain with fwhm={thisfwhm:5.3f}' ) @@ -910,7 +910,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, try: cont_flux, _ = c_answer.value(wave[indsp]) except: - msgs.warning( + log.warning( 'Problem estimating S/N ratio of spectrum\nThere is likely an issue in ' f'local_skysub_extract. Returning a Gassuain with fwhm={thisfwhm:5.3f}' ) @@ -960,9 +960,9 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, bounds_error=False,fill_value = 'extrapolate') sn2_img[totmask] = sn2_interp(waveimg[totmask]) else: - msgs.warning('All pixels are masked') + log.warning('All pixels are masked') - msgs.info('sqrt(med(S/N)^2) = ' + "{:5.2f}".format(np.sqrt(med_sn2))) + log.info('sqrt(med(S/N)^2) = ' + "{:5.2f}".format(np.sqrt(med_sn2))) # TODO -- JFH document this if(med_sn2 <= 2.0): @@ -1010,13 +1010,13 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, xtemp = (np.cumsum(np.outer(4.0 + np.sqrt(np.fmax(sn2_1, 0.0)),np.ones(nspat)))).reshape((nspec,nspat)) xtemp = xtemp/xtemp.max() - msgs.info("Gaussian vs b-spline of width " + "{:6.2f}".format(thisfwhm) + " pixels") + log.info("Gaussian vs b-spline of width " + "{:6.2f}".format(thisfwhm) + " pixels") area = 1.0 # If we have too few pixels to fit a profile or S/N is too low, just use a Gaussian profile if((ngood < 10) or (med_sn2 < sn_gauss**2) or (gauss is True)): - msgs.info("Too few good pixels or S/N <" + "{:5.1f}".format(sn_gauss) + " or gauss flag set") - msgs.info("Returning Gaussian profile") + log.info("Too few good pixels or S/N <" + "{:5.1f}".format(sn_gauss) + " or gauss flag set") + log.info("Returning Gaussian profile") profile_model = return_gaussian(sigma_x, norm_obj, thisfwhm, med_sn2, obj_string,show_profile,ind=good,xtrunc=7.0) return profile_model, trace_in, fwhmfit, med_sn2 @@ -1031,7 +1031,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, max_sigma = np.fmin(sigma_x.flat[good].max(), (abs_sigma)) nb = (np.arcsinh(abs_sigma)/sinh_space).astype(int) + 1 else: - msgs.info("Using prof_nsigma= " + "{:6.2f}".format(prof_nsigma) + " for extended/bright objects") + log.info("Using prof_nsigma= " + "{:6.2f}".format(prof_nsigma) + " for extended/bright objects") nb = np.round(prof_nsigma > 10) max_sigma = prof_nsigma min_sigma = -1*prof_nsigma @@ -1064,7 +1064,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, # TODO I don't follow the logic behind this statement but I'm leaving it for now. If the median is large it is used, otherwise we user zero??? 
if (np.abs(median_fit) > 0.01): - msgs.info("Median flux level in profile is not zero: median = " + "{:7.4f}".format(median_fit)) + log.info("Median flux level in profile is not zero: median = " + "{:7.4f}".format(median_fit)) else: median_fit = 0.0 @@ -1074,7 +1074,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, min_level = peak*np.exp(-0.5*limit**2) bspline_fwhm = (rwhm - lwhm)*thisfwhm/2.3548 - msgs.info("Bspline FWHM: " + "{:7.4f}".format(bspline_fwhm) + ", compared to initial object finding FWHM: " + "{:7.4f}".format(thisfwhm) ) + log.info("Bspline FWHM: " + "{:7.4f}".format(bspline_fwhm) + ", compared to initial object finding FWHM: " + "{:7.4f}".format(thisfwhm) ) sigma = sigma * (rwhm-lwhm)/2.3548 limit = limit * (rwhm-lwhm)/2.3548 @@ -1094,7 +1094,7 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, else: r_limit = max_sigma - msgs.info("Trace limits: limit = " + "{:7.4f}".format(limit) + ", min_level = " + "{:7.4f}".format(min_level) + + log.info("Trace limits: limit = " + "{:7.4f}".format(limit) + ", min_level = " + "{:7.4f}".format(min_level) + ", l_limit = " + "{:7.4f}".format(l_limit) + ", r_limit = " + "{:7.4f}".format(r_limit)) # Just grab the data points within the limits @@ -1104,8 +1104,8 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, # If we have too few pixels after this step, then again just use a Gaussian profile and return. if(ninside < 10): - msgs.info("Too few pixels inside l_limit and r_limit") - msgs.info("Returning Gaussian profile") + log.info("Too few pixels inside l_limit and r_limit") + log.info("Returning Gaussian profile") profile_model = return_gaussian(sigma_x, norm_obj, bspline_fwhm, med_sn2, obj_string,show_profile, ind=good, l_limit=l_limit, r_limit=r_limit, xlim=7.0) return (profile_model, trace_in, fwhmfit, med_sn2) @@ -1137,8 +1137,8 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, maxiter=1, kwargs_bspline={'nbkpts':nbkpts}) # Check to see if the mode fit failed, if so punt and return a Gaussian if not np.any(mode_shift_out[1]): - msgs.info('B-spline fit to trace correction failed for fit to ninside = {:}'.format(ninside) + ' pixels') - msgs.info("Returning Gaussian profile") + log.info('B-spline fit to trace correction failed for fit to ninside = {:}'.format(ninside) + ' pixels') + log.info("Returning Gaussian profile") profile_model = return_gaussian(sigma_x, norm_obj, bspline_fwhm, med_sn2, obj_string, show_profile, ind=good, l_limit=l_limit, r_limit=r_limit, xlim=7.0) return (profile_model, trace_in, fwhmfit, med_sn2) @@ -1160,8 +1160,8 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, norm_ivar.flat[inside], profile_basis, maxiter=1, fullbkpt=mode_shift_set.breakpoints) if not np.any(mode_stretch_out[1]): - msgs.info('B-spline fit to width correction failed for fit to ninside = {:}'.format(ninside) + ' pixels') - msgs.info("Returning Gaussian profile") + log.info('B-spline fit to width correction failed for fit to ninside = {:}'.format(ninside) + ' pixels') + log.info("Returning Gaussian profile") profile_model = return_gaussian(sigma_x, norm_obj, bspline_fwhm, med_sn2, obj_string, show_profile,ind=good, l_limit=l_limit, r_limit=r_limit, xlim=7.0) return (profile_model, trace_in, fwhmfit, med_sn2) @@ -1177,9 +1177,9 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, ratio_20 = (h2 / (h0 + (h0 == 0.0))) sigma_factor = 0.3 * ratio_20 / (1.0 + np.abs(ratio_20)) - 
msgs.info("Iteration# " + "{:3d}".format(iiter)) - msgs.info("Median abs value of trace correction = " + "{:8.3f}".format(np.median(np.abs(delta_trace_corr)))) - msgs.info("Median abs value of width correction = " + "{:8.3f}".format(np.median(np.abs(sigma_factor)))) + log.info("Iteration# " + "{:3d}".format(iiter)) + log.info("Median abs value of trace correction = " + "{:8.3f}".format(np.median(np.abs(delta_trace_corr)))) + log.info("Median abs value of width correction = " + "{:8.3f}".format(np.median(np.abs(sigma_factor)))) sigma = sigma*(1.0 + sigma_factor) area = area * h0/(1.0 + sigma_factor) @@ -1197,8 +1197,8 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, norm_ivar.flat[inside[ss]],pb[ss], nord=4, bkpt=bkpt[keep], maxiter=2) if not np.any(bset_out[1]): - msgs.info('B-spline to profile in trace and width correction loop failed for fit to ninside = {:}'.format(ninside) + ' pixels') - msgs.info("Returning Gaussian profile") + log.info('B-spline to profile in trace and width correction loop failed for fit to ninside = {:}'.format(ninside) + ' pixels') + log.info("Returning Gaussian profile") profile_model = return_gaussian(sigma_x, norm_obj, bspline_fwhm, med_sn2, obj_string, show_profile, ind=good, l_limit=l_limit,r_limit=r_limit, xlim=7.0) return (profile_model, trace_in, fwhmfit, med_sn2) @@ -1307,21 +1307,21 @@ def fit_profile(image, ivar, waveimg, thismask, spat_img, trace_in, wave, chi_med = np.median(res_mode[chi_good]**2) chi_zero = np.median(norm_obj.flat[ss[inside]]**2*norm_ivar.flat[ss[inside]]) - msgs.info("-------------------- Results of Profile Fit --------------------") - msgs.info(" min(fwhmfit)={:5.2f}".format(fwhmfit.min()) + + log.info("-------------------- Results of Profile Fit --------------------") + log.info(" min(fwhmfit)={:5.2f}".format(fwhmfit.min()) + " max(fwhmfit)={:5.2f}".format(fwhmfit.max()) + " median(chi^2)={:5.2f}".format(chi_med) + " nbkpts={:2d}".format(bkpt.size)) - msgs.info("-----------------------------------------------------------------") + log.info("-----------------------------------------------------------------") nxinf = np.sum(np.isfinite(xnew) == False) if (nxinf != 0): - msgs.warning("Nan pixel values in trace correction") - msgs.warning("Returning original trace....") + log.warning("Nan pixel values in trace correction") + log.warning("Returning original trace....") xnew = trace_in inf = np.isfinite(profile_model) == False ninf = np.sum(inf) if (ninf != 0): - msgs.warning("Nan pixel values in object profile... setting them to zero") + log.warning("Nan pixel values in object profile... setting them to zero") profile_model[inf] = 0.0 # Normalize profile norm = np.outer(np.sum(profile_model, 1), np.ones(nspat)) diff --git a/pypeit/core/findobj_skymask.py b/pypeit/core/findobj_skymask.py index 3a2efcac73..227eb8385f 100644 --- a/pypeit/core/findobj_skymask.py +++ b/pypeit/core/findobj_skymask.py @@ -13,7 +13,7 @@ import astropy.stats from astropy import table -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit import specobj @@ -69,7 +69,7 @@ def create_skymask(sobjs, thismask, slit_left, slit_righ, box_rad_pix=None, trim # Number of objects nobj = len(sobjs) if nobj == 0: - msgs.info('No objects were detected. The entire slit will be used for sky subtraction.') + log.info('No objects were detected. 
The entire slit will be used for sky subtraction.') return thismask[thismask] # Compute the object mask @@ -107,12 +107,12 @@ def create_skymask(sobjs, thismask, slit_left, slit_righ, box_rad_pix=None, trim spat_img = np.tile(np.arange(nspat, dtype=int), (nspec,1)) # Boxcar radius? if box_rad_pix is not None: - msgs.info("Using boxcar radius for masking") + log.info("Using boxcar radius for masking") # Loop me for iobj in range(nobj): # Create a mask for the pixels that will contribute to the object skymask_radius = box_rad_pix if box_rad_pix is not None else sobjs[iobj].FWHM - msgs.info(f"Masking around object {iobj+1} within a radius = {skymask_radius} pixels") + log.info(f"Masking around object {iobj+1} within a radius = {skymask_radius} pixels") # slit_img = np.outer(sobjs[iobj].TRACE_SPAT, np.ones(nspat)) # central trace replicated spatially slit_img = np.tile(sobjs[iobj].TRACE_SPAT, (nspat,1)).T objmask_now = thismask \ @@ -124,7 +124,7 @@ def create_skymask(sobjs, thismask, slit_left, slit_righ, box_rad_pix=None, trim # TODO: There is this hard-coded check here, and then there is a similar # check in skysub.global_skysub. Do we need both? if np.sum(skymask_fwhm)/np.sum(thismask) < 0.10: - msgs.warning('More than 90% of usable area on this slit would be masked and not used by ' + log.warning('More than 90% of usable area on this slit would be masked and not used by ' 'global sky subtraction. Something is probably wrong with object finding for ' 'this slit. Not masking object for global sky subtraction.') skymask_fwhm = np.copy(thismask) @@ -310,7 +310,7 @@ def ech_findobj_ineach_order( sobjs = specobjs.SpecObjs() for iord, iorder in enumerate(order_vec): qa_title = 'Finding objects on order # {:d}'.format(iorder) - msgs.info(qa_title) + log.info(qa_title) thisslit_gpm = slitmask == slit_spats[iord] inmask_iord = inmask & thisslit_gpm specobj_dict['SLITID'] = slit_spats[iord] @@ -418,7 +418,7 @@ def ech_fof_sobjs(sobjs:specobjs.SpecObjs, for iord in range(norders): on_order = (obj_id_init == uni_obj_id_init[iobj]) & (sobjs.ECH_ORDER == order_vec[iord]) if (np.sum(on_order) > 1): - msgs.warning( + log.warning( f'Found multiple objects in a FOF group on order iord={order_vec[iord]}\n' 'Spawning new objects to maintain a single object per order.' ) @@ -442,7 +442,7 @@ def ech_fof_sobjs(sobjs:specobjs.SpecObjs, # Finish uni_obj_id, uni_ind = np.unique(obj_id, return_index=True) nobj = len(uni_obj_id) - msgs.info('FOF matching found {:d}'.format(nobj) + ' unique objects') + log.info('FOF matching found {:d}'.format(nobj) + ' unique objects') return obj_id @@ -517,7 +517,7 @@ def ech_fill_in_orders(sobjs:specobjs.SpecObjs, # Check standard star if std_trace is not None and len(std_trace) != norders: - msgs.warning('Standard star trace does not match the number of orders in the echelle data.' + log.warning('Standard star trace does not match the number of orders in the echelle data.' 
' Will use the slit edges to trace the object in the missing orders.') # For traces @@ -609,7 +609,7 @@ def ech_fill_in_orders(sobjs:specobjs.SpecObjs, on_order = (sobjs_align.ECH_OBJID == uni_obj_id[iobj]) & (sobjs_align.ECH_ORDER == this_order) num_on_order = np.sum(on_order) if num_on_order == 0: - msgs.info(f"Adding object={uni_obj_id[iobj]} to order={this_order}") + log.info(f"Adding object={uni_obj_id[iobj]} to order={this_order}") # If it is not, create a new sobjs and add to sobjs_align and assign required tags thisobj = specobj.SpecObj('Echelle', sobjs_align[0].DET, OBJTYPE=sobjs_align[0].OBJTYPE, @@ -824,18 +824,18 @@ def ech_cutobj_on_snr( iobj_keep_not_hand += 1 else: if not nperorder_constraint: - msgs.info('Purging object #{:d}'.format(iobj) + + log.info('Purging object #{:d}'.format(iobj) + ' since there are already {:d} objects automatically identified ' 'and you set nperorder={:d}'.format(iobj_keep_not_hand-1, nperorder)) else: - msgs.info('Purging object #{:d}'.format(iobj) + ' which does not satisfy max_snr > {:5.2f} OR min_snr > {:5.2f}'.format(max_snr, min_snr) + + log.info('Purging object #{:d}'.format(iobj) + ' which does not satisfy max_snr > {:5.2f} OR min_snr > {:5.2f}'.format(max_snr, min_snr) + ' on at least nabove_min_snr >= {:d}'.format(nabove_min_snr) + ' orders') nobj_trim = np.sum(keep_obj) if nobj_trim == 0: - msgs.warning('No objects found') + log.warning('No objects found') sobjs_final = specobjs.SpecObjs() return sobjs_final @@ -947,7 +947,7 @@ def ech_pca_traces( for iobj in range(nobj_trim): indx_obj_id = sobjs_final.ECH_OBJID == (iobj + 1) # PCA predict all the orders now (where we have used the standard or slit boundary for the bad orders above) - msgs.info('Fitting echelle object finding PCA for object {:d}/{:d} with median SNR = {:5.3f}'.format( + log.info('Fitting echelle object finding PCA for object {:d}/{:d} with median SNR = {:5.3f}'.format( iobj + 1,nobj_trim,np.median(sobjs_final[indx_obj_id].ech_snr))) pca_fits[:,:,iobj] \ = tracepca.pca_trace_object( @@ -1788,13 +1788,13 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, sobjs = specobjs.SpecObjs() if hand_extract_dict is None: # Instantiate a null specobj and return - msgs.info('No objects found automatically. Consider manual extraction.') + log.info('No objects found automatically. 
Consider manual extraction.') return sobjs else: nobj_reg = 0 # Cannot define the SNR if gpm_smash is all False snr_smash_smth = np.zeros_like(flux_smash_smth) - msgs.info('No objects found automatically.') + log.info('No objects found automatically.') else: # Compute the formal corresponding variance over the set of pixels that are not masked by gpm_sigclip var_rect = utils.inverse(ivar_rect) @@ -1820,12 +1820,12 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, npeak_not_near_edge = np.sum(np.logical_not(near_edge_bpm)) if np.any(near_edge_bpm): - msgs.warning('Discarding {:d}'.format(np.sum(near_edge_bpm)) + + log.warning('Discarding {:d}'.format(np.sum(near_edge_bpm)) + ' at spatial pixels spat = {:}'.format(x_peaks_all[near_edge_bpm]) + ' which land within trim_edg = (left, right) = {:}'.format(trim_edg) + ' pixels from the slit boundary for this nsamp = {:5.2f}'.format(nsamp) + ' wide slit') - msgs.warning('You must decrease from the current value of trim_edg in order to keep them') - msgs.warning('Such edge objects are often spurious') + log.warning('You must decrease from the current value of trim_edg in order to keep them') + log.warning('Such edge objects are often spurious') # If the user requested the nperslit most significant peaks have been requested, then only return these @@ -1841,7 +1841,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, nperslit_bpm = np.zeros(npeaks_all, dtype=bool) if np.any(nperslit_bpm): - msgs.warning('Discarding {:d}'.format(np.sum(nperslit_bpm)) + + log.warning('Discarding {:d}'.format(np.sum(nperslit_bpm)) + ' at spatial pixels spat = {:} and SNR = {:}'.format( x_peaks_all[nperslit_bpm], snr_peaks_all[nperslit_bpm]) + ' which are below SNR_thresh={:5.3f} set because the maximum number of objects '.format(snr_thresh_perslit) + @@ -1882,7 +1882,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, if std_trace is not None: # Print a status message for the first object if iobj == 0: - msgs.info('Using input STANDARD star trace as crutch for object tracing') + log.info('Using input STANDARD star trace as crutch for object tracing') x_trace = np.interp(specmid, spec_vec, std_trace) shift = np.interp(specmid, spec_vec, @@ -1893,7 +1893,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, # ToDO make this the average left and right boundary instead. That would be more robust. # Print a status message for the first object if iobj == 0: - msgs.info('Using slit edges as crutch for object tracing') + log.info('Using slit edges as crutch for object tracing') sobjs[iobj].TRACE_SPAT = slit_left + xsize*sobjs[iobj].SPAT_FRACPOS sobjs[iobj].trace_spec = spec_vec @@ -1915,14 +1915,14 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, # TODO: Why is this not done way above? # It appears possible to have an initial object detection, but then # have it go away.. - msgs.info('No objects found automatically. Consider manual extraction.') + log.info('No objects found automatically. Consider manual extraction.') return specobjs.SpecObjs() - msgs.info("Automatic finding routine found {0:d} objects".format(len(sobjs))) + log.info("Automatic finding routine found {0:d} objects".format(len(sobjs))) # Fit the object traces if len(sobjs) > 0: - msgs.info('Fitting the traces') + log.info('Fitting the traces') # Note the transpose is here to pass in the TRACE_SPAT correctly. 
xinit_fweight = np.copy(sobjs.TRACE_SPAT.T).astype(float) spec_mask = (spec_vec >= spec_min_max_out[0]) & (spec_vec <= spec_min_max_out[1]) @@ -1948,7 +1948,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, hand_extract_spec, hand_extract_spat, hand_extract_det, hand_extract_fwhm, \ hand_extract_boxcar = [hand_extract_dict[key] for key in [ 'spec', 'spat', 'detname', 'fwhm', 'boxcar_rad']] - msgs.info(f'Checking if the hand apertures at {hand_extract_spec} are in the slit') + log.info(f'Checking if the hand apertures at {hand_extract_spec} are in the slit') # Determine if these hand apertures land on the slit in question hand_on_slit = np.where(np.array(thismask[np.rint(hand_extract_spec).astype(int), np.rint(hand_extract_spat).astype(int)])) @@ -1958,7 +1958,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, hand_extract_fwhm = hand_extract_fwhm[hand_on_slit] hand_extract_boxcar = hand_extract_boxcar[hand_on_slit] nobj_hand = len(hand_extract_spec) - msgs.info("Implementing hand apertures for {} sources on the slit".format(nobj_hand)) + log.info("Implementing hand apertures for {} sources on the slit".format(nobj_hand)) # Decide how to assign a trace to the hand objects @@ -1970,7 +1970,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, elif std_trace is not None: # If no objects found, use the standard? trace_model = std_trace else: # If no objects or standard use the slit boundary - msgs.warning("No source to use as a trace. Using the slit boundary") + log.warning("No source to use as a trace. Using the slit boundary") trace_model = slit_left # Loop over hand_extract apertures and create and assign specobj @@ -2034,7 +2034,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, close = np.abs(sobjs[reg_ind].SPAT_PIXPOS - spat_pixpos[ihand]) <= 0.6*spec_fwhm[ihand] if np.any(close): # Print out a warning - msgs.warning('Deleting object(s) {}'.format(sobjs[reg_ind[close]].NAME) + + log.warning('Deleting object(s) {}'.format(sobjs[reg_ind[close]].NAME) + ' because it collides with a user specified hand_extract aperture') keep[reg_ind[close]] = False @@ -2042,7 +2042,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ, if len(sobjs) == 0: - msgs.info('No hand or normal objects found on this slit. Returning') + log.info('No hand or normal objects found on this slit. 
Returning')
         return specobjs.SpecObjs()
 
     # Sort objects according to their spatial location
@@ -2067,7 +2067,7 @@ def objs_in_slit(image, ivar, thismask, slit_left, slit_righ,
                 color = 'blue'
             display.show_trace(viewer, ch,sobjs[iobj].TRACE_SPAT, trc_name = sobjs[iobj].NAME, color=color)
 
-    msgs.info("Successfully traced a total of {0:d} objects".format(len(sobjs)))
+    log.info("Successfully traced a total of {0:d} objects".format(len(sobjs)))
 
     # Finish
     for sobj in sobjs:
diff --git a/pypeit/core/fitting.py b/pypeit/core/fitting.py
index ba4037cc0d..2dda67e9b7 100644
--- a/pypeit/core/fitting.py
+++ b/pypeit/core/fitting.py
@@ -15,7 +15,7 @@
 from pypeit.core import pydl
 from pypeit import bspline
 
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.datamodel import DataContainer
 
@@ -99,7 +99,7 @@ def to_hdu(self, **kwargs):
         See that func for Args and Returns
         """
         if 'force_to_bintbl' in kwargs and not kwargs['force_to_bintbl']:
-            msgs.warning('PypeItFits objects must always be forced to a BinaryTableHDU for writing.')
+            log.warning('PypeItFits objects must always be forced to a BinaryTableHDU for writing.')
             kwargs['force_to_bintbl'] = True
         return super(PypeItFit, self).to_hdu(**kwargs)
 
@@ -141,7 +141,7 @@ def fit(self):
                 self.fitc = np.zeros(self.order[0] + 1, self.order[1] + 1).astype(float)
             else:
                 self.fitc = np.zeros(self.order[0] + 1).astype(float)
-            msgs.warning('Input gpm is masked everywhere. Fit is probably probelmatic')
+            log.warning('Input gpm is masked everywhere. Fit is probably problematic')
             self.success = 0
             return self.success
 
@@ -432,9 +432,9 @@ def robust_fit(xarray, yarray, order, x2=None, function='polynomial',
     #pypeitFit = None
     while (not qdone) and (iIter < maxiter):
         if np.sum(this_gpm) <= np.sum(order) + 1:
-            msgs.warning("More parameters than data points - fit might be undesirable")
+            log.warning("More parameters than data points - fit might be undesirable")
         if not np.any(this_gpm):
-            msgs.warning("All points were masked. Returning current fit and masking all points. Fit is likely undesirable")
+            log.warning("All points were masked. Returning current fit and masking all points. 
Fit is likely undesirable") pypeitFit = PypeItFit(xval=xarray.astype(float), yval=yarray.astype(float), func=function, order=np.atleast_1d(order), x2=x2.astype(float) if x2 is not None else x2, @@ -453,7 +453,7 @@ def robust_fit(xarray, yarray, order, x2=None, function='polynomial', # Update the iteration iIter += 1 if (iIter == maxiter) & (maxiter != 0) & verbose: - msgs.warning(f'Maximum number of iterations maxiter={maxiter} reached in robust_polyfit_djs') + log.warning(f'Maximum number of iterations maxiter={maxiter} reached in robust_polyfit_djs') # Do the final fit pypeitFit = PypeItFit(xval=xarray.astype(float), yval=yarray.astype(float), @@ -617,16 +617,16 @@ def robust_optimize(ydata, fitfunc, arg_dict, maxiter=10, inmask=None, invvar=No nrej = np.sum(thismask_iter & np.invert(thismask)) nrej_tot = np.sum(inmask & np.invert(thismask)) if verbose: - msgs.info( + log.info( 'Iteration #{:d}: nrej={:d} new rejections, nrej_tot={:d} total rejections out of ntot={:d} ' 'total pixels'.format(iter, nrej, nrej_tot, nin_good)) iIter += 1 if (iIter == maxiter) & (maxiter != 0): - msgs.warning('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_optimize') + log.warning('Maximum number of iterations maxiter={:}'.format(maxiter) + ' reached in robust_optimize') outmask = np.copy(thismask) if np.sum(outmask) == 0: - msgs.warning('All points were rejected!!! The fits will be zero everywhere.') + log.warning('All points were rejected!!! The fits will be zero everywhere.') # Perform a final fit using the final outmask if new pixels were rejected on the last iteration if qdone is False: @@ -830,9 +830,9 @@ def polyfit2d_general(x, y, z, deg, w=None, function='polynomial', if w is not None: w = np.asarray(w) + 0.0 if w.ndim != 1: - msgs.debug("fitting.polyfit2d - Expected 1D vector for weights") + log.debug("fitting.polyfit2d - Expected 1D vector for weights") if len(x) != len(w) or len(y) != len(w) or len(x) != len(y): - msgs.debug("fitting.polyfit2d - Expected x, y and weights to have same length") + log.debug("fitting.polyfit2d - Expected x, y and weights to have same length") z = z * w vander = vander * w[:,np.newaxis] # Reshape @@ -1153,14 +1153,14 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo if not quiet: termwidth = 80 - 13 - msgs.info('B-spline fit:') - msgs.info(' npoly = {0} profile basis functions'.format(npoly)) - msgs.info(' ngood = {0}/{1} measurements'.format(np.sum(ingpm), ingpm.size)) - msgs.info(' {0:>4} {1:>8} {2:>7} {3:>6} '.format( + log.info('B-spline fit:') + log.info(' npoly = {0} profile basis functions'.format(npoly)) + log.info(' ngood = {0}/{1} measurements'.format(np.sum(ingpm), ingpm.size)) + log.info(' {0:>4} {1:>8} {2:>7} {3:>6} '.format( 'Iter', 'Chi^2', 'N Rej', 'R. Fac').center(termwidth, '*')) hlinestr = ' {0} {1} {2} {3} '.format('-' * 4, '-' * 8, '-' * 7, '-' * 6) nullval = ' {0:>8} {1:>7} {2:>6} '.format('-' * 2, '-' * 2, '-' * 2) - msgs.info(hlinestr.center(termwidth)) + log.info(hlinestr.center(termwidth)) maskwork = outmask & ingpm & (invvar > 0) if not maskwork.any(): @@ -1171,7 +1171,7 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo funcname='Bspline longslit special', **kwargs_bspline) if maskwork.sum() < sset.nord: if not quiet: - msgs.warning('Number of good data points fewer than nord.') + log.warning('Number of good data points fewer than nord.') # TODO: Why isn't maskwork returned? 
return sset, outmask, yfit, reduced_chi, 4 @@ -1220,14 +1220,14 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo if error == -2: if not quiet: - msgs.warning('All break points lost!! Bspline fit failed.') + log.warning('All break points lost!! Bspline fit failed.') exit_status = 3 return sset, np.zeros(xdata.shape, dtype=bool), np.zeros(xdata.shape), reduced_chi, \ exit_status if error != 0: if not quiet: - msgs.info((' {0:4d}'.format(iiter) + nullval).center(termwidth)) + log.info((' {0:4d}'.format(iiter) + nullval).center(termwidth)) continue # Iterate the fit -- next rejection iteration @@ -1254,7 +1254,7 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo lower=lower * relative_factor, **kwargs_reject) tempin = np.copy(maskwork) if not quiet: - msgs.info(' {0:4d} {1:8.3f} {2:7d} {3:6.2f} '.format(iiter, + log.info(' {0:4d} {1:8.3f} {2:7d} {3:6.2f} '.format(iiter, reduced_chi, np.sum(maskwork == 0), relative_factor).center(termwidth)) @@ -1269,10 +1269,10 @@ def bspline_profile(xdata, ydata, invvar, profile_basis, ingpm=None, upper=5, lo # 4 = Number of good data points fewer than nord if not quiet: - msgs.info(' {0:>4} {1:8.3f} {2:7d} {3:6.2f} '.format('DONE', + log.info(' {0:>4} {1:8.3f} {2:7d} {3:6.2f} '.format('DONE', reduced_chi, np.sum(maskwork == 0), relative_factor).center(termwidth)) - msgs.info('*' * termwidth) + log.info('*' * termwidth) # Finish # TODO: Why not return maskwork directly diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index a2d8840f40..cbfa72c6d2 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -15,7 +15,7 @@ from IPython import embed -from pypeit import msgs +from pypeit import log from pypeit.core import coadd from pypeit import utils @@ -186,7 +186,7 @@ def construct_illum_profile(norm_spec, spat_coo, slitwidth, spat_gpm=None, spat_ """ if illum_rej is None and illum_iter > 0: - msgs.warning('Cannot use iterative rejection to construct the illumination function if the ' + log.warning('Cannot use iterative rejection to construct the illumination function if the ' 'rejection is not provided. 
Continuing without iteration.') _spat_gpm = np.ones(norm_spec.shape, dtype=bool) if spat_gpm is None else np.copy(spat_gpm) @@ -286,7 +286,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model, scale_model: `numpy.ndarray`_ An image containing the appropriate scaling """ - msgs.info(f"Performing relative spectral sensitivity correction (reference slit = {slit_illum_ref_idx})") + log.info(f"Performing relative spectral sensitivity correction (reference slit = {slit_illum_ref_idx})") # Generate the mask _thismask = thismask if (thismask is not None) else (slitmask > 0) gpm = gpmask if (gpmask is not None) else np.ones_like(rawimg, dtype=bool) @@ -322,7 +322,7 @@ def illum_profile_spectral_poly(rawimg, waveimg, slitmask, slitmask_trim, model, if sl == slit_illum_ref_idx: scaleImg[_thismask] *= utils.inverse(np.polyval(coeff, waveimg[_thismask])) minv, maxv = np.min(scaleImg[_thismask]), np.max(scaleImg[_thismask]) - msgs.info("Minimum/Maximum scales = {0:.5f}, {1:.5f}".format(minv, maxv)) + log.info("Minimum/Maximum scales = {0:.5f}, {1:.5f}".format(minv, maxv)) return scaleImg @@ -439,7 +439,7 @@ def poly_map(rawimg, rawivar, waveimg, slitmask, slitmask_trim, modelimg, deg=3, bounds_error=False, fill_value=0.0, assume_sorted=True) modelmap = np.ones_like(rawimg) relscale = np.ones_like(rawimg) - msgs.info("Generating a polynomial map between the model and the raw data") + log.info("Generating a polynomial map between the model and the raw data") for sl, spatid in enumerate(slitmask_spatid): # Prepare the masks, edges, and fitting variables this_slit = (slitmask == spatid) @@ -534,22 +534,22 @@ def tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=0.1, deb # Check if the shift is within the allowed range if np.abs(left_shift) > maxfrac: - msgs.warning( + log.warning( f'Left slit edge shift of {100*left_shift:.1f}% exceeds the maximum allowed of ' f'{100*maxfrac:.1f}%\nThe left edge will not be tweaked.' ) left_shift = 0.0 else: - msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100 * left_shift) + + log.info('Tweaking left slit boundary by {0:.1f}%'.format(100 * left_shift) + ' ({0:.2f} pixels)'.format(left_shift * slitwidth)) if np.abs(right_shift) > maxfrac: - msgs.warning( + log.warning( f'Right slit edge shift of {100*right_shift:.1f}% exceeds the maximum allowed of ' f'{100*maxfrac:.1f}%\nThe right edge will not be tweaked.' ) right_shift = 0.0 else: - msgs.info('Tweaking right slit boundary by {0:.1f}%'.format(100 * right_shift) + + log.info('Tweaking right slit boundary by {0:.1f}%'.format(100 * right_shift) + ' ({0:.2f} pixels)'.format(right_shift * slitwidth)) # Calculate the tweak for the left edge @@ -677,7 +677,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma raise PypeItError('Tweak left edge has failed! 
Bad continuous region.') i = contiguous_region.stop-1 if i >= 0 and norm_flat[i-1] > norm_flat[i]: - msgs.warning('When adjusting left edge, found noisy illumination profile structure.') + log.warning('When adjusting left edge, found noisy illumination profile structure.') if debug: plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.', s=10, color='C3', lw=0) @@ -686,13 +686,13 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma plt.scatter(spat_coo[i], norm_flat[i], marker='o', facecolor='none', s=50, color='C1') plt.show() if norm_flat[i+1] < left_thresh: - msgs.warning('Left slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( + log.warning('Left slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( 100*maxfrac)) left_shift = maxfrac else: left_shift = utils.linear_interpolate(norm_flat[i], spat_coo[i], norm_flat[i+1], spat_coo[i+1], left_thresh) - msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100*left_shift) + + log.info('Tweaking left slit boundary by {0:.1f}%'.format(100*left_shift) + ' ({0:.2f} pixels)'.format(left_shift*slitwidth)) new_left += left_shift * slitwidth @@ -731,7 +731,7 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma raise PypeItError('Tweak right edge has failed! Bad continuous region.') i = contiguous_region.start if i < norm_flat.size-1 and norm_flat[i+1] > norm_flat[i]: - msgs.warning('When adjusting right edge, found noisy illumination profile structure.') + log.warning('When adjusting right edge, found noisy illumination profile structure.') if debug: plt.scatter(spat_coo[masked_flat.mask], norm_flat[masked_flat.mask], marker='.', s=10, color='C3', lw=0) @@ -740,13 +740,13 @@ def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, ma plt.scatter(spat_coo[i], norm_flat[i], marker='o', facecolor='none', s=50, color='C1') plt.show() if norm_flat[i-1] < right_thresh: - msgs.warning('Right slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( + log.warning('Right slit boundary tweak limited by maximum allowed shift: {:.1f}%'.format( 100*maxfrac)) right_shift = maxfrac else: right_shift = 1-utils.linear_interpolate(norm_flat[i-1], spat_coo[i-1], norm_flat[i], spat_coo[i], right_thresh) - msgs.info('Tweaking right slit boundary by {0:.1f}%'.format(100*right_shift) + + log.info('Tweaking right slit boundary by {0:.1f}%'.format(100*right_shift) + ' ({0:.2f} pixels)'.format(right_shift*slitwidth)) new_right -= right_shift * slitwidth diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index 09a0708bd8..513a581128 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -26,7 +26,7 @@ from linetools.spectra import xspectrum1d -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import dataPaths from pypeit import io @@ -80,7 +80,7 @@ def spat_flexure_shift(sciimg, slits, bpm=None, maxlag=20, sigdetect=10., debug= float: The spatial flexure shift relative to the initial slits """ - msgs.info("Measuring spatial flexure") + log.info("Measuring spatial flexure") # Mask -- Includes short slits and those excluded by the user (e.g. 
['rdx']['slitspatnum']) slitmask = slits.slit_img(initial=True, exclude_flag=slits.bitmask.exclude_for_flexure) @@ -112,7 +112,7 @@ def spat_flexure_shift(sciimg, slits, bpm=None, maxlag=20, sigdetect=10., debug= _, _, pix_max, _, _, _, _, _ = arc.detect_lines(xcorr_max, cont_subtract=False, input_thresh=0., nfind=1, debug=debug) # No peak? -- e.g. data fills the entire detector if (len(pix_max) == 0) or pix_max[0] == -999.0: - msgs.warning( + log.warning( 'No peak found in the x-correlation between the traced slits and the science/calib ' 'image. Assuming there is NO SPATIAL FLEXURE.\nIf a flexure is expected, consider ' 'either changing the maximum lag for the cross-correlation, or the ' @@ -123,7 +123,7 @@ def spat_flexure_shift(sciimg, slits, bpm=None, maxlag=20, sigdetect=10., debug= lag0_max = np.where(lags_max == 0)[0][0] shift = round(pix_max[0] - lag0_max, 3) - msgs.info('Spatial flexure measured: {}'.format(shift)) + log.info('Spatial flexure measured: {}'.format(shift)) if debug: # 1D plot of the cross-correlation @@ -150,7 +150,7 @@ def spat_flexure_shift(sciimg, slits, bpm=None, maxlag=20, sigdetect=10., debug= if qa_outfile is not None: # Generate the QA plot - msgs.info("Generating QA plot for spatial flexure") + log.info("Generating QA plot for spatial flexure") spat_flexure_qa(sciimg, slits, shift, gpm=np.logical_not(bpm), vrange=qa_vrange, outfile=qa_outfile) return shift @@ -181,7 +181,7 @@ def spat_flexure_qa(img, slits, shift, gpm=None, vrange=None, outfile=None): # check that vrange is a tuple if vrange is not None and not isinstance(vrange, tuple): - msgs.warning('vrange must be a tuple with the min and max values for the imshow plot. Ignoring vrange.') + log.warning('vrange must be a tuple with the min and max values for the imshow plot. Ignoring vrange.') vrange = None # TODO: should we use initial or tweaked slits in this plot? @@ -352,17 +352,17 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N if sky_file is None and arx_skyspec is None: raise PypeItError("sky_file or arx_skyspec must be provided") elif sky_file is not None and arx_skyspec is not None: - msgs.warning("sky_file and arx_skyspec both provided. Using arx_skyspec.") + log.warning("sky_file and arx_skyspec both provided. Using arx_skyspec.") sky_file = None # Arxiv sky spectrum if sky_file is not None: # Load arxiv sky spectrum - msgs.info("Loading the arxiv sky spectrum and computing its spectral FWHM") + log.info("Loading the arxiv sky spectrum and computing its spectral FWHM") arx_skyspec, arx_fwhm_pix = get_archive_spectrum(sky_file, obj_skyspec=obj_skyspec, spec_fwhm_pix=spec_fwhm_pix) elif arx_fwhm_pix is None: # get arxiv sky spectrum resolution (FWHM in pixels) - msgs.info("Computing the spectral FWHM for the provided arxiv sky spectrum") + log.info("Computing the spectral FWHM for the provided arxiv sky spectrum") arx_fwhm_pix = autoid.measure_fwhm(arx_skyspec.flux.value, sigdetect=4., fwhm=4.) if arx_fwhm_pix is None: raise PypeItError('Failed to measure the spectral FWHM of the archived sky spectrum. 
' @@ -377,7 +377,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N if smooth_fwhm_pix is None: # smooth_fwhm_pix is None if spec_fwhm_pix<0, i.e., the wavelength calibration is bad - msgs.warning('No flexure correction could be computed for this slit/object') + log.warning('No flexure correction could be computed for this slit/object') return None if smooth_fwhm_pix > 0: @@ -395,7 +395,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N # Rebin both spectra onto overlapped wavelength range if len(keep_idx) <= 50: - msgs.warning("Not enough overlap between sky spectra") + log.warning("Not enough overlap between sky spectra") return None # rebin onto object ALWAYS @@ -404,7 +404,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N obj_skyspec = obj_skyspec.rebin(keep_wave) # Deal with bad pixels - msgs.debug("Need to mask bad pixels") + log.debug("Need to mask bad pixels") # Trim edges (rebinning is junk there) arx_skyspec.data['flux'][0,:2] = 0. arx_skyspec.data['flux'][0,-2:] = 0. @@ -425,14 +425,14 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N norm = np.sum(obj_skyspec_flux)/obj_skyspec.npix norm2 = np.sum(arx_skyspec.flux.value)/arx_skyspec.npix if norm <= 0: - msgs.warning("Bad normalization of object in flexure algorithm") - msgs.warning("Will try the median") + log.warning("Bad normalization of object in flexure algorithm") + log.warning("Will try the median") norm = np.median(obj_skyspec_flux) if norm <= 0: - msgs.warning("Improper sky spectrum for flexure. Is it too faint??") + log.warning("Improper sky spectrum for flexure. Is it too faint??") return None if norm2 <= 0: - msgs.warning('Bad normalization of archive in flexure. You are probably using wavelengths ' + log.warning('Bad normalization of archive in flexure. You are probably using wavelengths ' 'well beyond the archive.') return None obj_skyspec_flux = obj_skyspec_flux / norm @@ -453,7 +453,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N arx_sky_flux = np.clip(arx_sky_flux, arx_lower, arx_upper) # # # Consider sharpness filtering (e.g. LowRedux) - # msgs.debug("Consider taking median first [5 pixel]") + # log.debug("Consider taking median first [5 pixel]") # Cross correlation of spectra corr = np.correlate(arx_sky_flux, obj_sky_flux, "same") @@ -478,7 +478,7 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N # We use the int of abs(shift) to avoid to trigger the error/warning for differences <1pixel # TODO :: I'm not convinced that we need int here... 
if int(abs(shift)) > mxshft: - msgs.warning(f"Computed shift {shift:.1f} pix is " + log.warning(f"Computed shift {shift:.1f} pix is " f"larger than specified maximum {mxshft} pix.") if excess_shft == "crash": @@ -492,29 +492,29 @@ def spec_flex_shift(obj_skyspec, sky_file=None, arx_skyspec=None, arx_fwhm_pix=N ) elif excess_shft == "set_to_zero": - msgs.warning("Flexure compensation failed for one of your objects.") - msgs.warning("Setting the flexure correction shift to 0 pixels.") + log.warning("Flexure compensation failed for one of your objects.") + log.warning("Setting the flexure correction shift to 0 pixels.") # Return the usual dictionary, but with a shift == 0 shift = 0.0 elif excess_shft == "continue": - msgs.warning("Applying flexure shift larger than specified max!") + log.warning("Applying flexure shift larger than specified max!") elif excess_shft == "use_median": - msgs.warning("Will try to use a flexure shift from other slit/object. " + log.warning("Will try to use a flexure shift from other slit/object. " "If not available, flexure correction will not be applied.") return None else: raise PypeItError(f"FlexurePar Keyword excessive_shift = \"{excess_shft}\" " "not recognized.") - msgs.info(f"Flexure correction of {shift:.3f} pixels") + log.info(f"Flexure correction of {shift:.3f} pixels") else: fit = fitting.PypeItFit(xval=subpix_grid, yval=0.0*subpix_grid, func='polynomial', order=np.atleast_1d(2)) fit.fit() - msgs.warning('Flexure compensation failed for one of your objects') + log.warning('Flexure compensation failed for one of your objects') return None return dict(polyfit=fit, shift=shift, subpix=subpix_grid, @@ -562,9 +562,9 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= if spec_fwhm_pix is None: # pixels spec_fwhm_pix = autoid.measure_fwhm(obj_skyspec.flux.value, sigdetect=4., fwhm=4.) - msgs.info('Measuring spectral FWHM using the boxcar extracted sky spectrum.') + log.info('Measuring spectral FWHM using the boxcar extracted sky spectrum.') if spec_fwhm_pix is None: - msgs.warning('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' + log.warning('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' 'Not enough sky lines detected.') return None # object sky spectral dispersion (Angstrom/pixel) @@ -577,10 +577,10 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= arx_disp = np.median(np.diff(arx_skyspec.wavelength.value)) arx_fwhm = arx_fwhm_pix * arx_disp - msgs.info(f"Resolution (FWHM) of Archive={arx_fwhm:.2f} Ang and Observation={spec_fwhm:.2f} Ang") + log.info(f"Resolution (FWHM) of Archive={arx_fwhm:.2f} Ang and Observation={spec_fwhm:.2f} Ang") if spec_fwhm <= 0: - msgs.warning('Negative spectral FWHM, likely due to a bad wavelength calibration.') + log.warning('Negative spectral FWHM, likely due to a bad wavelength calibration.') return None # Determine fwhm of the smoothing gaussian @@ -593,9 +593,9 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= smooth_fwhm = np.sqrt(obj_med_fwhm2-arx_med_fwhm2) # Ang smooth_fwhm_pix = smooth_fwhm / arx_disp else: - msgs.warning("Prefer archival sky spectrum to have higher resolution") + log.warning("Prefer archival sky spectrum to have higher resolution") smooth_fwhm_pix = 0. - msgs.warning("New Sky has higher resolution than Archive. Not smoothing") + log.warning("New Sky has higher resolution than Archive. 
Not smoothing") return smooth_fwhm_pix @@ -691,8 +691,8 @@ def spec_flex_shift_global(slit_specs, islit, sky_file, empty_flex_dict, else: # No success, come back to it later return_later_slits.append(islit) - msgs.warning("Flexure shift calculation failed for this slit.") - msgs.info("Will come back to this slit to attempt " + log.warning("Flexure shift calculation failed for this slit.") + log.info("Will come back to this slit to attempt " "to use saved estimates from other slits") # Append flex_dict, which will be an empty dictionary if the flexure failed for the all the slits @@ -763,7 +763,7 @@ def spec_flex_shift_local(slits, slitord, specobjs, islit, sky_file, empty_flex_ # if no objects in this slit, append an empty dict if len(this_specobjs) == 0: - msgs.info('No object extracted in this slit.') + log.info('No object extracted in this slit.') flex_list.append(empty_flex_dict.copy()) return flex_list @@ -773,13 +773,13 @@ def spec_flex_shift_local(slits, slitord, specobjs, islit, sky_file, empty_flex_ # Loop through objects for ss, sobj in enumerate(this_specobjs): if sobj is None or sobj['BOX_WAVE'] is None: # Nothing extracted; only the trace exists - msgs.info(f'Object # {ss} was not extracted.') + log.info(f'Object # {ss} was not extracted.') # Update dict for key in keys_to_update: # append None flex_dict[key].append(None) continue - msgs.info(f"Working on spectral flexure for object # {ss} in slit {slits.spat_id[islit]}") + log.info(f"Working on spectral flexure for object # {ss} in slit {slits.spat_id[islit]}") # get 1D spectrum for this object obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sobj.BOX_WAVE[sobj.BOX_MASK], sobj.BOX_COUNTS_SKY[sobj.BOX_MASK])) @@ -795,18 +795,18 @@ def spec_flex_shift_local(slits, slitord, specobjs, islit, sky_file, empty_flex_ else: # No success, come back to it later return_later_sobjs.append(ss) - msgs.warning("Flexure shift calculation failed for this spectrum.") - msgs.info("Will come back to this spectrum to attempt " + log.warning("Flexure shift calculation failed for this spectrum.") + log.info("Will come back to this spectrum to attempt " "to use saved estimates from other slits/objects") # Check if we need to go back if (len(return_later_sobjs) > 0) and (len(flex_dict['shift']) > 0): - msgs.warning(f'Flexure shift calculation failed for {len(return_later_sobjs)} ' + log.warning(f'Flexure shift calculation failed for {len(return_later_sobjs)} ' f'object(s) in slit {slits.spat_id[islit]}') # get the median shift among all objects in this slit idx_med_shift = np.where(flex_dict['shift'] == np.percentile(flex_dict['shift'], 50, method='nearest'))[0][0] - msgs.info(f"Median value of the measured flexure shifts in this slit, equal to " + log.info(f"Median value of the measured flexure shifts in this slit, equal to " f"{flex_dict['shift'][idx_med_shift]:.3f} pixels, will be used") # assign the median shift to the failed objects @@ -880,7 +880,7 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco results of each slit. This is filled with a basically empty dict if the slit is skipped. 
""" - msgs.debug("Consider doing 2 passes in flexure as in LowRedux") + log.debug("Consider doing 2 passes in flexure as in LowRedux") # Determine the method slit_cen = True if (specobjs is None) or (method == "slitcen") else False @@ -904,7 +904,7 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco gdslits = np.where(np.logical_not(slit_bpm))[0] for islit in range(slits.nslits): - msgs.info(f"Working on spectral flexure of slit: {slits.spat_id[islit]}") + log.info(f"Working on spectral flexure of slit: {slits.spat_id[islit]}") # If no objects on this slit append an empty dictionary if islit not in gdslits: @@ -934,13 +934,13 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco # Check if we need to go back to some failed slits if len(return_later_slits) > 0: - msgs.warning(f'Flexure shift calculation failed for {len(return_later_slits)} slits') + log.warning(f'Flexure shift calculation failed for {len(return_later_slits)} slits') # take the median value to deal with the cases when there are more than one shift per slit (e.g., local flexure) saved_shifts = np.array([np.percentile(flex['shift'], 50, method='nearest') if len(flex['shift']) > 0 else None for flex in flex_list]) if np.all(saved_shifts == None): # If all the elements in saved_shifts are None means that there are no saved shifts available - msgs.warning(f'No previously saved flexure shift estimates available. ' + log.warning(f'No previously saved flexure shift estimates available. ' f'Flexure corrections cannot be performed.') for islit in range(slits.nslits): # we append an empty dictionary @@ -950,7 +950,7 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco med_shift = np.percentile(saved_shifts[saved_shifts!= None], 50, method='nearest') # in which slit the median is? islit_med_shift = np.where(saved_shifts == med_shift)[0][0] - msgs.info(f"Median value of all the measured flexure shifts, equal to " + log.info(f"Median value of all the measured flexure shifts, equal to " f"{saved_shifts[islit_med_shift]:.3f} pixels, will be used") # global flexure @@ -1095,7 +1095,7 @@ def get_archive_spectrum(sky_file, obj_skyspec=None, spec_fwhm_pix=None): # measure spec_fwhm_pix spec_fwhm_pix = autoid.measure_fwhm(obj_skyspec.flux.value, sigdetect=4., fwhm=4.) if spec_fwhm_pix is None: - msgs.warning('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. ' + log.warning('Failed to measure the spectral FWHM using the boxcar extracted sky spectrum. 
' 'Choose one of the provided sky files.') # get the spectral resolution of obj_skyspec # obj_skyspec spectral dispersion (Angstrom/pixel) @@ -1294,7 +1294,7 @@ def spec_flexure_qa(slitords, bpm, basename, flex_list, dwv = 20.*units.AA gdsky = np.where((sky_lines > min_wave) & (sky_lines < max_wave))[0] if len(gdsky) == 0: - msgs.warning("No sky lines for Flexure QA") + log.warning("No sky lines for Flexure QA") continue if len(gdsky) > 6: idx = np.array([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1]) @@ -1339,7 +1339,7 @@ def spec_flexure_qa(slitords, bpm, basename, flex_list, plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0) plt.savefig(outfile)#, dpi=400) plt.close() - msgs.info("Wrote spectral flexure QA: {}".format(outfile)) + log.info("Wrote spectral flexure QA: {}".format(outfile)) plt.rcdefaults() @@ -1384,10 +1384,10 @@ def calculate_image_phase(imref, imshift, gpm_ref=None, gpm_shift=None, maskval= try: from skimage.registration import optical_flow_tvl1, phase_cross_correlation except ImportError: - msgs.warning("scikit-image is not installed. Adopting a basic image cross-correlation") + log.warning("scikit-image is not installed. Adopting a basic image cross-correlation") return calculate_image_offset(imref, imshift) if imref.shape != imshift.shape: - msgs.warning("Input images shapes are not equal. Adopting a basic image cross-correlation") + log.warning("Input images shapes are not equal. Adopting a basic image cross-correlation") return calculate_image_offset(imref, imshift) # Set the masks if gpm_ref is None: @@ -1530,7 +1530,7 @@ def sky_em_residuals(wave:np.ndarray, flux:np.ndarray, p, pcov = fitting.fit_gauss(wave[mw], flux[mw], w_out=1./np.sqrt(ivar[mw]), guesses=p0, nparam=4) except RuntimeError as e: - msgs.warning('First attempt at Gaussian fit failed, ending with RuntimeError. Original ' + log.warning('First attempt at Gaussian fit failed, ending with RuntimeError. Original ' f'exception: {e.args[0]} Assuming this is because it hit the maximum ' 'number of function evaluations. 
Trying again with a maximum of 10000.') # Try again with larger limit on the number of function evaluations @@ -1747,7 +1747,7 @@ def measure_sky_lines(self): # Loop on slits for i in np.arange(0,self.nslits,1): if (i % 10) == 0: - msgs.info("Working on slit {} of {}".format(i, self.nslits)) + log.info("Working on slit {} of {}".format(i, self.nslits)) if not np.all(self['SN'][:,i] > 1.): continue @@ -1836,7 +1836,7 @@ def update_fit(self): all_sky[mm] = m all_ivar[mm] = 1e6 if (np.sum(mm) > 10): - msgs.warning('Removing more than 10 pixels of data') + log.warning('Removing more than 10 pixels of data') _,diff,diff_err,_,_ = sky_em_residuals(all_wave, all_sky, all_ivar, self.sky_table['Wave']) diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index ef1233ec71..0e7f670ecd 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -20,7 +20,7 @@ from astropy.io import ascii from astropy import stats -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit import bspline @@ -82,7 +82,7 @@ def find_standard(specobj_list): else: medfx.append(np.median(spobj.BOX_COUNTS)) mxix = np.argmax(np.array(medfx)) - msgs.info("Putative standard star {} has a median boxcar count of {}".format(specobj_list[mxix], + log.info("Putative standard star {} has a median boxcar count of {}".format(specobj_list[mxix], np.max(medfx))) # Return return mxix @@ -242,7 +242,7 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta raise PypeItError('The wavelength vector and delta_wave vector must be the same size') _delta_wave = delta_wave else: - msgs.warning('Invalid type for delta_wave - using a default value') + log.warning('Invalid type for delta_wave - using a default value') _delta_wave = wvutils.get_delta_wave(wave, wave_mask) else: # If delta_wave is not passed in, then we will use the native wavelength sampling of the spectrum @@ -258,7 +258,7 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta if extrap_sens: zeropoint_obs[wave_mask] \ = interpolate.interp1d(wave_zp, zeropoint, bounds_error=False)(wave[wave_mask]) - msgs.warning("Your data extends beyond the bounds of your sensfunc. You should be " + log.warning("Your data extends beyond the bounds of your sensfunc. You should be " "adjusting the par['sensfunc']['extrap_blu'] and/or " "par['sensfunc']['extrap_red'] to extrapolate further and recreate your " "sensfunc. But we are extrapolating per your direction. Good luck!") @@ -277,16 +277,16 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta # Did the user request a telluric correction? if tellmodel is not None: # This assumes there is a separate telluric key in this dict. 
- #msgs.warning("Telluric corrections via this method are deprecated") - msgs.info('Applying telluric correction') + #log.warning("Telluric corrections via this method are deprecated") + log.info('Applying telluric correction') sensfunc_obs = sensfunc_obs * (tellmodel > 1e-10) / (tellmodel + (tellmodel < 1e-10)) if atmext is None: senstot = sensfunc_obs.copy() else: # Apply Extinction if optical bands - msgs.info("Applying extinction correction") -# msgs.warn("Extinction correction applied only if the spectra covers <10000Ang.") + log.info("Applying extinction correction") +# log.warn("Extinction correction applied only if the spectra covers <10000Ang.") senstot = sensfunc_obs * atmext.correction_factor(wave, airmass=airmass) # senstot is the conversion from N_lam to F_lam, and the division by exptime and delta_wave are to convert @@ -329,7 +329,7 @@ def counts2Nlam(wave, counts, counts_ivar, counts_mask, exptime, airmass, atmext Nlam_ivar_star = delta_wave**2*counts_ivar*exptime**2 # Extinction correction - msgs.info("Applying extinction correction") + log.info("Applying extinction correction") ext_corr = atmext.correction_factor(wave, airmass=airmass) # Correct for extinction Nlam_star = Nlam_star * ext_corr @@ -400,7 +400,7 @@ def fit_zeropoint(wave, Nlam_star, Nlam_ivar_star, gpm_star, std_spec, # Do we need to extrapolate? TODO Replace with a model or a grey body? ## TODO This is an ugly hack. Why are we only triggering this if the extrapolated star is negative. if np.min(flux_true) <= 0.: - msgs.warning('Your spectrum extends beyond calibrated standard star, extrapolating the spectra with polynomial.') + log.warning('Your spectrum extends beyond calibrated standard star, extrapolating the spectra with polynomial.') pypeitFit = fitting.robust_fit( std_spec.wave, std_spec.flux,8,function='polynomial', maxiter=50, lower=3.0, upper=3.0, maxrej=3, grow=0, sticky=True, use_mad=True @@ -492,15 +492,15 @@ def get_mask(wave_star, flux_star, ivar_star, mask_star, mask_tell = np.ones_like(flux_star).astype(bool) # masking bad entries - msgs.info(" Masking bad pixels") + log.info(" Masking bad pixels") gpm_star = mask_star.copy() gpm_star[ivar_star <= 0.] = False gpm_star[flux_star <= 0.] = False # Mask edges - msgs.info(" Masking edges") + log.info(" Masking edges") gpm_star[[0, -1]] = False # Mask Atm. 
cutoff - msgs.info(" Masking Below the atmospheric cutoff") + log.info(" Masking Below the atmospheric cutoff") atms_cutoff = wave_star <= 3000.0 gpm_star[atms_cutoff] = False @@ -551,7 +551,7 @@ def get_mask(wave_star, flux_star, ivar_star, mask_star, tell_nir = (trans_final < trans_thresh) & (wave_star > 9100.0) mask_tell[tell_nir] = False else: - msgs.info('Your spectrum is bluer than 9100A, only optical telluric regions are masked.') + log.info('Your spectrum is bluer than 9100A, only optical telluric regions are masked.') return gpm_star, mask_recomb, mask_tell @@ -582,10 +582,10 @@ def mask_stellar_hydrogen(wave_star, mask_width=10.0, mask_star=None): if mask_star is None: mask_star = np.ones_like(wave_star, dtype=bool) # Mask Balmer, Paschen, Brackett, and Pfund recombination lines - msgs.info("Masking hydrogen recombination lines") + log.info("Masking hydrogen recombination lines") # Mask Balmer - msgs.info(" Masking Balmer") + log.info(" Masking Balmer") # Vacuum Wavelengths from NIST (TEB, 2023-02-10) lines_balm = np.array([6564.6, 4862.7, 4341.7, 4102.9, 3971.2, 3890.2, 3836.4]) @@ -596,7 +596,7 @@ def mask_stellar_hydrogen(wave_star, mask_width=10.0, mask_star=None): mask_star[ibalm] = False # Mask Paschen - msgs.info(" Masking Paschen") + log.info(" Masking Paschen") # Vacuum Wavelengths from NIST (TEB, 2023-02-10) lines_pasc = np.array([18756.4, 12821.6, 10941.2, 10052.6, 9548.8, 9232.2, 9017.8, 8865.3, @@ -607,7 +607,7 @@ def mask_stellar_hydrogen(wave_star, mask_width=10.0, mask_star=None): mask_star[ipasc] = False # Mask Brackett - msgs.info(" Masking Brackett") + log.info(" Masking Brackett") # Vacuum Wavelengths from NIST (TEB, 2023-02-10) lines_brac = np.array([40522.8, 26258.7, 21661.2, 19446.0, 18179.2, 17366.9, 14584.0]) @@ -616,7 +616,7 @@ def mask_stellar_hydrogen(wave_star, mask_width=10.0, mask_star=None): mask_star[ibrac] = False # Mask Pfund - msgs.info(" Masking Pfund") + log.info(" Masking Pfund") # Vacuum Wavelengths from NIST (TEB, 2023-02-10) lines_pfund = np.array([74599.0, 46537.8, 37405.8, 32969.8, 22788.0]) for line_pfund in lines_pfund: @@ -653,10 +653,10 @@ def mask_stellar_helium(wave_star, mask_width=5.0, mask_star=None): if mask_star is None: mask_star = np.ones_like(wave_star, dtype=bool) # Mask Balmer, Paschen, Brackett, and Pfund recombination lines - msgs.info("Masking ionized helium recombination lines") + log.info("Masking ionized helium recombination lines") # Mask HeII - msgs.info(" Masking HeII lines") + log.info(" Masking HeII lines") # Prominent HeII lines not overlapped by hydrogen lines: # Vacuum wavelengths from Hubeney & Milhas (2015) # "Theory of Stellar Atmospheres", p. 191. 
@@ -937,7 +937,7 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N Good pixel mask for fitted sensitivity function with same shape as wave (nspec,) """ if np.any(np.logical_not(np.isfinite(Nlam_ivar))): - msgs.warning("NaN are present in the inverse variance") + log.warning("NaN are present in the inverse variance") ivar_bpm = np.logical_not(np.isfinite(Nlam_ivar) & (Nlam_ivar > 0)) # check masks @@ -971,7 +971,7 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N zeropoint_clean_gpm = zeropoint_data_gpm.copy() # Polynomial corrections on Hydrogen Recombination lines if (np.sum(zeropoint_fitmask) > 0.5 * len(zeropoint_fitmask)) & polycorrect: - msgs.info("Replacing bspline fit with polyfit over Hydrogen Recombination line regions") + log.info("Replacing bspline fit with polyfit over Hydrogen Recombination line regions") ## Only correct Hydrogen Recombination lines with polyfit in the telluric free region balmer_clean = np.zeros_like(wave, dtype=bool) # Commented out the bluest recombination lines since they are weak for spectroscopic standard stars. @@ -989,32 +989,32 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N zeropoint_clean[ivar_bpm] = zeropoint_poly[ivar_bpm] else: ## if half more than half of your spectrum is masked (or polycorrect=False) then do not correct it with polyfit - msgs.warning('No polynomial corrections performed on Hydrogen Recombination line regions') + log.warning('No polynomial corrections performed on Hydrogen Recombination line regions') # ToDo # Compute an effective resolution for the standard. This could be improved # to setup an array of breakpoints based on the resolution. At the # moment we are using only one number - msgs.debug("Should pull resolution from arc line analysis") - msgs.debug("At the moment the resolution is taken as the PixelScale") - msgs.debug("This needs to be changed!") + log.debug("Should pull resolution from arc line analysis") + log.debug("At the moment the resolution is taken as the PixelScale") + log.debug("This needs to be changed!") std_pix = np.median(np.abs(wave[zeropoint_data_gpm] - np.roll(wave[zeropoint_data_gpm], 1))) std_res = np.median(wave[zeropoint_data_gpm]/resolution) # median resolution in units of Angstrom. 
     if (nresln * std_res) < std_pix:
-        msgs.warning("Bspline breakpoints spacing shoud be larger than 1pixel")
-        msgs.warning("Changing input nresln to fix this")
+        log.warning("Bspline breakpoints spacing should be larger than 1 pixel")
+        log.warning("Changing input nresln to fix this")
         nresln = std_res / std_pix
 
     # Output some helpful information for double-checking input params are correct
-    msgs.debug(f" This is the passed-in R: {resolution}")
-    msgs.info(f" This is the standard pixel: {std_pix:.2f} Å")
-    msgs.info(f" This is the standard resolution element: {std_res:.2f} Å")
-    msgs.info(f" Breakpoint spacing: {std_res * nresln:.2f} pixels")
+    log.debug(f" This is the passed-in R: {resolution}")
+    log.info(f" This is the standard pixel: {std_pix:.2f} Å")
+    log.info(f" This is the standard resolution element: {std_res:.2f} Å")
+    log.info(f" Breakpoint spacing: {std_res * nresln:.2f} pixels")
 
     # Fit zeropoint with bspline
     kwargs_bspline = {'bkspace': std_res * nresln}
     kwargs_reject = {'maxrej': 5}
-    msgs.info("Initialize bspline for flux calibration")
+    log.info("Initialize bspline for flux calibration")
     init_bspline = bspline.bspline(wave[zeropoint_data_gpm], bkspace=kwargs_bspline['bkspace'])
     fullbkpt = init_bspline.breakpoints
@@ -1023,7 +1023,7 @@
     init_breakpoints = fullbkpt[msk_bkpt > 0.999]
 
     # init_breakpoints = fullbkpt
-    msgs.info("Bspline fit on zeropoint. ")
+    log.info("Bspline fit on zeropoint. ")
     bset1, bmask = fitting.iterfit(wave, zeropoint_clean, invvar=zeropoint_ivar,
                                    inmask=zeropoint_fitmask, upper=upper, lower=lower,
                                    fullbkpt=init_breakpoints, maxiter=maxiter,
                                    kwargs_bspline=kwargs_bspline, kwargs_reject=kwargs_reject)
@@ -1063,7 +1063,7 @@
     else:
         ## if half more than half of your spectrum is masked (or polycorrect=False) then do not correct it with polyfit
         zeropoint_bspl_clean = zeropoint_bspl.copy()
-        msgs.warning('No polynomial corrections performed on Hydrogen Recombination line regions')
+        log.warning('No polynomial corrections performed on Hydrogen Recombination line regions')
 
     # Calculate zeropoint
     zeropoint_fit = zeropoint_poly if polyfunc else zeropoint_bspl_clean
@@ -1153,7 +1153,7 @@ def scale_in_filter(wave, flux, gpm, scale_dict):
         flux = flux[gpm]
 
     # Grab the instrument response function
-    msgs.info("Integrating spectrum in filter: {}".format(scale_dict['filter']))
+    log.info("Integrating spectrum in filter: {}".format(scale_dict['filter']))
     fwave, trans = load_filter_file(scale_dict['filter'])
     tfunc = interpolate.interp1d(fwave, trans, bounds_error=False, fill_value=0.)
@@ -1173,7 +1173,7 @@
         # Scale factor
         Dm = AB - scale_dict['filter_mag']
         scale = np.power(10.0,(Dm/2.5))
-        msgs.info("Scaling spectrum by {}".format(scale))
+        log.info("Scaling spectrum by {}".format(scale))
     else:
         raise PypeItError("Bad magnitude type")
diff --git a/pypeit/core/framematch.py b/pypeit/core/framematch.py
index 5c5b495283..ba7e63edc4 100644
--- a/pypeit/core/framematch.py
+++ b/pypeit/core/framematch.py
@@ -9,7 +9,7 @@
 
 import numpy as np
 
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.bitmask import BitMask
@@ -97,7 +97,7 @@ def valid_frametype(frametype, quiet=False, raise_error=False):
     if not good_frametype:
         message = f'{frametype} is not a valid PypeIt frame type.'
         if not quiet and not raise_error:
-            msgs.warning(message)
+            log.warning(message)
         elif raise_error:
             raise PypeItError(message)
     return good_frametype
diff --git a/pypeit/core/gui/edge_inspector.py b/pypeit/core/gui/edge_inspector.py
index 2f1b2b9f0f..9e78c685d0 100644
--- a/pypeit/core/gui/edge_inspector.py
+++ b/pypeit/core/gui/edge_inspector.py
@@ -9,7 +9,7 @@
 import numpy as np
 from matplotlib import pyplot, widgets
 
-from pypeit import msgs
+from pypeit import log
 from pypeit.core.gui import gui_util
 
 
@@ -67,7 +67,7 @@ def __init__(self, edges):
                     if self.edges.par['left_right_pca'] \
                         else self.edges.pca.reference_row
         else:
-            msgs.warning('Edges object does not include a PCA decomposition of the traces.')
+            log.warning('Edges object does not include a PCA decomposition of the traces.')
             self.reference_row = self.edges.nspec // 2
         # NOTE: line properties match what is used for the Pointer
         self.ref_row_line = image_ax.axhline(self.reference_row, color='C1', lw=0.5)
@@ -170,7 +170,7 @@ def update_traces(self, *args):
         if np.any(_remove) or np.any(_add):
             success = self.edges.sync()
             if not success:
-                msgs.warning('Unable to synchronize left-right traces!')
+                log.warning('Unable to synchronize left-right traces!')
 
         # Remove the trace lines from the plot
         # TODO: There may be an easier way to do this, but I couldn't find it.
@@ -245,7 +245,7 @@ def move(self, pos):
             # Get the spatial offset
             self.offset[i] = pos[0] - self.trace_cen[self.reference_row,i]
             # Report
-            msgs.info(f'Offsetting trace {i} by {self.offset[i]} pixels')
+            log.info(f'Offsetting trace {i} by {self.offset[i]} pixels')
             # Reset the line data
             self.trace_plot[i].set_data((self.trace_cen[:,i] + self.offset[i], self.spec_pix))
             # Update the plot
@@ -307,7 +307,7 @@ def add_trace(self, spat, side):
         # Draw the plot
         pyplot.draw()
         # Report
-        msgs.info(f'Added {"right" if side > 0 else "left"} trace passing through '
+        log.info(f'Added {"right" if side > 0 else "left"} trace passing through '
                   f'({spat:.1f}, {self.reference_row:.2f}).')
 
     def add_left(self, pos):
diff --git a/pypeit/core/gui/identify.py b/pypeit/core/gui/identify.py
index 0f50328c6f..3f0664530a 100644
--- a/pypeit/core/gui/identify.py
+++ b/pypeit/core/gui/identify.py
@@ -23,7 +23,7 @@
 from pypeit.par import pypeitpar
 from pypeit.core.wavecal import wv_fitting, waveio, wvutils
 
-from pypeit import msgs
+from pypeit import log
 
 from astropy.io import ascii as ascii_io
 from astropy.table import Table
@@ -254,19 +254,19 @@ def initialise(cls, arccen, lamps, slits, slit=0, par=None, wv_calib_all=None,
             print(f"Using {sigdetect} for sigma detection")
 
         # If a wavelength calibration has been performed already, load it:
-        msgs.info(f"Slit ID = {slit} (SPAT ID = {slits.spat_id[slit]})")
+        log.info(f"Slit ID = {slit} (SPAT ID = {slits.spat_id[slit]})")
         if wv_calib_all is not None:
             wv_calib = wv_calib_all.wv_fits[slit]
             if wv_calib.spat_id != slits.spat_id[slit]:
-                msgs.warning("Wavelength calibration slits did not match!")
-                msgs.info("Best-fitting wavelength solution will not be loaded.")
+                log.warning("Wavelength calibration slits did not match!")
+                log.info("Best-fitting wavelength solution will not be loaded.")
                 wv_calib = None
-            msgs.info(f"Loading lamps from wavelength solution: {wv_calib_all.lamps}")
+            log.info(f"Loading lamps from wavelength solution: {wv_calib_all.lamps}")
             lamps = wv_calib_all.lamps.split(",")
             # Must specify `wv_calib = None` otherwise
         else:
-            msgs.warning("No wavelength calibration supplied!")
-            msgs.info("No wavelength solution will be loaded.")
+            log.warning("No wavelength calibration supplied!")
+            log.info("No wavelength solution will be loaded.")
             wv_calib = None
 
         # Extract the lines that are detected in arccen
@@ -726,12 +726,12 @@ def make_order_vec(self, order_str):
         """
         mtch = re.search(r"(\d+):(\d+)", order_str)
         if mtch is None:
-            msgs.warning(f"Input string {order_str} is not in the correct format, e.g. (45:122)")
+            log.warning(f"Input string {order_str} is not in the correct format, e.g. (45:122)")
            return None
         start_order = int(mtch.groups()[0])
         end_order = int(mtch.groups()[1])
         if start_order > end_order:
-            msgs.warning(f"The start order {start_order} must be less than the end order {end_order}")
+            log.warning(f"The start order {start_order} must be less than the end order {end_order}")
             return None
         order_vec = np.arange(start_order, end_order+1)
         return order_vec
@@ -786,13 +786,13 @@ def store_solution(self, final_fit, binspec, rmstol=0.15,
             while ans != 'y' and ans != 'n':
                 ans = input("Would you like to store the line IDs? (y/n): ")
         else:
-            msgs.info("The line IDs are being saved to disk")
+            log.info("The line IDs are being saved to disk")
             ans = 'y'
         if ans == 'y':
             self.save_IDs()
 
         # Solution
         if 'rms' not in final_fit.keys():
-            msgs.warning("No wavelength solution available")
+            log.warning("No wavelength solution available")
             return
         elif final_fit['rms'] < rmstol or multi:
             ans = ''
@@ -800,7 +800,7 @@
             while ans != 'y' and ans != 'n':
                 ans = input("Would you like to write this wavelength solution to disk? (y/n): ")
         else:
-            msgs.info('Saving the wavelength solution to disk')
+            log.info('Saving the wavelength solution to disk')
             ans = 'y'
         if ans == 'y':
             # Arxiv solution
@@ -815,7 +815,7 @@
                     #better try again... Return to the start of the loop
                     continue
                 if len(order_vec) != len(wvcalib.wv_fits):
-                    msgs.warning(
+                    log.warning(
                         f'The number of orders in this list, {order_vec}\ndoes not match '
                         f'the number of traces: {len(wvcalib.wv_fits)}\nPlease try again.'
                     )
@@ -832,18 +832,18 @@
                 else:
                     make_arxiv = input("Save the wavelength solution as an arxiv? 
([y]/n): ") else: - msgs.info('Saving the wavelength solution as an arxiv file.') + log.info('Saving the wavelength solution as an arxiv file.') make_arxiv = 'y' if make_arxiv != 'n': if multi: # check that specdata is defined if specdata_multi is None: - msgs.warning('Skipping arxiv save because arc line spectra are not defined by pypeit/scripts/identify.py') + log.warning('Skipping arxiv save because arc line spectra are not defined by pypeit/scripts/identify.py') # check that the number of spectra in specdata is the same as the number of wvcalib solutions elif specdata_multi is not None and np.shape(specdata_multi)[0] != len(wvcalib.wv_fits): - msgs.warning('Skipping arxiv save because there are not enough orders for full template') - msgs.warning('To generate a valid arxiv to save, please rerun with the "--slits all" option.') + log.warning('Skipping arxiv save because there are not enough orders for full template') + log.warning('To generate a valid arxiv to save, please rerun with the "--slits all" option.') else: norder = np.shape(specdata_multi)[0] wavelengths = np.copy(specdata_multi) @@ -851,7 +851,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, if fits_dicts is not None: fitdict = fits_dicts[iord] else: - msgs.warning('skipping saving fits because fits_dicts is not defined by pypeit/scripts/identify.py') + log.warning('skipping saving fits because fits_dicts is not defined by pypeit/scripts/identify.py') fitdict = None if fitdict is not None and fitdict['full_fit'] is not None: wavelengths[iord,:] = fitdict['full_fit'].eval(np.arange(specdata_multi[iord,:].size) / @@ -888,14 +888,14 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, outfname = "wvcalib.fits" if wvcalib is not None: wvcalib.to_file(outfname, overwrite=True) - msgs.info("A WaveCalib container was written to wvcalib.fits") + log.info("A WaveCalib container was written to wvcalib.fits") # Ask if overwrite the existing WVCalib file only if force_save=False, otherwise don't overwrite ow_wvcalib = '' if not force_save: while ow_wvcalib != 'y' and ow_wvcalib != 'n': print('') - msgs.warning( + log.warning( 'Do you want to overwrite existing Calibrations/WaveCalib*.fits ' 'file?\nNOTE: To use this WaveCalib file the user will need to ' 'delete the other files in Calibrations/ \nand re-run run_pypeit.' @@ -915,7 +915,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, if slits: print(' '*10) - msgs.info('Unflagging Slits from WaveCalib: ') + log.info('Unflagging Slits from WaveCalib: ') slits.mask = np.zeros(slits.nslits, dtype=slits.bitmask.minimum_dtype()) slits.ech_order = order_vec slits.to_file() @@ -952,7 +952,7 @@ def store_solution(self, final_fit, binspec, rmstol=0.15, while ans != 'y' and ans != 'n': ans = input("A solution has not been saved - would you like to write the IDs to disk? 
(y/n): ") else: - msgs.info("The line IDs are being saved to disk") + log.info("The line IDs are being saved to disk") ans = 'y' if ans == 'y': self.save_IDs() @@ -1073,7 +1073,7 @@ def operations(self, key, axisID, event): self.update_infobox(message="WARNING: There are unsaved changes!!\nPress q again to exit", yesno=False) self._qconf = True else: - msgs.debug("Need to change this to kill and return the results to PypeIt") + log.debug("Need to change this to kill and return the results to PypeIt") plt.close() elif self._qconf: self.update_infobox(default=True) @@ -1089,7 +1089,7 @@ def operations(self, key, axisID, event): # Deal with the response if self._respreq[1] == "write": # First remove the old file, and save the new one - msgs.debug("Not implemented yet!") + log.debug("Not implemented yet!") self.write() else: return @@ -1113,7 +1113,7 @@ def operations(self, key, axisID, event): if self._fitdict['coeff'] is not None: self.auto_id() else: - msgs.info("You must identify a few lines first") + log.info("You must identify a few lines first") elif key == 'c': wclr = np.where((self._lineflg == 2) | (self._lineflg == 3)) self._lineflg[wclr] = 0 @@ -1153,11 +1153,11 @@ def operations(self, key, axisID, event): plt.close() elif key == 'r': if self._detns_idx == -1: - msgs.info("You must select a line first") + log.info("You must select a line first") elif self._fitr is None: - msgs.info("You must select a fitting region first") + log.info("You must select a fitting region first") else: - msgs.debug("Feature not yet implemented") + log.debug("Feature not yet implemented") elif key == 's': self.save_IDs() elif key == 'w': @@ -1253,7 +1253,7 @@ def fitsol_value(self, xfit=None, idx=None): else: return np.polyval(self._fitdict["coeff"], xfit[idx] / self._fitdict["scale"]) else: - msgs.debug("Cannot predict wavelength value - no fit has been performed") + log.debug("Cannot predict wavelength value - no fit has been performed") return None def fitsol_deriv(self, xfit=None, idx=None): @@ -1279,7 +1279,7 @@ def fitsol_deriv(self, xfit=None, idx=None): else: return np.polyval(cder, xfit[idx] / self._fitdict["scale"]) / self._fitdict["scale"] else: - msgs.debug("Cannot predict wavelength value - no fit has been performed") + log.debug("Cannot predict wavelength value - no fit has been performed") return None def add_new_detection(self): @@ -1454,15 +1454,15 @@ def load_IDs(self, wv_calib=None, fname='waveid.ascii'): self._lineids[idx] = wv_calib.wave_fit[ii] self._lineflg[idx] = 2 self._fitdict['polyorder'] = wv_calib.pypeitfit.order[0] - msgs.info("Loaded line IDs") + log.info("Loaded line IDs") elif os.path.exists(fname): data = ascii_io.read(fname, format='fixed_width') self._detns = data['pixel'].data self._lineids = data['wavelength'].data self._lineflg = data['flag'].data - msgs.info(f"Loaded line IDs:\n{fname}") + log.info(f"Loaded line IDs:\n{fname}") else: - msgs.info(f"Could not find line IDs:\n{fname}") + log.info(f"Could not find line IDs:\n{fname}") self._detnsy = self.get_ann_ypos() # Get the y locations of the annotations self.replot() @@ -1480,5 +1480,5 @@ def save_IDs(self, fname='waveid.ascii'): names=['pixel', 'wavelength', 'flag'], meta=meta) ascii_io.write(data, fname, format='fixed_width', overwrite=True) - msgs.info(f"Line IDs saved as:\n{fname}") + log.info(f"Line IDs saved as:\n{fname}") self.update_infobox(message="Line IDs saved as: {0:s}".format(fname), yesno=False) diff --git a/pypeit/core/gui/object_find.py b/pypeit/core/gui/object_find.py index 1fe52e142f..0004a70f23 
100644 --- a/pypeit/core/gui/object_find.py +++ b/pypeit/core/gui/object_find.py @@ -21,7 +21,7 @@ # TODO: Commented out the import of specobjs because it was only used by # the (presumably) defunct method. #from pypeit import specobjs -from pypeit import msgs +from pypeit import log # TODO: No globals please operations = dict({'cursor': "Select object trace (LMB click)\n" + @@ -562,7 +562,7 @@ def operations(self, key, axisID): self.update_infobox(message="WARNING: There are unsaved changes!!\nPress q again to exit", yesno=False) self._qconf = True else: - msgs.debug("Need to change this to kill and return the results to PypeIt") + log.debug("Need to change this to kill and return the results to PypeIt") plt.close() elif self._qconf: self.update_infobox(default=True) @@ -781,7 +781,7 @@ def print_pypeit_info(self): """print text that the user should insert into their .pypeit file """ if 1 in self._object_traces._add_rm: - msgs.info("Include the following info in the manual_extract column in your .pypeit file:\n") + log.info("Include the following info in the manual_extract column in your .pypeit file:\n") print(self._object_traces.get_pypeit_string()) def recenter(self): @@ -823,7 +823,7 @@ def get_specobjs(self): SpecObjs: SpecObjs Class """ if self._use_updates: - msgs.debug("Have not updated SpecObjs yet") + log.debug("Have not updated SpecObjs yet") return self.specobjs else: return None diff --git a/pypeit/core/gui/skysub_regions.py b/pypeit/core/gui/skysub_regions.py index 0a6551cc00..b7f84338c8 100644 --- a/pypeit/core/gui/skysub_regions.py +++ b/pypeit/core/gui/skysub_regions.py @@ -12,7 +12,7 @@ from matplotlib.widgets import Button import matplotlib.transforms as mtransforms -from pypeit import msgs +from pypeit import log from pypeit.core import skysub from pypeit.images import buildimage @@ -525,7 +525,7 @@ def operations(self, key, axisID): 'again to exit', yesno=False) self._qconf = True else: - msgs.debug("Need to change this to kill and return the results to PypeIt") + log.debug("Need to change this to kill and return the results to PypeIt") plt.close() elif self._qconf: self.update_infobox(default=True) @@ -586,7 +586,7 @@ def get_result(self): # Generate the mask inmask = skysub.generate_mask(self.pypeline, self._skyreg, self.slits, self.slits_left, self.slits_right) if np.all(np.logical_not(inmask)): - msgs.warning("Sky regions are empty - A sky regions calibration frame will not be generated") + log.warning("Sky regions are empty - A sky regions calibration frame will not be generated") return None # Build the Sky Regions calibration frame @@ -603,8 +603,8 @@ def get_outname(self): outfil = self._outname if os.path.exists(self._outname) and not self._overwrite: outfil = 'temp.fits' - msgs.warning(f"A SkyRegions file already exists and you have not forced an overwrite:\n{self._outname}") - msgs.info(f"Adopting the following output filename: {outfil}") + log.warning(f"A SkyRegions file already exists and you have not forced an overwrite:\n{self._outname}") + log.info(f"Adopting the following output filename: {outfil}") return outfil def recenter(self): diff --git a/pypeit/core/mosaic.py b/pypeit/core/mosaic.py index 4fe0b58bcc..ef905ab658 100644 --- a/pypeit/core/mosaic.py +++ b/pypeit/core/mosaic.py @@ -10,7 +10,7 @@ import numpy as np from scipy import ndimage -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit.core import transform from pypeit.utils import inverse @@ -267,7 +267,7 @@ def build_image_mosaic(imgs, tforms, 
ivar=None, bpm=None, mosaic_shape=None, cva raise PypeItError(f'Unknown value for overlap ({overlap}), must be "combine" or "error".') if any([not np.issubdtype(img.dtype, np.floating) for img in imgs]): - msgs.warning('Images must be floating type, and will be recast before transforming.') + log.warning('Images must be floating type, and will be recast before transforming.') # Get the output shape, if necessary if mosaic_shape is None: @@ -279,7 +279,7 @@ def build_image_mosaic(imgs, tforms, ivar=None, bpm=None, mosaic_shape=None, cva else: _tforms = tforms - msgs.info(f'Constructing image mosaic with {nimg} images and output shape {mosaic_shape}.') + log.info(f'Constructing image mosaic with {nimg} images and output shape {mosaic_shape}.') if ivar is not None: var = [inverse(_ivar) for _ivar in ivar] diff --git a/pypeit/core/parse.py b/pypeit/core/parse.py index a29764cee7..f9cbef88e0 100644 --- a/pypeit/core/parse.py +++ b/pypeit/core/parse.py @@ -12,7 +12,7 @@ import numpy as np # Logging -from pypeit import msgs +from pypeit import log from pypeit import PypeItError def load_sections(string, fmt_iraf=True): @@ -155,7 +155,7 @@ def parse_binning(binning:str): elif 'x' in binning: binspectral, binspatial = [int(item) for item in binning.split('x')] # LRIS elif binning == 'None': - msgs.warning("Assuming unbinned, i.e. 1x1") + log.warning("Assuming unbinned, i.e. 1x1") binspectral, binspatial = 1,1 else: binspectral, binspatial = [int(item) for item in binning.strip().split(' ')] # Gemini diff --git a/pypeit/core/pca.py b/pypeit/core/pca.py index 9ee6dd4520..098ba50998 100644 --- a/pypeit/core/pca.py +++ b/pypeit/core/pca.py @@ -12,7 +12,7 @@ from sklearn.decomposition import PCA -from pypeit import msgs +from pypeit import log from pypeit import utils from pypeit.core import fitting @@ -98,7 +98,7 @@ def pca_decomposition(vectors, npca=None, pca_explained_var=99.0, mean=None): # Number of components for a full decomposition npca_tot = var_growth.size - msgs.info('The unconstrained PCA yields {0} components.'.format(npca_tot)) + log.info('The unconstrained PCA yields {0} components.'.format(npca_tot)) if npca is None: # Assign the number of components to use based on the variance # percentage @@ -112,7 +112,7 @@ def pca_decomposition(vectors, npca=None, pca_explained_var=99.0, mean=None): + ', which is less than the requested {0} component(s).'.format(npca) + ' Lower the number of requested PCA component(s) or turn off the PCA.') - msgs.info('PCA will include {0} component(s), '.format(npca) + log.info('PCA will include {0} component(s), '.format(npca) + 'containing {0:.3f}% of the total variance.'.format(var_growth[npca-1])) # Determine the PCA coefficients with the revised number of diff --git a/pypeit/core/pixels.py b/pypeit/core/pixels.py index 7b72bcafb1..8ae90ec395 100644 --- a/pypeit/core/pixels.py +++ b/pypeit/core/pixels.py @@ -8,7 +8,7 @@ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError @@ -103,11 +103,11 @@ def ximg_and_edgemask(lord_in, rord_in, slitpix, trim_edg=(3,3), xshift=0.): badp = xsize <= 0. if np.any(badp): meds = np.median(xsize) - msgs.warning('Something goofy in slit # {:d}'.format(islit)) - msgs.warning('Probably a bad slit (e.g. 
a star box)') - msgs.warning('It is best to expunge this slit') - msgs.warning('Proceed at your own risk, with a slit width of {}'.format(meds)) - msgs.warning('Or set meds to your liking') + log.warning('Something goofy in slit # {:d}'.format(islit)) + log.warning('Probably a bad slit (e.g. a star box)') + log.warning('It is best to expunge this slit') + log.warning('Proceed at your own risk, with a slit width of {}'.format(meds)) + log.warning('Or set meds to your liking') #rord[:, islit] = lord[:, islit] + meds # Loop down the slit diff --git a/pypeit/core/procimg.py b/pypeit/core/procimg.py index d48f7a2cdd..c7fd0a0ede 100644 --- a/pypeit/core/procimg.py +++ b/pypeit/core/procimg.py @@ -15,7 +15,7 @@ import scipy.optimize import scipy.signal -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils @@ -167,7 +167,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m if varframe is not None and varframe.shape != sciframe.shape: raise PypeItError('Variance frame must match shape of science frame.') - msgs.info("Detecting cosmic rays with the L.A.Cosmic algorithm") + log.info("Detecting cosmic rays with the L.A.Cosmic algorithm") # Setup # NOTE: We only need a copy of the image if we're performing more than one @@ -205,7 +205,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m for i in range(maxiter): if varframe is None: - msgs.info("Updating the noise model") + log.info("Updating the noise model") m5 = scipy.ndimage.median_filter(_sciframe, size=5, mode='mirror') noise = np.sqrt(np.absolute(m5)) # NOTE: Inverting the error avoids division by 0 errors @@ -215,7 +215,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m # get its S/N. NOTE: the division by 2 in the S/N calculation is from # the 2x2 subsampling. astropy.convolution.convolve gives the same # result as scipy.signal.convolve2d, but is nearly a factor of 2 faster. - msgs.info("Convolving image with Laplacian kernel") + log.info("Convolving image with Laplacian kernel") deriv = convolve(boxcar_replicate(_sciframe, 2), laplkernel, normalize_kernel=False, boundary='extend') s = utils.rebinND(np.clip(deriv, 0, None), _sciframe.shape) * _inv_err / 2.0 @@ -226,13 +226,13 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m # Candidate cosmic rays cosmics = sp > sigclip ncr = np.sum(cosmics) - msgs.info(f'Found {ncr} candidate cosmic-ray pixels') + log.info(f'Found {ncr} candidate cosmic-ray pixels') if _bpm is not None: # Remove known bad pixels cosmics &= np.logical_not(_bpm) ncr = np.sum(cosmics) - msgs.info(f'Reduced to {ncr} candidates after excluding known bad pixels.') + log.info(f'Reduced to {ncr} candidates after excluding known bad pixels.') if remove_compact_obj: # Build the fine structure image @@ -243,11 +243,11 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m # Require cosmics to have significant contrast cosmics &= sp/f > objlim ncr = np.sum(cosmics) - msgs.info(f'Reduced to {ncr} candidates after excluding compact objects.') + log.info(f'Reduced to {ncr} candidates after excluding compact objects.') # What follows is a special treatment for neighbors, with more relaxed # constraints. 
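As an illustration of the Laplacian-significance step above, the following is a minimal sketch; laplacian_snr and its arguments are hypothetical names, and scipy.ndimage stands in for the astropy convolution the code uses.

import numpy as np
from scipy import ndimage

def laplacian_snr(sciframe, err):
    # Laplacian kernel of van Dokkum (2001); cosmic-ray hits respond strongly
    laplkernel = np.array([[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]])
    # Block-replicate each pixel 2x2 so single-pixel hits keep a sharp edge
    sub = np.repeat(np.repeat(sciframe, 2, axis=0), 2, axis=1)
    # Convolve, clip the negative lobes, and block-average back to the native grid
    deriv = np.clip(ndimage.convolve(sub, laplkernel, mode='nearest'), 0., None)
    deriv = deriv.reshape(sciframe.shape[0], 2, sciframe.shape[1], 2).mean(axis=(1, 3))
    # The division by 2 compensates for the 2x2 subsampling, as noted in the code
    return deriv / err / 2.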
- msgs.info("Finding neighboring pixels affected by cosmic rays") + log.info("Finding neighboring pixels affected by cosmic rays") # We grow these cosmics a first time to determine the immediate # neighborhod, keeping those that also meet the S/N requirement cosmics = scipy.ndimage.binary_dilation(cosmics, structure=growkernel) @@ -257,18 +257,18 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m cosmics = scipy.ndimage.binary_dilation(cosmics, structure=growkernel) cosmics &= sp > sigcliplow ncr = np.sum(cosmics) - msgs.info(f'Changed to {ncr} candidates after evaluating neighboring pixels.') + log.info(f'Changed to {ncr} candidates after evaluating neighboring pixels.') if _bpm is not None: # Remove known bad pixels cosmics &= np.logical_not(_bpm) ncr = np.sum(cosmics) - msgs.info(f'Reduced to {ncr} candidates after excluding known bad pixels.') + log.info(f'Reduced to {ncr} candidates after excluding known bad pixels.') # Determine how many new cosmics were found nnew = np.sum(np.logical_not(crmask) & cosmics) crmask |= cosmics - msgs.info(f'Iteration {i+1}: {np.sum(crmask)} pixels identified as cosmic rays ' + log.info(f'Iteration {i+1}: {np.sum(crmask)} pixels identified as cosmic rays ' f'({nnew} are new)') if nnew == 0 or i == maxiter - 1: # TODO: Warn the user if the maximum number of iterations was @@ -276,7 +276,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m break # Prepare for the next iteration - msgs.info('Preparing for next iteration') + log.info('Preparing for next iteration') _sciframe = boxcar_fill(_sciframe, 5, bpm=crmask if _bpm is None else crmask | _bpm) if not rm_false_pos: @@ -284,7 +284,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m # Additional algorithms (not traditionally implemented by LA cosmic) to # remove some false positives. - #msgs.debug("The following algorithm would be better on the rectified, tilts-corrected image") + #log.debug("The following algorithm would be better on the rectified, tilts-corrected image") filt = scipy.ndimage.sobel(sciframe, axis=1, mode='constant') _inv_mad = utils.inverse(np.sqrt(np.abs(sciframe))) # Avoid divisions by 0 filty = scipy.ndimage.sobel(filt * _inv_mad, axis=0, mode='constant') @@ -297,7 +297,7 @@ def lacosmic(sciframe, saturation=None, nonlinear=1., bpm=None, varframe=None, m sigsmth[np.isnan(sigsmth)] = 0.0 crmask &= sigsmth > sigclip - msgs.info(f'{np.sum(crmask)} pixels identified as cosmic rays after removing false positives') + log.info(f'{np.sum(crmask)} pixels identified as cosmic rays after removing false positives') return grow_mask(crmask, grow) if grow > 0 else crmask @@ -459,7 +459,7 @@ def gain_frame(amp_img, gain): `numpy.ndarray`_: Image with the gain for each pixel. """ # TODO: Remove this or actually do it. 
- # msgs.warning("Should probably be measuring the gain across the amplifier boundary") + # log.warning("Should probably be measuring the gain across the amplifier boundary") # Build and return the gain image gain_img = np.zeros_like(amp_img, dtype=float) for i,_gain in enumerate(gain): @@ -785,7 +785,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 Returns: `numpy.ndarray`_: The input frame with the pattern subtracted """ - msgs.info("Analyzing detector pattern") + log.info("Analyzing detector pattern") # Copy the data so that the subtraction is not done in place frame_orig = rawframe.copy() @@ -848,7 +848,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 frq_mod = np.polyval(cc, all_rows) * (overscan.shape[1]-1) # Convert frequency to the size of the overscan region - msgs.info("Subtracting detector pattern from amplifier {0:d} with frequency = {1:f}".format(amp, use_fr)) + log.info("Subtracting detector pattern from amplifier {0:d} with frequency = {1:f}".format(amp, use_fr)) # Get a first guess of the amplitude and phase information xdata, step = np.linspace(0.0, 1.0, overscan.shape[1], retstep=True) @@ -860,7 +860,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 # STEP 2 - Using the model frequency, calculate how amplitude depends on pixel row (usually constant) # Use the above to as initial guess parameters for a chi-squared minimisation of the amplitudes - msgs.info("Measuring amplitude-pixel dependence of amplifier {0:d}".format(amp)) + log.info("Measuring amplitude-pixel dependence of amplifier {0:d}".format(amp)) nspec = overscan.shape[0] model_pattern = np.zeros_like(oscandata) cosfunc = lambda xarr, *p: p[0] * np.cos(2.0 * np.pi * xarr + p[1]) @@ -883,10 +883,10 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 bounds=([0, -np.inf],[np.inf, np.inf]) ) except ValueError: - msgs.warning("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + log.warning("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue except RuntimeError: - msgs.warning("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + log.warning("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue amps_fit[ii] = popt[0] # Construct a model of the amplitudes as a fucntion of spectral pixel @@ -895,7 +895,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 # STEP 3 - Using the model frequency and amplitude, calculate the phase of every pixel row # Now determine the phase, given a prior on the amplitude and frequency - msgs.info("Calculating pattern phases of amplifier {0:d}".format(amp)) + log.info("Calculating pattern phases of amplifier {0:d}".format(amp)) cosfunc = lambda xarr, *p: np.cos(2.0 * np.pi * xarr + p[0]) cosfunc_full = lambda xarr, *p: p[0] * np.cos(2.0 * np.pi * p[1] * xarr + p[2]) for ii in range(nspec): @@ -913,10 +913,10 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 bounds=([-np.inf], [np.inf]) ) except ValueError: - msgs.warning("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + log.warning("Input data invalid for pattern subtraction of row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue except RuntimeError: - msgs.warning("Pattern subtraction fit failed 
for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) + log.warning("Pattern subtraction fit failed for row {0:d}/{1:d}".format(ii + 1, overscan.shape[0])) continue # Calculate the model pattern, given the amplitude, frequency and phase information model_pattern[ii, :] = cosfunc_full(xdata_all, amp_mod[ii], frq_mod[ii], popt[0]) @@ -925,7 +925,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 full_model[osd_slice] = model_pattern old_ron = astropy.stats.sigma_clipped_stats(overscan, sigma=5, stdfunc='mad_std')[-1] new_ron = astropy.stats.sigma_clipped_stats(overscan-full_model[os_slice], sigma=5, stdfunc='mad_std')[-1] - msgs.info(f'Effective read noise of amplifier {amp} reduced by a factor of {old_ron/new_ron:.2f}x') + log.info(f'Effective read noise of amplifier {amp} reduced by a factor of {old_ron/new_ron:.2f}x') # Transpose if the input frame if applied along a different axis if axis == 0: @@ -1432,7 +1432,7 @@ def nonlinear_counts(counts, ampimage, nonlinearity_coeffs): corr_counts : Array with the corrected counts. """ - msgs.info('Applying a non-linearity correction to the counts.') + log.info('Applying a non-linearity correction to the counts.') # Check the input if counts.shape != ampimage.shape: raise PypeItError('Counts and amplifier image have different shapes.') diff --git a/pypeit/core/pydl.py b/pypeit/core/pydl.py index 983924e7fe..f3ccf58c47 100644 --- a/pypeit/core/pydl.py +++ b/pypeit/core/pydl.py @@ -5,7 +5,7 @@ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit.core import basis @@ -686,7 +686,7 @@ def djs_reject(data, model, outmask=None, inmask=None, # ToDO It would be nice to come up with a way to use MAD but also use the errors in the rejection, i.e. compute the rejection threhsold using the mad. if upper is None and lower is None and maxdev is None: - msgs.warning('upper, lower, and maxdev are all set to None. No rejection performed since no rejection criteria were specified.') + log.warning('upper, lower, and maxdev are all set to None. No rejection performed since no rejection criteria were specified.') if (use_mad and (invvar is not None)): raise ValueError('use_mad can only be set to True innvar = None. This code only computes a mad' @@ -697,7 +697,7 @@ def djs_reject(data, model, outmask=None, inmask=None, # ToDo JFH: I think it would actually make more sense for outmask be a required input parameter (named lastmask or something like that). if outmask is None: outmask = np.ones(data.shape, dtype='bool') - msgs.warning('outmask was not specified as an input parameter. Cannot asess convergence of rejection -- qdone is automatically True') + log.warning('outmask was not specified as an input parameter. 
Cannot asess convergence of rejection -- qdone is automatically True') else: if data.shape != outmask.shape: raise ValueError('Dimensions of data and outmask do not agree.') @@ -1543,7 +1543,7 @@ def spheregroup(ra, dec, linklength, chunksize=None): if chunksize is not None: if chunksize < 4.0*linklength: chunksize = 4.0*linklength - msgs.warning("chunksize changed to {0:.2f}.".format(chunksize)) + log.warning("chunksize changed to {0:.2f}.".format(chunksize)) else: chunksize = max(4.0*linklength, 0.1) # diff --git a/pypeit/core/qa.py b/pypeit/core/qa.py index a7929f2cbb..505009aaee 100644 --- a/pypeit/core/qa.py +++ b/pypeit/core/qa.py @@ -10,9 +10,9 @@ from IPython import embed -# CANNOT INCLUDE msgs IN THIS MODULE AS -# THE HTML GENERATION OCCURS FROM msgs -#from pypeit import msgs +# CANNOT INCLUDE log IN THIS MODULE AS +# THE HTML GENERATION OCCURS FROM log +#from pypeit import log # TODO: Move these names to the appropriate class. This always writes # to QA directory, even if the user sets something else... diff --git a/pypeit/core/scattlight.py b/pypeit/core/scattlight.py index 5d027ba4d5..9ee9941283 100644 --- a/pypeit/core/scattlight.py +++ b/pypeit/core/scattlight.py @@ -9,7 +9,7 @@ from scipy import signal, interpolate, ndimage from IPython import embed -from pypeit import msgs, utils +from pypeit import log, utils from pypeit import PypeItError @@ -221,17 +221,17 @@ def scattered_light(frame, bpm, offslitmask, x0, bounds, detpad=300, debug=False wpix = np.where(offslitmask_pad) # Compute the best-fitting model parameters - msgs.info("Performing a least-squares fit to the scattered light") + log.info("Performing a least-squares fit to the scattered light") res_lsq = least_squares(scattlight_resid, x0, bounds=bounds, args=(wpix, _frame_pad), verbose=2, ftol=1.0E-4) # Store if this is a successful fit success = res_lsq.success if success: - msgs.info("Generating best-fitting scattered light model") + log.info("Generating best-fitting scattered light model") scatt_img = scattered_light_model(res_lsq.x, _frame_pad)[detpad:-detpad, detpad:-detpad] else: - msgs.warning("Scattered light model fitting failed") + log.warning("Scattered light model fitting failed") scatt_img = np.zeros_like(frame) if debug: @@ -283,7 +283,7 @@ def mask_slit_regions(offslitmask, centrace, mask_regions=None): """ # Check if there are regions to be masked if mask_regions is None: - msgs.warning("There are no inter-slit regions specified that need to be masked") + log.warning("There are no inter-slit regions specified that need to be masked") return offslitmask elif isinstance(mask_regions, int): # Convert this to a list @@ -344,7 +344,7 @@ def fine_correction(frame, bpm, offslitmask, method='median', polyord=2, debug=F """ if method not in ['median', 'poly']: raise PypeItError("Unrecognized method to determine the fine correction to the scattered light: {:s}".format(method)) - msgs.info("Performing a fine correction to the scattered light using the {:s} method".format(method)) + log.info("Performing a fine correction to the scattered light using the {:s} method".format(method)) nspec, nspat = frame.shape if method == 'median': # Use the median of the off-slit pixels to determine the scattered light diff --git a/pypeit/core/skysub.py b/pypeit/core/skysub.py index 556512b3a1..abfd8de861 100644 --- a/pypeit/core/skysub.py +++ b/pypeit/core/skysub.py @@ -16,7 +16,7 @@ from pypeit.core import basis, pixels, extract from pypeit.core import fitting from pypeit.core import procimg -from pypeit import msgs, utils, 
bspline, slittrace +from pypeit import log, utils, bspline, slittrace from pypeit import PypeItError from pypeit.display import display @@ -142,7 +142,7 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non & np.isfinite(image) & np.isfinite(ivar) bad_pixel_frac = np.sum(thismask & np.logical_not(gpm))/np.sum(thismask) if bad_pixel_frac > max_mask_frac: - msgs.warning( + log.warning( f'This slit/order has {100.0*bad_pixel_frac:.3f}% of the pixels masked, which exceeds ' f'the threshold of {100.0*max_mask_frac:.3f}%.\nThere is likely a problem with this ' 'slit. Giving up on global sky-subtraction.' @@ -173,7 +173,7 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non kwargs_bspline={'bkspace':bsp}, kwargs_reject={'groupbadpix': True, 'maxrej': 10}) if exit_status != 0: - msgs.warning( + log.warning( 'Global sky-subtraction did not exit cleanly for initial positive sky fit.\n' 'Initial masking based on positive sky fit will be skipped' ) @@ -192,7 +192,7 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non poly_basis = basis.flegendre(2.0*ximg_fit - 1.0, npoly_fit) # Perform the full fit now - msgs.info("Full fit in global sky sub.") + log.info("Full fit in global sky sub.") skyset, outmask, yfit, _, exit_status = fitting.bspline_profile(pix, sky, sky_ivar, poly_basis, ingpm=inmask_fit, nord=4, upper=sigrej, lower=sigrej, maxiter=maxiter, kwargs_bspline={'bkspace':bsp}, @@ -203,7 +203,7 @@ def global_skysub(image, ivar, tilts, thismask, slit_left, slit_righ, inmask=Non # better understand what this functionality is doing, but it makes the rejection much more quickly approach a small # chi^2 if exit_status == 1: - msgs.warning( + log.warning( 'Maximum iterations reached in bspline_profile global sky-subtraction for ' f'npoly={npoly_fit}.\nRedoing sky-subtraction without polynomial degrees of freedom' ) @@ -315,7 +315,7 @@ def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, if nc != nx: raise ValueError('Object profile should have oprof.shape[0] equal to nx') - msgs.info('Iter Chi^2 Rejected Pts') + log.info('Iter Chi^2 Rejected Pts') xmin = 0.0 xmax = 1.0 @@ -345,7 +345,7 @@ def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, relative=relative, kwargs_reject={'groupbadpix': True, 'maxrej': 5}) else: - msgs.warning('All pixels are masked in skyoptimal. Not performing local sky subtraction.') + log.warning('All pixels are masked in skyoptimal. Not performing local sky subtraction.') return np.zeros_like(piximg), np.zeros_like(piximg), gpm chi2 = (data[good] - yfit1) ** 2 * ivar[good] @@ -355,8 +355,8 @@ def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, chi2_sigrej = chi2_srt[sigind] mask1 = (chi2 < chi2_sigrej) - msgs.info('2nd round....') - msgs.info('Iter Chi^2 Rejected Pts') + log.info('2nd round....') + log.info('Iter Chi^2 Rejected Pts') if np.any(mask1): sset, gpm_good, yfit, red_chi, exit_status \ = fitting.bspline_profile(piximg[good], data[good], ivar[good], profile_basis[good,:], @@ -364,7 +364,7 @@ def skyoptimal(piximg, data, ivar, oprof, sigrej=3.0, npoly=1, spatial_img=None, relative=relative, kwargs_reject={'groupbadpix': True, 'maxrej': 1}) else: - msgs.warning('All pixels are masked in skyoptimal after first round of rejection. Not performing local sky subtraction.') + log.warning('All pixels are masked in skyoptimal after first round of rejection. 
Not performing local sky subtraction.') return np.zeros_like(piximg), np.zeros_like(piximg), gpm ncoeff = npoly + nobj @@ -443,7 +443,7 @@ def optimal_bkpts(bkpts_optimal, bsp_min, piximg, sampmask, samp_frac=0.80, fullbkpt_grid = fullbkpt_grid[keep] used_grid = False if not bkpts_optimal: - msgs.info('bkpts_optimal = False --> using uniform bkpt spacing: bsp={:5.3f}'.format(bsp_min)) + log.info('bkpts_optimal = False --> using uniform bkpt spacing: bsp={:5.3f}'.format(bsp_min)) fullbkpt = fullbkpt_grid used_grid = True else: @@ -471,8 +471,8 @@ def optimal_bkpts(bkpts_optimal, bsp_min, piximg, sampmask, samp_frac=0.80, dsamp = scipy.ndimage.convolve(dsamp_med, kernel, mode='reflect') # if more than samp_frac of the pixels have dsamp < bsp_min then just use a uniform breakpoint spacing if np.sum(dsamp <= bsp_min) > samp_frac*nbkpt: - msgs.info('Sampling of wavelengths is nearly continuous.') - msgs.info('Using uniform bkpt spacing: bsp={:5.3f}'.format(bsp_min)) + log.info('Sampling of wavelengths is nearly continuous.') + log.info('Using uniform bkpt spacing: bsp={:5.3f}'.format(bsp_min)) fullbkpt = fullbkpt_grid used_grid = True else: @@ -848,7 +848,7 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, obj_profiles = np.zeros((nspec, nspat, objwork), dtype=float) sigrej_eff = sigrej for iiter in range(1, niter + 1): - msgs.info('--------------------------REDUCING: Iteration # ' + '{:2d}'.format(iiter) + ' of ' + + log.info('--------------------------REDUCING: Iteration # ' + '{:2d}'.format(iiter) + ' of ' + '{:2d}'.format(niter) + '---------------------------------------------------') img_minsky = sciimg - skyimage for ii in range(objwork): @@ -856,10 +856,10 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, if iiter == 1: # If this is the first iteration, print status message. Initiate profile fitting with a simple # boxcar extraction.
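The uniform-spacing fallback in optimal_bkpts above amounts to asking whether the wavelength sampling is already finer than the minimum breakpoint separation; a hedged sketch with assumed names, not the function's actual internals:

import numpy as np

def uniform_bkpts_if_dense(piximg, sampmask, bsp_min, samp_frac=0.80):
    # Sorted coordinates of the pixels available to constrain the b-spline
    samp = np.sort(piximg[sampmask].ravel())
    dsamp = np.diff(samp)
    if np.sum(dsamp <= bsp_min) > samp_frac * dsamp.size:
        # Sampling is nearly continuous: a uniform grid at bsp_min suffices
        return np.arange(samp[0], samp[-1] + bsp_min, bsp_min)
    # Otherwise the caller builds breakpoints that track the actual sampling
    return None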
- msgs.info("----------------------------------- PROFILE FITTING --------------------------------------------------------") - msgs.info("Fitting profile for obj # " + "{:}".format(sobjs[iobj].OBJID) + " of {:}".format(nobj)) - msgs.info("At x = {:5.2f}".format(sobjs[iobj].SPAT_PIXPOS) + " on slit # {:}".format(sobjs[iobj].slit_order)) - msgs.info("------------------------------------------------------------------------------------------------------------") + log.info("----------------------------------- PROFILE FITTING --------------------------------------------------------") + log.info("Fitting profile for obj # " + "{:}".format(sobjs[iobj].OBJID) + " of {:}".format(nobj)) + log.info("At x = {:5.2f}".format(sobjs[iobj].SPAT_PIXPOS) + " on slit # {:}".format(sobjs[iobj].slit_order)) + log.info("------------------------------------------------------------------------------------------------------------") # TODO -- Use extract_specobj_boxcar to avoid code duplication extract.extract_boxcar(sciimg-skyimage, modelivar, outmask, waveimg, skyimage, @@ -913,8 +913,8 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, #sobjs[iobj].maskwidth = maskwidth if sobjs[iobj].prof_nsigma is None else \ # sobjs[iobj].prof_nsigma * (sobjs[iobj].FWHM / 2.3548) else: - msgs.warning("Bad extracted wavelengths in local_skysub_extract") - msgs.warning("Skipping this profile fit and continuing.....") + log.warning("Bad extracted wavelengths in local_skysub_extract") + log.warning("Skipping this profile fit and continuing.....") # Fit the local sky sky_bmodel = np.array(0.0) @@ -935,12 +935,12 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, fullbkpt=fullbkpt, sigrej=sigrej_eff, npoly=npoly) iterbsp = iterbsp + 1 if (not sky_bmodel.any()) & (iterbsp <= 3): - msgs.warning('***************************************') - msgs.warning('WARNING: bspline sky-subtraction failed') - msgs.warning('Increasing bkpt spacing by 20%. Retry') - msgs.warning( + log.warning('***************************************') + log.warning('WARNING: bspline sky-subtraction failed') + log.warning('Increasing bkpt spacing by 20%. Retry') + log.warning( 'Old bsp = {:5.2f}'.format(bsp_now) + '; New bsp = {:5.2f}'.format(1.2 ** (iterbsp) * bsp)) - msgs.warning('***************************************') + log.warning('***************************************') if sky_bmodel.any(): skyimage.flat[isub] = sky_bmodel @@ -971,28 +971,28 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, sigrej_eff = np.fmax(np.sqrt(chi2_sigrej), sigrej) # Maximum sigrej is sigrej_ceil (unless this is a standard) #sigrej_eff = np.fmin(sigrej_eff, sigrej_ceil) - msgs.info('Measured effective rejection from distribution of chi^2') - msgs.info('Instead of rejecting sigrej = {:5.2f}'.format(sigrej) + + log.info('Measured effective rejection from distribution of chi^2') + log.info('Instead of rejecting sigrej = {:5.2f}'.format(sigrej) + ', use threshold sigrej_eff = {:5.2f}'.format(sigrej_eff)) # Explicitly mask > sigrej outliers using the distribution of chi2 but only in the region that was actually fit. # This prevents e.g. 
excessive masking of slit edges outmask.flat[isub[igood1]] = outmask.flat[isub[igood1]] & (chi2[igood1] < chi2_sigrej) & ( sciivar.flat[isub[igood1]] > 0.0) nrej = outmask.flat[isub[igood1]].sum() - msgs.info( + log.info( 'Iteration = {:d}'.format(iiter) + ', rejected {:d}'.format(nrej) + ' of ' + '{:d}'.format( igood1.sum()) + ' fit pixels') elif no_local_sky: pass else: - msgs.warning('ERROR: Bspline sky subtraction failed after 4 iterations of bkpt spacing') - msgs.warning(' Moving on......') + log.warning('ERROR: Bspline sky subtraction failed after 4 iterations of bkpt spacing') + log.warning(' Moving on......') # obj_profiles = np.zeros_like(obj_profiles) isub, = np.where(localmask.flatten()) # Just replace with the global sky skyimage.flat[isub] = global_sky.flat[isub] if iiter == niter: - msgs.warning('WARNING: LOCAL SKY SUBTRACTION NOT PERFORMED') + log.warning('WARNING: LOCAL SKY SUBTRACTION NOT PERFORMED') outmask_extract = outmask.copy() if use_2dmodel_mask else inmask.copy() @@ -1004,7 +1004,7 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, for ii in range(objwork): iobj = group[ii] - msgs.info('Extracting obj # {:d}'.format(iobj + 1) + ' of {:d}'.format(nobj) + + log.info('Extracting obj # {:d}'.format(iobj + 1) + ' of {:d}'.format(nobj) + ' with objid = {:d}'.format(sobjs[iobj].OBJID) + ' on slit # {:d}'.format(sobjs[iobj].slit_order) + ' at x = {:5.2f}'.format(sobjs[iobj].SPAT_PIXPOS)) this_profile = obj_profiles[:, :, ii] @@ -1361,14 +1361,14 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, str_out += f'{slitids[iord]:<8d}{order_vec[iord]:<8d}{order_snr[iord,ibright]:>10.2f}\n' dash = '-'*27 dash_big = '-'*40 - msgs.info( + log.info( f'\nReducing orders in order of S/N of brightest object:\n{dash}\n' f'{"slit":<8s}{"order":<8s}{"S/N":>10s}\n{dash}\n' + str_out ) # Loop over orders in order of S/N ratio (from highest to lowest) for the brightest object for iord in srt_order_snr: order = order_vec[iord] - msgs.info("Local sky subtraction and extraction for slit/order: {:d}/{:d}".format(iord,order)) + log.info("Local sky subtraction and extraction for slit/order: {:d}/{:d}".format(iord,order)) other_orders = (fwhm_here > 0) & np.invert(fwhm_was_fit) other_fit = (fwhm_here > 0) & fwhm_was_fit # Loop over objects in order of S/N ratio (from highest to lowest) @@ -1407,7 +1407,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, order_snr[other_orders,ibright], fwhm_here[other_orders]): str_out += f'{slit_now:<8d}{order_now:<8d}{snr_now:>10.2f}{fwhm_now:>10.2f}' - msgs.info( + log.info( f'\nUsing {fwhm_str} for FWHM of object={uni_objid[iobj]} on slit/order: ' f'{iord}/{order}\n{dash_big}\n' f'{"slit":<8s}{"order":<8s}{"SNR":>10s}{"FWHM":>10s}\n{dash_big}\n' @@ -1518,7 +1518,7 @@ def convolve_skymodel(input_img, fwhm_map, thismask, subpixel=5, nsample=10): if kk == 0: # The first element is the original image continue - msgs.info(f"Image spectral convolution - Evaluating grid point {kk}/{nsample - 1}") + log.info(f"Image spectral convolution - Evaluating grid point {kk}/{nsample - 1}") # Generate a kernel and normalise kernsize = 2 * int(5 * kernwids[kk] + 0.5) + 1 # Use a Gaussian kernel, covering +/-5sigma midp = (kernsize - 1) // 2 @@ -1528,9 +1528,9 @@ def convolve_skymodel(input_img, fwhm_map, thismask, subpixel=5, nsample=10): conv_allkern[:, :, kk] = utils.rebinND(utils.convolve_fft(_input_img, kern, _input_msk), input_img.shape) # Collect all of the images - msgs.info(f"Collating all 
convolution steps") + log.info(f"Collating all convolution steps") conv_interp = RegularGridInterpolator((np.arange(conv_allkern.shape[0]), np.arange(nspat), kernwids), conv_allkern) - msgs.info(f"Applying the convolution solution") + log.info(f"Applying the convolution solution") eval_spec, eval_spat = np.where(thismask) sciimg_conv = np.copy(input_img) sciimg_conv[thismask] = conv_interp((eval_spec, eval_spat, sig_exc)) diff --git a/pypeit/core/slitdesign_matching.py b/pypeit/core/slitdesign_matching.py index 084fd137d3..8bc43749f2 100644 --- a/pypeit/core/slitdesign_matching.py +++ b/pypeit/core/slitdesign_matching.py @@ -20,7 +20,7 @@ from astropy.stats import sigma_clipped_stats from pypeit.core import fitting -from pypeit import msgs +from pypeit import log def best_offset(x_det, x_model, step=1, xlag_range=None): @@ -56,8 +56,8 @@ def best_offset(x_det, x_model, step=1, xlag_range=None): # we keep only the x_model values that are in the current detector wkeep =(x_model > min_x_det+xlag_range[0]) & (x_model < max_x_det+xlag_range[1]) if x_model[wkeep].size<2: - msgs.warning('Working between {} and {}'.format(min_x_det+xlag_range[0], max_x_det+xlag_range[1])) - msgs.warning('Not enough lines to run!!!') + log.warning('Working between {} and {}'.format(min_x_det+xlag_range[0], max_x_det+xlag_range[1])) + log.warning('Not enough lines to run!!!') sdev = 1e10 return 0. x_model_trim = x_model[wkeep] @@ -256,9 +256,9 @@ def slit_match(x_det, x_model, step=1, xlag_range=[-50,50], sigrej=3, print_matc # Both duplicates and matches with high RMS are considered bad dupl = dupl | out if edge is not None: - msgs.warning('{} duplicate match(es) for {} edges'.format(dupl[dupl == 1].size, edge)) + log.warning('{} duplicate match(es) for {} edges'.format(dupl[dupl == 1].size, edge)) else: - msgs.warning('{} duplicate match(es)'.format(dupl[dupl == 1].size)) + log.warning('{} duplicate match(es)'.format(dupl[dupl == 1].size)) # I commented the 3 lines below because I don't really need to trim the duplicate matches. I just # propagate the flag. 
# good = dupl == 0 @@ -266,14 +266,14 @@ def slit_match(x_det, x_model, step=1, xlag_range=[-50,50], sigrej=3, print_matc # x_det=x_det[good] if print_matches: if edge is not None: - msgs.info('-----------------------------------------------') - msgs.info(' {} slit edges '.format(edge)) - msgs.info('-----------------------------------------------') - msgs.info('Index omodel_edge spat_edge ') - msgs.info('-----------------------------------------------') + log.info('-----------------------------------------------') + log.info(' {} slit edges '.format(edge)) + log.info('-----------------------------------------------') + log.info('Index omodel_edge spat_edge ') + log.info('-----------------------------------------------') for i in range(ind.size): - msgs.info('{} {} {}'.format(ind[i], x_model[ind][i], x_det[i])) - msgs.info('-----------------------------------------------') + log.info('{} {} {}'.format(ind[i], x_model[ind][i], x_det[i])) + log.info('-----------------------------------------------') return ind, dupl, coeff, sigres diff --git a/pypeit/core/spectrum.py b/pypeit/core/spectrum.py index 75b073cb4e..16b2d0f018 100644 --- a/pypeit/core/spectrum.py +++ b/pypeit/core/spectrum.py @@ -10,7 +10,7 @@ from IPython import embed import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import sampling from pypeit import utils @@ -50,23 +50,23 @@ def __init__(self, wave, flux, ivar=None, gpm=None, meta=None): self.wave = np.asarray(wave, dtype=float).copy() if self.wave.ndim != 1: - msgs.error('wavelength array must always be 1D in the spectrum object') + log.error('wavelength array must always be 1D in the spectrum object') if self.wave.size != self.flux.shape[0]: - msgs.error('wavelength vector must match length of flux array') + log.error('wavelength vector must match length of flux array') if ivar is None: self.ivar = None else: self.ivar = np.asarray(ivar, dtype=float).copy() if self.ivar.shape != self.flux.shape: - msgs.error('Wavelength and inverse variance arrays do not have the same shape.') + log.error('Wavelength and inverse variance arrays do not have the same shape.') if gpm is None: self.gpm = np.ones(self.flux.shape, dtype=bool) else: self.gpm = np.asarray(gpm, dtype=bool).copy() if self.gpm.shape != self.flux.shape: - msgs.error('Wavelength and good-pixel arrays do not have the same size.') + log.error('Wavelength and good-pixel arrays do not have the same size.') self.meta = meta if meta is None else deepcopy(meta) @@ -119,7 +119,7 @@ def multiply(self, a): # Multiply by a scalar if isinstance(a, (int, np.integer, float, np.floating)): if float(a) == 0.: - msgs.warn('Multiplicative factor is 0!') + log.warn('Multiplicative factor is 0!') self.flux *= a if self.ivar is not None: if np.absolute(a) > 0: @@ -138,7 +138,7 @@ def multiply(self, a): # Check the wavelength vectors # TODO: Loosen this; i.e., use isclose instead of array_equal? if not np.array_equal(a.wave, self.wave): - msgs.error('To multiply two spectra, their wavelength vectors must be identical.') + log.error('To multiply two spectra, their wavelength vectors must be identical.') a_flux = a.flux a_gpm = a.gpm if a.ivar is not None: @@ -150,7 +150,7 @@ def multiply(self, a): # Check the input if a_flux.ndim > self.ndim: - msgs.error( + log.error( 'Multiplication does not allow the dimensionality of the spectrum to change. ' f'The dimensionality of this spectrum is {self.ndim} and the multiplier is ' f'{a_flux.ndim}.' 
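For context on the scalar branch of multiply above: scaling the flux by a constant a scales the variance by a**2, so the inverse variance is divided by a**2, with a zero factor wiping out the information. A minimal illustration, not the Spectrum API itself:

import numpy as np

a = 2.5                           # hypothetical multiplicative factor
flux = np.array([1.0, 2.0, 3.0])
ivar = np.array([4.0, 4.0, 4.0])

flux = a * flux
# var' = a**2 * var  =>  ivar' = ivar / a**2
ivar = ivar / a**2 if abs(a) > 0 else np.zeros_like(ivar)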
@@ -159,7 +159,7 @@ def multiply(self, a): # below should work, as long as the last a.ndim dimensions of a and this # spectrum match. if a_flux.shape != self.shape[:a_flux.ndim]: - msgs.error( + log.error( 'Numpy will not be able to successfully broadcast arithmetic operations between ' f'this spectrum, shape={self.shape}, and the multiplier, shape={a_flux.shape}.' ) diff --git a/pypeit/core/standard.py b/pypeit/core/standard.py index 588fc58e7e..f88295c01e 100644 --- a/pypeit/core/standard.py +++ b/pypeit/core/standard.py @@ -14,7 +14,7 @@ from IPython import embed import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import dataPaths from pypeit import PypeItError from pypeit.core import spectrum @@ -70,12 +70,12 @@ def archive_entry(archive, name): # Get the file star_file = stds_path.get_file_path(f'{archive}_info.txt') if not star_file.is_file(): - msgs.error(f'File does not exist!: {star_file}') + log.error(f'File does not exist!: {star_file}') star_tbl = table.Table.read(star_file, comment='#', format='ascii') idx = np.where(star_tbl['Name'] == name)[0] if len(idx) != 1: - msgs.error(f'{name} is not a named source in {star_file}.') + log.error(f'{name} is not a named source in {star_file}.') return star_tbl[idx[0]] @@ -112,14 +112,14 @@ def nearest_archive_entry(archive, ra, dec, unit=None): _unit = unit obj_coord = coordinates.SkyCoord([ra], [dec], unit=_unit) if obj_coord.size > 1: - msgs.error('Matching to archive can only be done one object at a time.') + log.error('Matching to archive can only be done one object at a time.') # Set the path (creates a new PypeItDataPath object) stds_path = dataPaths.standards / archive # Get the file star_file = stds_path.get_file_path(f"{archive}_info.txt") if not star_file.is_file(): - msgs.error(f"File does not exist!: {star_file}") + log.error(f"File does not exist!: {star_file}") star_tbl = table.Table.read(star_file, comment='#', format='ascii') star_coords = coordinates.SkyCoord(star_tbl['RA_2000'], star_tbl['DEC_2000'], @@ -218,7 +218,7 @@ def from_coordinates(cls, ra, dec, tol=20., unit=None): """ sep, row = nearest_archive_entry(cls.archive, ra, dec, unit=unit) if sep > tol * units.arcmin: - msgs.error(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' + log.error(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' f'arcmin, which is beyond the required tolerance ({tol} arcmin).') return cls(row['File'], meta=cls._init_meta(row=row)) @@ -512,7 +512,7 @@ def from_coordinates(cls, ra, dec, tol=20., unit=None, wave=None): """ sep, row = nearest_archive_entry(cls.model_type, ra, dec, unit=unit) if sep > tol * units.arcmin: - msgs.error(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' + log.error(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' f'arcmin, which is beyond the required tolerance ({tol} arcmin).') return cls(row['a_x10m23'], row['T_K'], wave=wave, meta=cls._init_meta(row=row)) @@ -578,7 +578,7 @@ def __init__(self, V_mag, spectral_type): # interpolate across types. indx = np.where(spectral_type == sk82_tab['Sp'])[0] if len(indx) != 1: - msgs.error( + log.error( f'Provided spectral type {spectral_type} not available in Schmidt-Kaler (1982) ' 'table. See the KuruczModelStandard API.' 
) @@ -729,10 +729,10 @@ def get_archive_sets(archives=['xshooter', 'calspec', 'esofil', 'noao', 'ing']): good = np.ones(len(_archives), dtype=bool) for i, s in enumerate(_archives): if s not in archive_classes.keys(): - msgs.warn(f'{s} is not a recognized archive of standard spectra. Ignoring.') + log.warn(f'{s} is not a recognized archive of standard spectra. Ignoring.') good[i] = False if not any(good): - msgs.error('None of the provided standard spectra archives are valid. Try using ' + log.error('None of the provided standard spectra archives are valid. Try using ' 'the default list.') return _archives[good] @@ -825,7 +825,7 @@ def get_archive_standard(ra, dec, tol=20., unit=None, archives='default', check= res = np.asarray(list([nearest_archive_entry(key, ra, dec, unit=unit) for key in _archives])) indx = np.argmin(res[:,0]) sep, row = res[indx] - msgs.error(f'Unable to find a standard star within {tol:.1f} arcmin of RA={ra}, DEC={dec} in ' + log.error(f'Unable to find a standard star within {tol:.1f} arcmin of RA={ra}, DEC={dec} in ' f'the following archives: {_archives}. The nearest object is {row["Name"]} in ' f'{_archives[indx]} at RA={row["RA_2000"]}, DEC={row["DEC_2000"]}, separated by ' f'{sep.to("arcmin").value:.1f} arcmin.') @@ -914,7 +914,7 @@ def get_standard_spectrum(spectral_type=None, V_mag=None, ra=None, dec=None, tol if spectral_type is not None and V_mag is not None: return get_model_standard(spectral_type, V_mag) if ra is None or dec is None: - msgs.error('Insufficient data provided to determine the appropriate standard spectrum. ' + log.error('Insufficient data provided to determine the appropriate standard spectrum. ' 'Provide either the coordinates of the standard or a stellar type and ' 'magnitude.') return get_archive_standard(ra, dec, tol=tol, unit=unit, archives=archives) diff --git a/pypeit/core/telluric.py b/pypeit/core/telluric.py index 744b14f0cc..08572c8db6 100644 --- a/pypeit/core/telluric.py +++ b/pypeit/core/telluric.py @@ -16,7 +16,7 @@ from astropy import table from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import dataPaths from pypeit import io @@ -337,7 +337,7 @@ def conv_telluric(tell_model, dloglam, res): pix_per_sigma = 1.0/res/(dloglam*np.log(10.0))/(2.0 * np.sqrt(2.0 * np.log(2))) # number of dloglam pixels per 1 sigma dispersion sig2pix = 1.0/pix_per_sigma # number of sigma per 1 pix if sig2pix > 2.0: - msgs.warning('The telluric model grid is not sampled finely enough to properly convolve to the desired resolution. ' + log.warning('The telluric model grid is not sampled finely enough to properly convolve to the desired resolution. ' 'Skipping resolution convolution for now. Create a higher resolution telluric model grid') return tell_model @@ -812,7 +812,7 @@ def general_spec_reader(specfile, ret_flam=False, chk_version=False, ret_order_s meta_spec['core'] = spect_dict # ASC: Reimplement the ability to return the OrderStack components at some point. 
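The sampling check in conv_telluric above is unit bookkeeping on a log10-uniform wavelength grid; a small sketch of the same arithmetic (the function name is assumed):

import numpy as np

def lsf_sigma_in_pixels(res, dloglam):
    # dloglam * ln(10) is dlambda/lambda per pixel; 1/res is dlambda/lambda at FWHM
    fwhm_pix = 1.0 / res / (dloglam * np.log(10.0))
    # Convert the Gaussian FWHM to a sigma
    sigma_pix = fwhm_pix / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    # If sigma_pix < 0.5 (sig2pix > 2 in the code), the grid undersamples the LSF
    return sigma_pix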
#if ret_order_stacks: - # msgs.info('Returning order stacks') + # log.info('Returning order stacks') # return wave_stack, None, counts_stack, counts_ivar_stack, counts_gpm_stack, meta_spec, head return wave, wave_grid_mid, counts, counts_ivar, counts_gpm, meta_spec, head @@ -906,7 +906,7 @@ def init_sensfunc_model(obj_params, iord, wave, counts_per_ang, ivar, gpm, tellm flam_true_gpm = (wave >= np.min(obj_params['std_spec'].wave)) \ & (wave <= np.max(obj_params['std_spec'].wave)) if np.any(np.logical_not(flam_true_gpm)): - msgs.warning('Your data extends beyond the range covered by the standard star spectrum. ' + log.warning('Your data extends beyond the range covered by the standard star spectrum. ' 'Proceeding by masking these regions, but consider using another standard star') N_lam = counts_per_ang/obj_params['exptime'] zeropoint_data, zeropoint_data_gpm \ @@ -2441,12 +2441,12 @@ def __init__(self, wave, flux, ivar, gpm, telgridfile, obj_params, init_obj_mode # 3) Read the telluric grid and initialize associated parameters wv_gpm = self.wave_in_arr > 1.0 if self.teltype == 'pca': - msgs.info(f'Reading in the pca-based telluric model: {self.telgrid}') + log.info(f'Reading in the pca-based telluric model: {self.telgrid}') self.tell_dict = read_telluric_pca(self.telgrid, wave_min=self.wave_in_arr[wv_gpm].min(), wave_max=self.wave_in_arr[wv_gpm].max()) elif self.teltype == 'grid': self.tell_npca = 4 - msgs.info(f'Reading in the grid-based telluric model: {self.telgrid}') + log.info(f'Reading in the grid-based telluric model: {self.telgrid}') self.tell_dict = read_telluric_grid(self.telgrid, wave_min=self.wave_in_arr[wv_gpm].min(), wave_max=self.wave_in_arr[wv_gpm].max()) @@ -2498,7 +2498,7 @@ def __init__(self, wave, flux, ivar, gpm, telgridfile, obj_params, init_obj_mode for counter, iord in enumerate(self.srt_order_tell): _ord = self.ech_orders[iord] if self.ech_orders is not None and \ len(self.ech_orders) == self.norders else iord - msgs.info(f'Initializing object model for order: {_ord}, {counter+1}/{self.norders}' + log.info(f'Initializing object model for order: {_ord}, {counter+1}/{self.norders}' + f' with user supplied function: {self.init_obj_model.__name__}') tellmodel = eval_telluric(self.tell_guess, self.tell_dict, ind_lower=self.ind_lower[iord], @@ -2556,14 +2556,14 @@ def run(self, only_orders=None): if self.ech_orders is not None and len(self.ech_orders) == self.norders: indx_only = np.where(np.isin(self.ech_orders, only_orders))[0] if (indx_only.size == 0) and (only_orders is not None): - msgs.warning(f'All the orders provided in `only_orders` are not among the expected orders. ' + log.warning(f'All the orders provided in `only_orders` are not among the expected orders. ' f'Using all orders available in the data.') elif indx_only.size > 0: good_orders = indx_only - msgs.info(f'Working only on the following orders: {self.ech_orders[indx_only]}') + log.info(f'Working only on the following orders: {self.ech_orders[indx_only]}') if len(indx_only) != len(only_orders): missing_orders = list(set(only_orders) - set(self.ech_orders[indx_only])) - msgs.warning(f'Some orders provided in `only_orders` are not among the expected orders. ' + log.warning(f'Some orders provided in `only_orders` are not among the expected orders.
' f'Ignoring orders: {missing_orders}') # Run the fits @@ -2578,7 +2578,7 @@ def run(self, only_orders=None): continue _ord = self.ech_orders[iord] if self.ech_orders is not None and \ len(self.ech_orders) == self.norders else iord - msgs.info(f'Fitting object + telluric model for order: {_ord}, {counter+1}/{self.norders}' + log.info(f'Fitting object + telluric model for order: {_ord}, {counter+1}/{self.norders}' + f' with user supplied function: {self.init_obj_model.__name__}') self.result_list[iord], ymodel, ivartot, self.outmask_list[iord] \ = fitting.robust_optimize(self.flux_arr[self.ind_lower[iord]:self.ind_upper[iord]+1,iord], diff --git a/pypeit/core/trace.py b/pypeit/core/trace.py index 4b8dc5ba30..ecefc5be25 100644 --- a/pypeit/core/trace.py +++ b/pypeit/core/trace.py @@ -21,7 +21,7 @@ from astropy.stats import sigma_clipped_stats, sigma_clip -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit import sampling @@ -87,7 +87,7 @@ def detect_slit_edges(flux, bpm=None, median_iterations=0, min_sqm=30., sobel_mo # Specify how many times to repeat the median filter. Even better # would be to fit the filt/sqrt(abs(binarr)) array with a Gaussian # near the maximum in each column - msgs.info("Detecting slit edges in the trace image") + log.info("Detecting slit edges in the trace image") # Generate sqrt image sqmstrace = np.sqrt(np.abs(flux)) @@ -130,7 +130,7 @@ def detect_slit_edges(flux, bpm=None, median_iterations=0, min_sqm=30., sobel_mo edge_img[wcr] = 1 if bpm is not None: - msgs.info("Applying bad pixel mask") + log.info("Applying bad pixel mask") # JFH grow the bad pixel mask in the spatial direction _nave = np.fmin(grow_bpm, flux.shape[0]) # Construct the kernel for mean calculation @@ -171,7 +171,7 @@ def identify_traces(edge_img, max_spatial_separation=4, follow_span=10, minimum_ :func:`count_edge_traces`. Pixels not associated to any edge have a value of 0. """ - msgs.info('Finding unique traces among detected edges.') + log.info('Finding unique traces among detected edges.') # Check the input if edge_img.ndim > 2: raise PypeItError('Provided edge image must be 2D.') @@ -180,7 +180,7 @@ def identify_traces(edge_img, max_spatial_separation=4, follow_span=10, minimum_ # No edges were detected. if np.all(edge_img == 0): - msgs.warning('No edges were found!') + log.warning('No edges were found!') return np.zeros_like(edge_img, dtype=int) # Find the left and right coordinates @@ -339,13 +339,13 @@ def atleast_one_edge(edge_img, bpm=None, flux_valid=True, buffer=0, copy=False): if nleft == 0: # Add a left edge trace at the first valid column - msgs.warning('No left edge found. Adding one at the detector edge.') + log.warning('No left edge found. Adding one at the detector edge.') gdi0 = np.min(np.where(sum_bpm[buffer:] == 0)[0]) + buffer _edge_img[:,gdi0] = -1 if nright == 0: # Add a right edge trace at the last valid column - msgs.warning('No right edge found. Adding one at the detector edge.') + log.warning('No right edge found. Adding one at the detector edge.') gdi1 = np.max(np.where(sum_bpm[:-buffer] == 0)[0]) _edge_img[:,gdi1] = 1 @@ -416,8 +416,8 @@ def handle_orphan_edges(edge_img, sobel_sig, bpm=None, flux_valid=True, buffer=0 if nright > 1: # To get here, nleft must be 1. This is mainly in here for # LRISb, which is a real pain.. 
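The fallback in atleast_one_edge above condenses to the following sketch; the -1/+1 labels follow the left/right edge convention in this module, while the function and argument names are assumed for illustration:

import numpy as np

def ensure_one_edge(edge_img, bad_cols):
    # edge_img: -1 marks left-edge pixels, +1 right-edge pixels, 0 no edge
    out = edge_img.copy()
    good = np.flatnonzero(np.logical_not(bad_cols))
    if not np.any(out == -1):
        # No left edge traced: add one at the first usable column
        out[:, good[0]] = -1
    if not np.any(out == 1):
        # No right edge traced: add one at the last usable column
        out[:, good[-1]] = 1
    return out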
- msgs.warning('Only one left edge, and multiple right edges.') - msgs.info('Restricting right edge detection to the most significantly detected edge.') + log.warning('Only one left edge, and multiple right edges.') + log.info('Restricting right edge detection to the most significantly detected edge.') # Find the most significant right trace best_trace = np.argmin([-np.median(sobel_sig[_edge_img==t]) for t in range(nright)])+1 # Remove the other right traces @@ -428,8 +428,8 @@ def handle_orphan_edges(edge_img, sobel_sig, bpm=None, flux_valid=True, buffer=0 return _edge_img # To get here, nright must be 1. - msgs.warning('Only one right edge, and multiple left edges.') - msgs.info('Restricting left edge detection to the most significantly detected edge.') + log.warning('Only one right edge, and multiple left edges.') + log.info('Restricting left edge detection to the most significantly detected edge.') # Find the most significant left trace best_trace = np.argmax([np.median(sobel_sig[_edge_img == -t]) for t in range(nleft)])+1 # Remove the other left traces @@ -1423,7 +1423,7 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm # Check there is a trace for each image pixel if trace_map.shape != flux.shape: raise ValueError('Provided trace data must match the image shape.') - msgs.info('Rectifying image by extracting along trace for each spatial pixel') + log.info('Rectifying image by extracting along trace for each spatial pixel') # TODO: JFH What should this aperture size be? I think fwhm=3.0 # since that is the width of the sobel filter flux_extract = sampling.rectify_image(flux, trace_map, bpm=bpm, extract_width=fwhm_gaussian @@ -1431,7 +1431,7 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm # Collapse the image along the spectral direction to isolate peaks/troughs start, end = np.clip(np.asarray(smash_range)*nspec, 0, nspec).astype(int) - msgs.info('Collapsing image spectrally between pixels {0}:{1}'.format(start, end)) + log.info('Collapsing image spectrally between pixels {0}:{1}'.format(start, end)) flux_smash_mean, flux_smash_median, flux_smash_sig \ = sigma_clipped_stats(flux_extract[start:end,:], axis=0, sigma=4.0) @@ -1460,16 +1460,16 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm for i,(l,s) in enumerate(zip(label,sign)): # Identify the peaks - msgs.info('Searching for peaks.') + log.info('Searching for peaks.') peak, _, _cen, _, _, best, _, _ \ = arc.detect_lines(s*flux_smash_mean, cont_subtract=False, fwhm=fwhm_gaussian, input_thresh=peak_thresh, max_frac_fwhm=4.0, min_pkdist_frac_fwhm=min_pkdist_frac_fwhm, debug=show_peaks) if len(_cen) == 0 or not np.any(best): - msgs.warning('No good {0}s found!'.format(l)) + log.warning('No good {0}s found!'.format(l)) continue - msgs.info('Found {0} good {1}(s) in the rectified, collapsed image'.format( + log.info('Found {0} good {1}(s) in the rectified, collapsed image'.format( len(_cen[best]),l)) # Set the reference spatial locations to use for tracing the @@ -1485,7 +1485,7 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm clipped_peak = sigma_clip(peak[best], sigma_lower=peak_clip, sigma_higher=np.inf) peak_mask = np.ma.getmaskarray(clipped_peak) if np.any(peak_mask): - msgs.warning('Clipping {0} detected peak(s) with aberrant amplitude(s).'.format( + log.warning('Clipping {0} detected peak(s) with aberrant amplitude(s).'.format( np.sum(peak_mask))) loc = loc[np.invert(peak_mask)] _cen = 
_cen[np.invert(peak_mask)] diff --git a/pypeit/core/tracewave.py b/pypeit/core/tracewave.py index c7b8d453ae..f4c5f0a456 100644 --- a/pypeit/core/tracewave.py +++ b/pypeit/core/tracewave.py @@ -14,7 +14,7 @@ from astropy.stats import sigma_clipped_stats -from pypeit import msgs +from pypeit import log from pypeit import utils from pypeit import tracepca @@ -79,7 +79,7 @@ def tilts_find_lines(arc_spec, slit_cen, tracethresh=10.0, sig_neigh=5.0, nfwhm_ aduse = np.zeros(arcdet.size, dtype=bool) # Which lines should be used to trace the tilts aduse[nsig >= tracethresh] = True - msgs.info('Rejecting {0} lines below sigma threshold.'.format(arcdet.size - np.sum(aduse))) + log.info('Rejecting {0} lines below sigma threshold.'.format(arcdet.size - np.sum(aduse))) # arc.find_lines_qa(arc_cont_sub, arcdet, arc_ampl, aduse, bpm=bpm, # nonlinear=nonlinear_counts) @@ -100,7 +100,7 @@ def tilts_find_lines(arc_spec, slit_cen, tracethresh=10.0, sig_neigh=5.0, nfwhm_ aduse[idxuse[s]] = False break - msgs.info('Removed {0} lines that were too close to neighboring lines.'.format( + log.info('Removed {0} lines that were too close to neighboring lines.'.format( np.sum(olduse) - np.sum(aduse))) # Restricted to ID lines? [introduced to avoid LRIS ghosts] @@ -110,19 +110,19 @@ def tilts_find_lines(arc_spec, slit_cen, tracethresh=10.0, sig_neigh=5.0, nfwhm_ idxuse = np.arange(arcdet.size)[aduse] for s in idxuse: if np.min(np.abs(arcdet[s] - ids_pix)) > 2.0: - msgs.info('Ignoring line at {:6.1f} which was not identified'.format(arcdet[s])) + log.info('Ignoring line at {:6.1f} which was not identified'.format(arcdet[s])) aduse[s] = False # Final spectral positions of arc lines we will trace lines_spec = arcdet[aduse] nlines = len(lines_spec) if nlines == 0: - msgs.warning('No arc lines were deemed usable on this slit; line tilts cannot be computed.' + log.warning('No arc lines were deemed usable on this slit; line tilts cannot be computed.' ' This may be a bad slit, which you can remove. Otherwise, try lowering ' 'the tracethresh parameter.') return None, None, None else: - msgs.info('Modeling arc line tilts with {:d} arc lines'.format(nlines)) + log.info('Modeling arc line tilts with {:d} arc lines'.format(nlines)) nspec = arc_spec.size spec_vec = np.arange(nspec) @@ -479,7 +479,7 @@ def trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask= use_tilt = (mad_rej) & (bad_frac < max_badpix_frac) & good_line & (dev_mad < maxdev) nuse = np.sum(use_tilt) - msgs.info('Number of usable arc lines for tilts: {:d}/{:d}'.format(nuse, nlines)) + log.info('Number of usable arc lines for tilts: {:d}/{:d}'.format(nuse, nlines)) tilts_mad = np.outer(np.ones(nspat), dev_mad) @@ -489,7 +489,7 @@ def trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask= # cause a full fault of the code, we need to make sure the user # sees these kinds of critical failures instead of them getting # buried in all the other messages. - msgs.warning( + log.warning( f'Too many lines rejected in this slit/order.\nWould reject {nlines - nuse}/{nlines} ' 'lines (more than 95%).\nProceeding without rejection, but reduction likely bogus.' ) @@ -618,7 +618,7 @@ def trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None, debug_pca_fit = False if debug_pca_fit: # !!!! FOR TESTING ONLY!!!! 
@@ -618,7 +618,7 @@ def trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None,
     debug_pca_fit = False
     if debug_pca_fit:
         # !!!! FOR TESTING ONLY!!!! Evaluate the model fit to the tilts for all of our lines
-        msgs.info('TESTING: Performing an initial fit before PCA.')
+        log.info('TESTING: Performing an initial fit before PCA.')
         # JFH Note spec_order is hard wired here as we don't pass it in
         tilt_fit_dict0 = fit_tilts(trace_dict0, spat_order=spat_order, spec_order=6, debug=True,
                                    maxdev=0.2, sigrej=3.0, doqa=True, setup='test', slit=0,
@@ -630,12 +630,12 @@ def trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None,
     if nuse < 2:
         # DP: Added this because sometimes there are < 2 usable arc lines for tilt tracing,
         # the PCA fit does not work, and the reduction crashes
-        msgs.warning('Less than 2 usable arc lines for tilts. NO PCA modeling!')
+        log.warning('Less than 2 usable arc lines for tilts. NO PCA modeling!')
         return trace_dict0
     else:
         bpm = np.ones(trace_dict0['tilts_sub_fit'].shape, dtype=bool)
         bpm[:, iuse] = False
-        msgs.info('PCA modeling {:d} good tilts'.format(nuse))
+        log.info('PCA modeling {:d} good tilts'.format(nuse))
         pca_fit = tracepca.pca_trace_object(trace_dict0['tilts_sub_fit'], order=coeff_npoly_pca,
                                             trace_bpm=bpm, npca=npca, coo=lines_spec, minx=0.0,
                                             maxx=float(trace_dict0['nsub'] - 1), lower=sigrej_pca,
@@ -703,7 +703,7 @@ def fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, max
     fitxy = [spec_order, spat_order]
     # Fit the inverted model with a 2D polynomial
-    msgs.info("Fitting tilts with a low order, 2D {:s}".format(func2d))
+    log.info("Fitting tilts with a low order, 2D {:s}".format(func2d))
     # TODO: Make adderr a parameter? Where does this come from?
     adderr = 0.03
@@ -740,11 +740,11 @@ def fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, max
     # Report the residuals in pixels
     res_fit = tilts[fitmask] - tilts_2dfit[fitmask]
     rms_fit = np.std(res_fit)
-    msgs.info("Residuals: 2D Legendre Fit")
-    msgs.info("RMS (pixels): {}".format(rms_fit))
-    msgs.info("RMS/FWHM: {}".format(rms_fit / fwhm))
+    log.info("Residuals: 2D Legendre Fit")
+    log.info("RMS (pixels): {}".format(rms_fit))
+    log.info("RMS/FWHM: {}".format(rms_fit / fwhm))
-    msgs.info('Inverting the fit to generate the tilts image')
+    log.info('Inverting the fit to generate the tilts image')
     spec_vec = np.arange(nspec)
     spat_vec = np.arange(nspat)
     spat_img, spec_img = np.meshgrid(spat_vec, spec_vec)
@@ -803,7 +803,7 @@ def fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, max
                                    use_mad=False, sticky=False)
     # JFH changed above to use sticky=False, to limit the amount of rejection
     irej = np.logical_not(pypeitFit.bool_gpm) & inmask
-    msgs.info('Rejected {0}/{1} pixels in final inversion tilts image fit'.format(
+    log.info('Rejected {0}/{1} pixels in final inversion tilts image fit'.format(
              np.sum(irej), np.sum(inmask)))
     # normalized tilts image
     # TODO -- This should be a DataContainer
@@ -847,9 +847,9 @@ def fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, max
#    # Actual 2D Model Tilt Residuals
#    res_real = tilts[fitmask] - tilts_2dfit_piximg[fitmask]
#    rms_real = np.std(res_real)
-#    msgs.info("Residuals: Actual 2D Tilt Residuals from piximg")
-#    msgs.info("RMS (pixels): {}".format(rms_real))
-#    msgs.info("RMS/FWHM: {}".format(rms_real/fwhm))
+#    log.info("Residuals: Actual 2D Tilt Residuals from piximg")
+#    log.info("RMS (pixels): {}".format(rms_real))
+#    log.info("RMS/FWHM: {}".format(rms_real/fwhm))
 def fit2tilts(shape, coeff2, func2d, spat_shift=None):
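fit_tilts models the tilts with a low-order 2D function (a Legendre, per the log messages above). A self-contained sketch of a plain least-squares 2D Legendre fit using numpy's helpers; PypeIt's fitting.robust_fit layers iterative rejection on top of this idea, and all data below are synthetic:

    import numpy as np
    from numpy.polynomial import legendre

    rng = np.random.default_rng(1)
    spec = rng.uniform(-1, 1, 500)    # normalized spectral coordinate
    spat = rng.uniform(-1, 1, 500)    # normalized spatial coordinate
    tilt = 0.1*spec + 0.05*spat*spec + rng.normal(0, 1e-3, 500)  # fake tilt surface

    # Least-squares fit of a (spec_order=4) x (spat_order=3) Legendre surface
    vand = legendre.legvander2d(spec, spat, [4, 3])
    coef, *_ = np.linalg.lstsq(vand, tilt, rcond=None)
    model = legendre.legval2d(spec, spat, coef.reshape(5, 4))
    print('RMS residual:', np.std(tilt - model))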
diff --git a/pypeit/core/transform.py b/pypeit/core/transform.py
index 8bb779985e..89997b90fb 100644
--- a/pypeit/core/transform.py
+++ b/pypeit/core/transform.py
@@ -9,7 +9,7 @@
 import numpy as np
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
diff --git a/pypeit/core/wave.py b/pypeit/core/wave.py
index c4c43f3b81..ad74657779 100644
--- a/pypeit/core/wave.py
+++ b/pypeit/core/wave.py
@@ -18,7 +18,7 @@
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from IPython import embed
diff --git a/pypeit/core/wavecal/autoid.py b/pypeit/core/wavecal/autoid.py
index 169829b4d5..8d843bd5e2 100644
--- a/pypeit/core/wavecal/autoid.py
+++ b/pypeit/core/wavecal/autoid.py
@@ -29,7 +29,7 @@
 from pypeit.core import pca
 from pypeit import utils
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from matplotlib import pyplot as plt
@@ -593,7 +593,7 @@ def reidentify(spec, spec_arxiv_in, wave_soln_arxiv_in, line_list,
     ccorr_vec = np.zeros(narxiv)
     for iarxiv in range(narxiv):
-        msgs.info('Cross-correlating with arxiv slit # {:d}'.format(iarxiv))
+        log.info('Cross-correlating with arxiv slit # {:d}'.format(iarxiv))
         this_det_arxiv = det_arxiv[str(iarxiv)]
         # Match the peaks between the two spectra. This code attempts to compute the stretch if cc > cc_thresh
         success, shift_vec[iarxiv], stretch_vec[iarxiv], stretch2_vec[iarxiv], ccorr_vec[iarxiv], _, _ = \
@@ -601,10 +601,10 @@ def reidentify(spec, spec_arxiv_in, wave_soln_arxiv_in, line_list,
                            lag_range=cc_shift_range, cc_thresh=cc_thresh, fwhm=fwhm,
                            seed=random_state, debug=debug_xcorr, percent_ceil=percent_ceil,
                            max_lag_frac=max_lag_frac, stretch_func=stretch_func)
-        msgs.info(f'shift = {shift_vec[iarxiv]:5.3f}, stretch = {stretch_vec[iarxiv]:5.3f}, cc = {ccorr_vec[iarxiv]:5.3f}')
+        log.info(f'shift = {shift_vec[iarxiv]:5.3f}, stretch = {stretch_vec[iarxiv]:5.3f}, cc = {ccorr_vec[iarxiv]:5.3f}')
         # If cc < cc_thresh or if this optimization failed, don't reidentify from this arxiv spectrum
         if success != 1:
-            msgs.warning('Global cross-correlation failed or cc [...]
[...] 50))[0]]
         IDs = lines_wav[slit][np.where(np.logical_and(pix_arxiv_ss < len(obs_spec_i)-50, pix_arxiv_ss > 50))[0]]
-        msgs.info(f'Using lines from pixel {dets} mapped to Wavelengths: {IDs}')
+        log.info(f'Using lines from pixel {dets} mapped to Wavelengths: {IDs}')
         gd_det = np.where(IDs > 0.)[0]
         if len(gd_det) < 2:
-            msgs.warning("Not enough useful IDs")
+            log.warning("Not enough useful IDs")
             wvcalib[str(slit)] = None
             continue
         # Fit
@@ -1237,12 +1237,12 @@ def full_template(spec, lamps, par, ok_mask, det, binspectral, nsnippet=2, slit_
             continue
         else:
-            msgs.info('No solution yet for this slit, so making one now...')
+            log.info('No solution yet for this slit, so making one now...')
         # Cross-correlate
         shift_cc, corr_cc = wvutils.xcorr_shift(tspec, pad_spec, debug=debug, fwhm=fwhm,
                                                 percent_ceil=x_percentile,
                                                 lag_range=par['cc_shift_range'])
-        msgs.info(f"Shift = {shift_cc:.2f}; cc = {corr_cc:.4f}")
+        log.info(f"Shift = {shift_cc:.2f}; cc = {corr_cc:.4f}")
         if debug:
             xvals = np.arange(tspec.size)
             plt.clf()
@@ -1301,7 +1301,7 @@ def full_template(spec, lamps, par, ok_mask, det, binspectral, nsnippet=2, slit_
             try:
                 sv_IDs.append(patt_dict['IDs'])
             except KeyError:
-                msgs.warning("Failed to perform wavelength calibration in reidentify..")
+                log.warning("Failed to perform wavelength calibration in reidentify..")
                 sv_IDs.append(np.zeros_like(detections))
             else:
                 # Save now in case the next one barfs
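reidentify and full_template both lean on cross-correlation shifts between arc spectra. A toy version of the core idea using np.correlate (illustrative only; the wvutils routines add continuum handling, amplitude ceilings, stretch, and sub-pixel refinement):

    import numpy as np

    x = np.arange(200)
    spec1 = np.exp(-0.5*((x - 80)/3.0)**2)   # synthetic arc line
    spec2 = np.exp(-0.5*((x - 92)/3.0)**2)   # same line shifted by +12 pixels

    # Full cross-correlation; the lag of the maximum gives the shift
    cc = np.correlate(spec2 - spec2.mean(), spec1 - spec1.mean(), mode='full')
    lags = np.arange(-(x.size - 1), x.size)
    print('estimated shift:', lags[np.argmax(cc)])   # -> 12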
@@ -1312,7 +1312,7 @@ def full_template(spec, lamps, par, ok_mask, det, binspectral, nsnippet=2, slit_
         IDs = np.concatenate(sv_IDs)
         gd_det = np.where(IDs > 0.)[0]
         if len(gd_det) < 2:
-            msgs.warning("Not enough useful IDs")
+            log.warning("Not enough useful IDs")
             wvcalib[str(slit)] = None
             continue
         # get n_final for this slit
@@ -1444,25 +1444,25 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
             continue
         # TODO: should we still be populating wave_calib with an empty dict here?
         if iord not in ok_mask:
-            msgs.warning(f"Skipping order = {orders[iord]} ({iord+1}/{norders}) because masked")
+            log.warning(f"Skipping order = {orders[iord]} ({iord+1}/{norders}) because masked")
             wv_calib[str(iord)] = None
             all_patt_dict[str(iord)] = None
             continue
         if np.all(spec_arxiv[:, iord] == 0.0):
-            msgs.warning(f"Order = {orders[iord]} ({iord+1}/{norders}) cannot be reidentified "
+            log.warning(f"Order = {orders[iord]} ({iord+1}/{norders}) cannot be reidentified "
                          f"because this order is not present in the arxiv")
             wv_calib[str(iord)] = None
             all_patt_dict[str(iord)] = None
             continue
-        msgs.info('Reidentifying and fitting Order = {0:d}, which is {1:d}/{2:d}'.format(orders[iord], iord+1, norders))
+        log.info('Reidentifying and fitting Order = {0:d}, which is {1:d}/{2:d}'.format(orders[iord], iord+1, norders))
         sigdetect = wvutils.parse_param(par, 'sigdetect', iord)
         cc_thresh = wvutils.parse_param(par, 'cc_thresh', iord)
-        msgs.info("Using sigdetect = {}".format(sigdetect))
+        log.info("Using sigdetect = {}".format(sigdetect))
         # Set FWHM for this order
         fwhm = set_fwhm(par, measured_fwhm=measured_fwhms[iord], verbose=True)
         # get rms threshold for this slit
         rms_thresh = round(par['rms_thresh_frac_fwhm'] * fwhm, 3)
-        msgs.info(f"Using RMS threshold = {rms_thresh} (pixels); RMS/FWHM threshold = {par['rms_thresh_frac_fwhm']}")
+        log.info(f"Using RMS threshold = {rms_thresh} (pixels); RMS/FWHM threshold = {par['rms_thresh_frac_fwhm']}")
         detections[str(iord)], spec_cont_sub[:, iord], all_patt_dict[str(iord)] = reidentify(
             spec[:, iord], spec_arxiv[:, iord], wave_arxiv[:, iord], tot_line_list,
             par['nreid_min'], cont_sub=par['reid_cont_sub'], match_toler=par['match_toler'],
             cc_shift_range=par['cc_shift_range'],
@@ -1477,7 +1477,7 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
         if not all_patt_dict[str(iord)]['acceptable']:
             wv_calib[str(iord)] = None
             bad_orders = np.append(bad_orders, iord)
-            msgs.warning(
+            log.warning(
                 '\n---------------------------------------------------'
                 f'\nReidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):'
                 f'\nCross-correlation failed'
@@ -1494,13 +1494,13 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
                                       sigrej_first=par['sigrej_first'],
                                       n_final=n_final, sigrej_final=par['sigrej_final'])
-        msgs.info(f"Number of lines used in fit: {len(final_fit['pixel_fit'])}")
+        log.info(f"Number of lines used in fit: {len(final_fit['pixel_fit'])}")
         # Did the fit succeed?
         if final_fit is None:
             # This pattern wasn't good enough
             wv_calib[str(iord)] = None
             bad_orders = np.append(bad_orders, iord)
-            msgs.warning(
+            log.warning(
                 '\n---------------------------------------------------'
                 f'\nReidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):'
                 f'\nFinal fit failed'
@@ -1509,7 +1509,7 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par,
             continue
         # Is the RMS below the threshold?
         if final_fit['rms'] > rms_thresh:
-            msgs.warning(
+            log.warning(
                 '\n---------------------------------------------------'
                 f'\nReidentify report for order = {orders[iord]:d} ({iord+1:d}/{norders:d}):'
                 f'\nPoor RMS ({final_fit["rms"]:.3f})! Need to add additional spectra to arxiv '
@@ -1571,7 +1571,7 @@ def report_final(nslits, all_patt_dict, detections,
             continue
         st = str(slit)
         if slit not in ok_mask or slit in bad_slits or all_patt_dict[st] is None or wv_calib[st] is None:
-            msgs.warning(badmsg)
+            log.warning(badmsg)
             continue
         if all_patt_dict[st]['sign'] == +1:
@@ -1581,7 +1581,7 @@ def report_final(nslits, all_patt_dict, detections,
         # Report
         cen_wave = wv_calib[st]['cen_wave']
         cen_disp = wv_calib[st]['cen_disp']
-        msgs.info(
+        log.info(
             f'{report_ttl}'
             f'  Pixels {signtxt} with wavelength\n'
             f'  Number of lines detected = {detections[st].size}\n'
@@ -1770,20 +1770,20 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
             if slit not in self.ok_mask:
                 self.wv_calib[str(slit)] = None
                 continue
-            msgs.info('Reidentifying and fitting slit # {0:d}/{1:d}'.format(slit+1,self.nslits))
+            log.info('Reidentifying and fitting slit # {0:d}/{1:d}'.format(slit+1,self.nslits))
             # If this is a fixed format echelle, arxiv has exactly the same orders as the data and so
             # we only pass in the relevant arxiv spectrum to make this much faster
             ind_sp = self.arxiv_orders.index(orders[slit]) if ech_fixed_format else ind_arxiv
             if ech_fixed_format:
-                msgs.info(f'Order: {orders[slit]}')
+                log.info(f'Order: {orders[slit]}')
             sigdetect = wvutils.parse_param(self.par, 'sigdetect', slit)
             cc_thresh = wvutils.parse_param(self.par, 'cc_thresh', slit)
-            msgs.info("Using sigdetect = {}".format(sigdetect))
+            log.info("Using sigdetect = {}".format(sigdetect))
             # get FWHM for this slit
             fwhm = set_fwhm(self.par, measured_fwhm=measured_fwhms[slit], verbose=True)
             # get rms threshold for this slit
             rms_thresh = round(self.par['rms_thresh_frac_fwhm'] * fwhm, 3)
-            msgs.info(f"Using RMS threshold = {rms_thresh} (pixels); RMS/FWHM threshold = {self.par['rms_thresh_frac_fwhm']}")
+            log.info(f"Using RMS threshold = {rms_thresh} (pixels); RMS/FWHM threshold = {self.par['rms_thresh_frac_fwhm']}")
             self.detections[str(slit)], self.spec_cont_sub[:,slit], self.all_patt_dict[str(slit)] = \
                 reidentify(self.spec[:,slit], self.spec_arxiv[:,ind_sp], self.wave_soln_arxiv[:,ind_sp],
                            self.tot_line_list, self.nreid_min, cont_sub=self.par['reid_cont_sub'],
@@ -1798,7 +1798,7 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
             if not self.all_patt_dict[str(slit)]['acceptable']:
                 self.wv_calib[str(slit)] = None
                 self.bad_slits = np.append(self.bad_slits, slit)
-                msgs.warning(
+                log.warning(
                     '---------------------------------------------------\n'
                     f'Reidentify report for slit {slit}/{self.nslits-1}{order_str}\n'
                     '  Cross-correlation failed\n'
@@ -1818,7 +1818,7 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
                 # This pattern wasn't good enough
                 self.wv_calib[str(slit)] = None
                 self.bad_slits = np.append(self.bad_slits, slit)
-                msgs.warning(
+                log.warning(
                     '---------------------------------------------------\n'
                     f'Reidentify report for slit {slit}/{self.nslits-1}{order_str}\n'
                     '  Final fit failed\n'
@@ -1827,7 +1827,7 @@ def __init__(self, spec, lamps, par, ech_fixed_format=False, ok_mask=None,
                 continue
             # Is the RMS below the threshold?
             if final_fit['rms'] > rms_thresh:
-                msgs.warning(
+                log.warning(
                     '---------------------------------------------------\n'
                     f'Reidentify report for slit {slit}/{self.nslits-1}{order_str}\n'
                     f'  Poor RMS ({final_fit["rms"]:.3f})! Need to add additional spectra to '
@@ -1990,12 +1990,12 @@ def __init__(self, spec, lamps, par=None, ok_mask=None,
             self._thar = True
             # Set up the grids to be used for pattern matching
             self.set_grids(ngridw=5000, ngridd=1000)
-            msgs.info("Using KD Tree pattern matching algorithm to wavelength calibrate")
+            log.info("Using KD Tree pattern matching algorithm to wavelength calibrate")
             self.run_kdtree()
         else:
             # Set up the grids to be used for pattern matching
             self.set_grids()
-            msgs.info("Using brute force pattern matching algorithm to wavelength calibrate")
+            log.info("Using brute force pattern matching algorithm to wavelength calibrate")
             self.run_brute()
     def get_results(self):
@@ -2051,7 +2051,7 @@ def run_brute_loop(self, slit, tcent_ecent, rms_thresh, wavedata=None):
         idthresh = 0.5  # Criteria for early return (at least this fraction of lines must have
                         # an ID on either side of the spectrum)
-        msgs.info(f"Using RMS threshold = {rms_thresh} (pixels); RMS/FWHM threshold = {self._par['rms_thresh_frac_fwhm']}")
+        log.info(f"Using RMS threshold = {rms_thresh} (pixels); RMS/FWHM threshold = {self._par['rms_thresh_frac_fwhm']}")
         best_patt_dict, best_final_fit = None, None
         # Loop through parameter space
         for poly in rng_poly:
@@ -2098,14 +2098,14 @@ def run_brute(self, min_nlines=10):
         for slit in range(self._nslit):
             if slit not in self._ok_mask:
                 self._all_final_fit[str(slit)] = None
-                msgs.info('Ignoring masked slit {}'.format(slit+1))
+                log.info('Ignoring masked slit {}'.format(slit+1))
                 continue
             else:
-                msgs.info("Working on slit: {}".format(slit+1))
+                log.info("Working on slit: {}".format(slit+1))
             # TODO: Pass in all the possible params for detect_lines to arc_lines_from_spec, and update the parset
             # Detect lines, and decide which tcent to use
             sigdetect = wvutils.parse_param(self._par, 'sigdetect', slit)
-            msgs.info("Using sigdetect = {}".format(sigdetect))
+            log.info("Using sigdetect = {}".format(sigdetect))
             # get FWHM for this slit
             fwhm = set_fwhm(self._par, measured_fwhm=self._measured_fwhms[slit], verbose=True)
             # get rms threshold for this slit
@@ -2119,7 +2119,7 @@ def run_brute(self, min_nlines=10):
             # Were there enough lines? This mainly deals with junk slits
             if self._all_tcent.size < min_nlines:
-                msgs.warning("Not enough lines to identify in slit {0:d}!".format(slit+1))
+                log.warning("Not enough lines to identify in slit {0:d}!".format(slit+1))
                 self._det_weak[str(slit)] = [None,None]
                 self._det_stro[str(slit)] = [None,None]
                 # Remove from ok mask
@@ -2142,13 +2142,13 @@ def run_brute(self, min_nlines=10):
         # list of all lines in every slit, and refit all spectra
         # in self.cross_match() good fits are cross-correlated with each other, so we need to have at least 2 good fits
         if np.where(good_fit[self._ok_mask])[0].size > 1 and np.any(np.logical_not(good_fit[self._ok_mask])):
-            msgs.info('Checking wavelength solution by cross-correlating with all slits')
-            msgs.info('Cross-correlation iteration #1')
+            log.info('Checking wavelength solution by cross-correlating with all slits')
+            log.info('Cross-correlation iteration #1')
             obad_slits = self.cross_match(good_fit, self._det_weak)
             cntr = 2
             while obad_slits.size > 0:
-                msgs.info('Cross-correlation iteration #{:d}'.format(cntr))
+                log.info('Cross-correlation iteration #{:d}'.format(cntr))
                 good_fit = np.ones(self._nslit, dtype=bool)
                 good_fit[obad_slits] = False
                 bad_slits = self.cross_match(good_fit,self._det_weak)
@@ -2157,7 +2157,7 @@ def run_brute(self, min_nlines=10):
                 obad_slits = bad_slits.copy()
                 cntr += 1
                 if cntr > 10:
-                    msgs.warning("Breaking while loop before convergence. Check the wavelength solution!")
+                    log.warning("Breaking while loop before convergence. Check the wavelength solution!")
                     break
         # With these updates to the fits of each slit, determine the final fit.
@@ -2229,7 +2229,7 @@ def run_kdtree(self, polygon=4, detsrch=7, lstsrch=10, pixtol=5):
                 wvutils.arc_lines_from_spec(self._spec[:, slit], sigdetect=sigdetect, fwhm=fwhm,
                                             nonlinear_counts = self._nonlinear_counts)
             if self._all_tcent.size == 0:
-                msgs.warning("No lines to identify in slit {0:d}!".format(slit+ 1))
+                log.warning("No lines to identify in slit {0:d}!".format(slit+ 1))
                 continue
             # Save the detections
@@ -2240,40 +2240,40 @@ def run_kdtree(self, polygon=4, detsrch=7, lstsrch=10, pixtol=5):
             use_tcentm, use_ecentm = self.get_use_tcent(-1, self._det_weak[str(slit)])
             if use_tcentp.size < detsrch:
                 if self._verbose:
-                    msgs.info("Not enough lines to test this solution, will attempt another.")
+                    log.info("Not enough lines to test this solution, will attempt another.")
                 return None, None
             # Create a detlines KD Tree
             maxlinear = 0.5*self._npix
             if polygon == 3:
-                msgs.info("Generating patterns for a trigon")
+                log.info("Generating patterns for a trigon")
                 patternp, indexp = kdtree_generator.trigon(use_tcentp, detsrch, maxlinear)
                 patternm, indexm = kdtree_generator.trigon(use_tcentm, detsrch, maxlinear)
             elif polygon == 4:
-                msgs.info("Generating patterns for a tetragon")
+                log.info("Generating patterns for a tetragon")
                 patternp, indexp = kdtree_generator.tetragon(use_tcentp, detsrch, maxlinear)
                 patternm, indexm = kdtree_generator.tetragon(use_tcentm, detsrch, maxlinear)
             elif polygon == 5:
-                msgs.info("Generating patterns for a pentagon")
+                log.info("Generating patterns for a pentagon")
                 patternp, indexp = kdtree_generator.pentagon(use_tcentp, detsrch, maxlinear)
                 patternm, indexm = kdtree_generator.pentagon(use_tcentm, detsrch, maxlinear)
             elif polygon == 6:
-                msgs.info("Generating patterns for a hexagon")
+                log.info("Generating patterns for a hexagon")
                 patternp, indexp = kdtree_generator.hexagon(use_tcentp, detsrch, maxlinear)
                 patternm, indexm = kdtree_generator.hexagon(use_tcentm, detsrch, maxlinear)
             else:
msgs.warning("Patterns can only be generated with 3 <= polygon <= 6") + log.warning("Patterns can only be generated with 3 <= polygon <= 6") return None dettreep = scipy.spatial.cKDTree(patternp, leafsize=30) dettreem = scipy.spatial.cKDTree(patternm, leafsize=30) # Query the detections tree - msgs.info("Querying KD tree patterns (slit {0:d}/{1:d})".format(slit+1, self._nslit)) + log.info("Querying KD tree patterns (slit {0:d}/{1:d})".format(slit+1, self._nslit)) resultp = dettreep.query_ball_tree(lsttree, r=err) resultm = dettreem.query_ball_tree(lsttree, r=err) - msgs.info("Identifying wavelengths for each pattern") + log.info("Identifying wavelengths for each pattern") # First flatten the KD Tree query results so numba can handle the input array flatresp = [item for sublist in resultp for item in sublist] flatresm = [item for sublist in resultm for item in sublist] @@ -2285,7 +2285,7 @@ def run_kdtree(self, polygon=4, detsrch=7, lstsrch=10, pixtol=5): msols = results_kdtree_nb(use_tcentm, self._wvdata, flatresm, flatidxm, indexm, lindex, indexm.shape[1], self._npix) - msgs.info("Identifying the best solution") + log.info("Identifying the best solution") patt_dict, final_fit = self.solve_slit(slit, psols, msols, self._det_weak[str(slit)], nselw=1, nseld=2) # Print preliminary report @@ -2410,7 +2410,7 @@ def cross_match(self, good_fit, detections): bad_slits = np.setdiff1d(np.arange(self._nslit)[self._ok_mask], good_slits, assume_unique=True) nbad = bad_slits.size if nbad > 0: - msgs.info('Working on {:d}'.format(nbad) + ' bad slits: {:}'.format(bad_slits + 1)) + log.info('Working on {:d}'.format(nbad) + ' bad slits: {:}'.format(bad_slits + 1)) # Get the sign (i.e. if pixels correlate/anticorrelate with wavelength) # and dispersion (A/pix). Assume these are the same for all slits @@ -2439,7 +2439,7 @@ def cross_match(self, good_fit, detections): if bs not in self._ok_mask: continue if detections[str(bs)][0] is None: # No detections at all; slit is hopeless - msgs.warning('Slit {:d}'.format(bs) + ' has no arc line detections. Likely this slit is junk!') + log.warning('Slit {:d}'.format(bs) + ' has no arc line detections. Likely this slit is junk!') self._bad_slits.append(bs) continue @@ -2457,7 +2457,7 @@ def cross_match(self, good_fit, detections): stretch_vec = np.zeros(good_slits.size) ccorr_vec = np.zeros(good_slits.size) for cntr, gs in enumerate(good_slits): - msgs.info('Cross-correlating bad slit # {:d}'.format(bs + 1) + ' with good slit # {:d}'.format(gs + 1)) + log.info('Cross-correlating bad slit # {:d}'.format(bs + 1) + ' with good slit # {:d}'.format(gs + 1)) # Match the peaks between the two spectra. # spec_gs_adj is the stretched spectrum success, shift_vec[cntr], stretch_vec[cntr], _, ccorr_vec[cntr], _, _ = \ @@ -2465,7 +2465,7 @@ def cross_match(self, good_fit, detections): cc_thresh=cc_thresh, fwhm=fwhm, debug=self._debug, stretch_func=self._par['stretch_func']) if success != 1: - msgs.warning('cross-correlation failed or cc rms_thresh: - msgs.warning( + log.warning( '---------------------------------------------------\n' f'Cross-match report for slit {bs+1}/{self._nslit}\n' f' Poor RMS ({final_fit["rms"]:.3f})! 
@@ -2410,7 +2410,7 @@ def cross_match(self, good_fit, detections):
         bad_slits = np.setdiff1d(np.arange(self._nslit)[self._ok_mask], good_slits, assume_unique=True)
         nbad = bad_slits.size
         if nbad > 0:
-            msgs.info('Working on {:d}'.format(nbad) + ' bad slits: {:}'.format(bad_slits + 1))
+            log.info('Working on {:d}'.format(nbad) + ' bad slits: {:}'.format(bad_slits + 1))
         # Get the sign (i.e. if pixels correlate/anticorrelate with wavelength)
         # and dispersion (A/pix). Assume these are the same for all slits
@@ -2439,7 +2439,7 @@ def cross_match(self, good_fit, detections):
             if bs not in self._ok_mask:
                 continue
             if detections[str(bs)][0] is None:  # No detections at all; slit is hopeless
-                msgs.warning('Slit {:d}'.format(bs) + ' has no arc line detections. Likely this slit is junk!')
+                log.warning('Slit {:d}'.format(bs) + ' has no arc line detections. Likely this slit is junk!')
                 self._bad_slits.append(bs)
                 continue
@@ -2457,7 +2457,7 @@ def cross_match(self, good_fit, detections):
             stretch_vec = np.zeros(good_slits.size)
             ccorr_vec = np.zeros(good_slits.size)
             for cntr, gs in enumerate(good_slits):
-                msgs.info('Cross-correlating bad slit # {:d}'.format(bs + 1) + ' with good slit # {:d}'.format(gs + 1))
+                log.info('Cross-correlating bad slit # {:d}'.format(bs + 1) + ' with good slit # {:d}'.format(gs + 1))
                 # Match the peaks between the two spectra.
                 # spec_gs_adj is the stretched spectrum
                 success, shift_vec[cntr], stretch_vec[cntr], _, ccorr_vec[cntr], _, _ = \
@@ -2465,7 +2465,7 @@ def cross_match(self, good_fit, detections):
                                   cc_thresh=cc_thresh, fwhm=fwhm, debug=self._debug,
                                   stretch_func=self._par['stretch_func'])
                 if success != 1:
-                    msgs.warning('cross-correlation failed or cc [...]
[...] rms_thresh:
-                msgs.warning(
+                log.warning(
                     '---------------------------------------------------\n'
                     f'Cross-match report for slit {bs+1}/{self._nslit}\n'
                     f'  Poor RMS ({final_fit["rms"]:.3f})! Will try cross matching iteratively\n'
@@ -2688,7 +2688,7 @@ def results_brute(self, tcent_ecent, poly=3, pix_tol=0.5, detsrch=5, lstsrch=5,
         elif poly == 4:
             from pypeit.core.wavecal.patterns import quadrangles as generate_patterns
         else:
-            msgs.warning("Pattern matching is only available for trigons and tetragons.")
+            log.warning("Pattern matching is only available for trigons and tetragons.")
             return None, None
         if wavedata is None:
@@ -2698,11 +2698,11 @@ def results_brute(self, tcent_ecent, poly=3, pix_tol=0.5, detsrch=5, lstsrch=5,
         use_tcent, _ = self.get_use_tcent(1, tcent_ecent)
         if use_tcent.size < lstsrch or use_tcent.size < detsrch:
             if self._verbose:
-                msgs.info("Not enough lines to test this solution, will attempt another.")
+                log.info("Not enough lines to test this solution, will attempt another.")
             return None, None
         if self._verbose:
-            msgs.info("Begin pattern matching")
+            log.info("Begin pattern matching")
         # First run pattern recognition assuming pixels correlate with wavelength
         dindexp, lindexp, wvcenp, dispsp = generate_patterns(use_tcent, wavedata, self._npix,
@@ -2863,7 +2863,7 @@ def solve_slit(self, slit, psols, msols, tcent_ecent, nstore=1, nselw=3, nseld=3
             bestlist.append([allwcen[idx], alldisp[idx], allhnum[idx], sign, dindex, lindex])
         if self._verbose:
-            msgs.info("Fitting the wavelength solution for each slit")
+            log.info("Fitting the wavelength solution for each slit")
         patt_dict, final_dict = None, None
         for idx in range(nstore):
             # Solve the patterns
@@ -2954,7 +2954,7 @@ def solve_patterns(self, slit, bestlist, tcent_ecent):
         # Check that a solution has been found
         if patt_dict['nmatch'] == 0 and self._verbose:
-            msgs.info(
+            log.info(
                 '\n---------------------------------------------------'
                 '\nInitial report:'
                 '\n  No matches! Try another algorithm'
                 '\n---------------------------------------------------'
             )
             return None
         elif self._verbose:
             # Report
-            msgs.info(
+            log.info(
                 '\n---------------------------------------------------'
                 '\nInitial report:'
                 f'\n  Pixels {signtxt} with wavelength'
@@ -2995,14 +2995,14 @@ def finalize_fit(self, detections):
             out_dict = dict(pix=use_tcent, IDs=self._all_patt_dict[str(slit)]['IDs'])
             jdict = utils.jsonify(out_dict)
             ltu.savejson(self._outroot + slittxt + '.json', jdict, easy_to_read=True, overwrite=True)
-            msgs.info("Wrote: {:s}".format(self._outroot + slittxt + '.json'))
+            log.info("Wrote: {:s}".format(self._outroot + slittxt + '.json'))
             # Plot
             tmp_list = np.vstack([self._line_lists, self._unknwns])
             match_qa(self._spec[:, slit], use_tcent, tmp_list,
                      self._all_patt_dict[str(slit)]['IDs'],
                      self._all_patt_dict[str(slit)]['scores'],
                      outfile=self._outroot + slittxt + '.pdf')
-            msgs.info("Wrote: {:s}".format(self._outroot + slittxt + '.pdf'))
+            log.info("Wrote: {:s}".format(self._outroot + slittxt + '.pdf'))
             # Perform the final fit for the best solution
             best_final_fit = wv_fitting.fit_slit(self._spec[:, slit], self._all_patt_dict[str(slit)],
                                                  use_tcent, self._line_lists, outroot=self._outroot,
                                                  slittxt=slittxt)
@@ -3019,7 +3019,7 @@ def report_prelim(self, slit, best_patt_dict, best_final_fit):
         good_fit = False
         # Report on the best preliminary result
         if best_final_fit is None:
-            msgs.warning(
+            log.warning(
                 '---------------------------------------------------'
                 f'\nPreliminary report for slit {slit+1}/{self._nslit}:'
                 '\n  No matches! Attempting to cross match.'
@@ -3028,7 +3028,7 @@ def report_prelim(self, slit, best_patt_dict, best_final_fit):
             self._all_patt_dict[str(slit)] = None
             self._all_final_fit[str(slit)] = None
         elif best_final_fit['rms'] > rms_thresh:
-            msgs.warning(
+            log.warning(
                 '---------------------------------------------------'
                 f'\nPreliminary report for slit {slit+1}/{self._nslit}:'
                 f'\n  Poor RMS ({best_final_fit["rms"]:.3f})! Attempting to cross match.'
@@ -3043,7 +3043,7 @@ def report_prelim(self, slit, best_patt_dict, best_final_fit):
         else:
             signtxt = 'anticorrelate'
         # Report
-        msgs.info(
+        log.info(
             '---------------------------------------------------'
             f'\nPreliminary report for slit {slit+1}/{self._nslit}:'
             f'\n  Pixels {signtxt} with wavelength'
@@ -3070,10 +3070,10 @@ def report_final(self):
                   f'Final report for slit {slit+1}/{self._nslit}:\n'
                  )
         if slit not in self._ok_mask:
-            msgs.warning(badmsg + 'Masked slit ignored')
+            log.warning(badmsg + 'Masked slit ignored')
             continue
         if self._all_patt_dict[str(slit)] is None:
-            msgs.warning(badmsg + ' Wavelength calibration not performed!')
+            log.warning(badmsg + ' Wavelength calibration not performed!')
             continue
         st = str(slit)
         if self._all_patt_dict[st]['sign'] == +1:
@@ -3084,7 +3084,7 @@ def report_final(self):
         centwave = self._all_final_fit[st].pypeitfit.eval(0.5)
         tempwave = self._all_final_fit[st].pypeitfit.eval(0.5 + 1.0/self._npix)
         centdisp = abs(centwave-tempwave)
-        msgs.info(
+        log.info(
             '\n---------------------------------------------------'
             f'\nFinal report for slit {slit+1}/{self._nslit}:'
             f'\n  Pixels {signtxt} with wavelength'
diff --git a/pypeit/core/wavecal/echelle.py b/pypeit/core/wavecal/echelle.py
index a9b8c9cca1..a46e12f6c2 100644
--- a/pypeit/core/wavecal/echelle.py
+++ b/pypeit/core/wavecal/echelle.py
@@ -11,7 +11,7 @@
 from astropy.io import fits
 from astropy.table import Table
-from pypeit import msgs
+from pypeit import log
 from pypeit import dataPaths
 from pypeit.core import fitting
 from pypeit.core.wavecal import wvutils
@@ -236,7 +236,7 @@ def identify_ech_orders(arcspec, echangle, xdangle, dispname, angle_fits_file,
                          composite_arc_file, echangle, xdangle, dispname, nspec, norders, pad=pad)
     norders_guess = order_vec_guess.size
-    msgs.info(f'initial orders vec guess = {order_vec_guess}')
+    log.info(f'initial orders vec guess = {order_vec_guess}')
     # Since we padded the guess we need to pad the data to the same size
     arccen_pad = np.zeros((nspec, norders_guess))
     arccen_pad[:nspec, :norders] = arcspec
@@ -247,7 +247,7 @@ def identify_ech_orders(arcspec, echangle, xdangle, dispname,
                          percent_ceil=cc_percent_ceil, sigdetect=5.0, sig_ceil=10.0, fwhm=4.0,
                          debug=debug)
     if debug:
-        msgs.info(f'Cross-correlation for order identification: shift={shift_cc:.3f}, corr={corr_cc:.3f}')
+        log.info(f'Cross-correlation for order identification: shift={shift_cc:.3f}, corr={corr_cc:.3f}')
         from matplotlib import pyplot as plt
         xvals = np.arange(arccen_pad.flatten('F').size)
         plt.clf()
@@ -261,9 +261,9 @@ def identify_ech_orders(arcspec, echangle, xdangle, dispname,
     # Finish
     ordr_shift = int(np.round(shift_cc / nspec))
     spec_shift = int(np.round(shift_cc - ordr_shift * nspec))
-    msgs.info('Shift in order number between prediction and reddest order: {:.3f}'.format(
+    log.info('Shift in order number between prediction and reddest order: {:.3f}'.format(
              ordr_shift + pad))
-    msgs.info('Shift in spectral pixels between prediction and data: {:.3f}'.format(spec_shift))
+    log.info('Shift in spectral pixels between prediction and data: {:.3f}'.format(spec_shift))
     # Assign
     order_vec = order_vec_guess[0] + ordr_shift - np.arange(norders)
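identify_ech_orders stacks the orders into one long 1D vector, so a single cross-correlation lag encodes both an order offset and a pixel offset, recovered by dividing by nspec. A worked example of that bookkeeping with made-up numbers:

    nspec = 2048
    shift_cc = 4077.0     # hypothetical lag from the 1D cross-correlation

    ordr_shift = int(round(shift_cc / nspec))                # -> 2 whole orders
    spec_shift = int(round(shift_cc - ordr_shift * nspec))   # -> -19 pixels
    print(ordr_shift, spec_shift)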
diff --git a/pypeit/core/wavecal/templates.py b/pypeit/core/wavecal/templates.py
index 850a6b6c5c..171d677703 100644
--- a/pypeit/core/wavecal/templates.py
+++ b/pypeit/core/wavecal/templates.py
@@ -25,7 +25,7 @@
 import linetools.utils
-from pypeit import msgs
+from pypeit import log
 from pypeit import utils
 from pypeit import io
 from pypeit import wavecalib
@@ -146,7 +146,7 @@ def build_template(in_files, slits, wv_cuts, binspec, outroot, outdir=None,
         else:
             wv_vac, spec = wvspec['wv_vac'], wvspec['spec']
         # Diagnostics
-        msgs.info("wvmin, wvmax of {}: {}, {}".format(in_file, wv_vac.min(), wv_vac.max()))
+        log.info("wvmin, wvmax of {}: {}, {}".format(in_file, wv_vac.min(), wv_vac.max()))
         # Cut
         if len(slits) > 1:
             wvmin, wvmax = grab_wvlim(kk, wv_cuts, len(slits))
diff --git a/pypeit/core/wavecal/waveio.py b/pypeit/core/wavecal/waveio.py
index c9ca050b1b..c8b8f46784 100644
--- a/pypeit/core/wavecal/waveio.py
+++ b/pypeit/core/wavecal/waveio.py
@@ -9,7 +9,7 @@
 import linetools.utils
 import numpy as np
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import dataPaths
 from pypeit import cache
@@ -221,7 +221,7 @@ def load_line_lists(lamps, all=False, include_unknown:bool=False, restrict_on_in
             i1 = line_file.rfind('_')
             lamps.append(line_file[i0+1:i1])
-    msgs.info(f"Arc lamps used: {', '.join(lamps)}")
+    log.info(f"Arc lamps used: {', '.join(lamps)}")
     # Read standard files
     # NOTE: If one of the `lamps` does not exist, dataPaths.linelist.get_file_path()
     # will exit with raise PypeItError().
@@ -298,7 +298,7 @@ def load_tree(polygon=4, numsearch=20):
         file_load = pickle.load(f_obj)
         index = np.load(fileindx)
     except FileNotFoundError:
-        msgs.info(
+        log.info(
             'The requested KDTree was not found on disk\nplease be patient while the ThAr KDTree '
             'is built and saved to disk.'
         )
diff --git a/pypeit/core/wavecal/wv_fitting.py b/pypeit/core/wavecal/wv_fitting.py
index 7b201c2db1..3148aaa1b8 100644
--- a/pypeit/core/wavecal/wv_fitting.py
+++ b/pypeit/core/wavecal/wv_fitting.py
@@ -12,7 +12,7 @@
 from pypeit.core.wavecal import autoid
 from pypeit.core.wavecal import defs
 from pypeit.core import fitting
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import datamodel
@@ -149,7 +149,7 @@ def to_hdu(self, **kwargs):
         """
         if 'force_to_bintbl' in kwargs:
             if not kwargs['force_to_bintbl']:
-                msgs.warning(f'{self.__class__.__name__} objects must always use '
+                log.warning(f'{self.__class__.__name__} objects must always use '
                              'force_to_bintbl = True!')
             kwargs.pop('force_to_bintbl')
         return super().to_hdu(force_to_bintbl=True, **kwargs)
@@ -391,7 +391,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
     maxiter = xfit.size - n_order - 2
     #
     if xfit.size == 0:
-        msgs.warning("All points rejected !!")
+        log.warning("All points rejected !!")
         return None
     # Fit
     pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func, maxiter=maxiter,
@@ -399,14 +399,14 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
                                    minx=fmin, maxx=fmax, weights=wfit)
     # Junk fit?
     if pypeitFit is None:
-        msgs.warning("Bad fit!!")
+        log.warning("Bad fit!!")
         return None
     # RMS is computed from `yfit`, which is the wavelengths of the lines. Convert to pixels.
     rms_angstrom = pypeitFit.calc_fit_rms(apply_mask=True)
     rms_pixels = rms_angstrom/dispersion
     if verbose:
-        msgs.info(f"n_order = {n_order}: RMS = {rms_pixels:g} pixels")
+        log.info(f"n_order = {n_order}: RMS = {rms_pixels:g} pixels")
     # Reject but keep originals (until final fit)
     ifit = list(ifit[pypeitFit.gpm == 1]) + sv_ifit
@@ -434,7 +434,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
     # Final fit (originals can now be rejected)
     if len(ifit) <= n_final:
         n_order = len(ifit)-1
-        msgs.warning(f'Not enough lines for n_final! Fit order = {n_order}')
+        log.warning(f'Not enough lines for n_final! Fit order = {n_order}')
     xfit, yfit, wfit = tcent[ifit], all_ids[ifit], weights[ifit]
     pypeitFit = fitting.robust_fit(xfit/xnspecmin1, yfit, n_order, function=func,
@@ -450,7 +450,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
         if verbose:
             for kk, imask in enumerate(irej):
                 wave = pypeitFit.eval(xrej[kk]/xnspecmin1)  #, func, minx=fmin, maxx=fmax)
-                msgs.info('Rejecting arc line {:g}; {:g}'.format(yfit[imask], wave))
+                log.info('Rejecting arc line {:g}; {:g}'.format(yfit[imask], wave))
     else:
         xrej = []
         yrej = []
@@ -459,7 +459,7 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, dispersion,
     # Final RMS computed from `yfit`, which is the wavelengths of the lines. Convert to pixels.
     rms_angstrom = pypeitFit.calc_fit_rms(apply_mask=True)
     rms_pixels = rms_angstrom/dispersion
-    msgs.info(f"RMS of the final wavelength fit: {rms_pixels:g} pixels")
+    log.info(f"RMS of the final wavelength fit: {rms_pixels:g} pixels")
     # Pack up fit
     spec_vec = np.arange(nspec)
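The fit RMS comes out in wavelength units (from yfit, in Angstroms) and is converted to pixels by dividing by the dispersion, e.g. with made-up numbers:

    rms_angstrom = 0.05    # RMS of the wavelength fit, in Angstroms (synthetic)
    dispersion = 0.25      # dispersion of the solution, Angstroms per pixel (synthetic)
    rms_pixels = rms_angstrom / dispersion
    print(rms_pixels)      # -> 0.2 pixels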
diff --git a/pypeit/core/wavecal/wvutils.py b/pypeit/core/wavecal/wvutils.py
index 3303cca8aa..75115ab51d 100644
--- a/pypeit/core/wavecal/wvutils.py
+++ b/pypeit/core/wavecal/wvutils.py
@@ -16,7 +16,7 @@
 from astropy import convolution
 from astropy import constants
-from pypeit import msgs
+from pypeit import log
 from pypeit import cache
 from pypeit import utils
 from pypeit.core import arc
@@ -288,9 +288,9 @@ def get_wave_grid(waves=None, gpms=None, wave_method='linear', iref=0, wave_grid
         wave_grid = np.power(10.0,newloglam)
     elif wave_method == 'iref':
         # Use the iref index wavelength array
-        msgs.info(f'iref for the list is set to {iref}')
-        msgs.info(f'The shape of the list is: {np.shape(waves)}')
-        msgs.info(f'shape of the first wave_grid in the list is: {np.shape(waves[iref])}')
+        log.info(f'iref for the list is set to {iref}')
+        log.info(f'The shape of the list is: {np.shape(waves)}')
+        log.info(f'shape of the first wave_grid in the list is: {np.shape(waves[iref])}')
         wave_tmp = waves[iref]
         wave_grid = wave_tmp[wave_tmp > 1.0]
     if spec_samp_fact != 1:  # adjust sampling via internal interpolation
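The log-linear branch of get_wave_grid builds the grid in log10(lambda) and exponentiates, which gives a constant velocity width per pixel. A minimal sketch of that construction with invented values (not PypeIt's parameter handling):

    import numpy as np

    wave_min, wave_max = 4000.0, 7000.0   # Angstroms (synthetic)
    dloglam = 1.0e-4                      # constant step in log10(lambda)

    newloglam = np.arange(np.log10(wave_min), np.log10(wave_max), dloglam)
    wave_grid = np.power(10.0, newloglam)

    # Constant step in log(lambda) corresponds to constant velocity per pixel:
    c_kms = 299792.458
    print('km/s per pixel:', c_kms * np.log(10.0) * dloglam)   # ~69 km/s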
@@ -455,7 +455,7 @@ def zerolag_shift_stretch(theta, y1, y2, stretch_func = 'quadratic'):
     corr_zero = np.sum(y1*y2_corr)
     corr_denom = np.sqrt(np.sum(y1*y1)*np.sum(y2_corr*y2_corr))
     if corr_denom == 0.0:
-        msgs.warning('The shifted and stretched spectrum is zero everywhere. Cross-correlation cannot be performed. There is likely a bug somewhere')
+        log.warning('The shifted and stretched spectrum is zero everywhere. Cross-correlation cannot be performed. There is likely a bug somewhere')
         raise PypeItError()
     corr_norm = corr_zero / corr_denom
     return -corr_norm
@@ -515,7 +515,7 @@ def get_xcorr_arc(inspec1, sigdetect=5.0, input_thresh=None, sig_ceil=10.0, perc
     ampl_clip = np.clip(ampl, None, ceil_upper)
     if ampl_clip.size == 0:
-        msgs.warning('No lines were detected in the arc spectrum. Cannot create a synthetic arc spectrum for cross-correlation.')
+        log.warning('No lines were detected in the arc spectrum. Cannot create a synthetic arc spectrum for cross-correlation.')
         return np.zeros_like(inspec1)
     # Make a fake arc by plopping down Gaussians at the location of every centroided line we found
@@ -597,7 +597,7 @@ def xcorr_shift(inspec1, inspec2, percent_ceil=50.0, use_raw_arc=False, sigdetec
     y1, y2 = inspec1, inspec2
     if np.all(y1 == 0) or np.all(y2 == 0):
-        msgs.warning('One of the input spectra is all zeros. Returning shift = 0.0')
+        log.warning('One of the input spectra is all zeros. Returning shift = 0.0')
         return 0.0, 0.0
     nspec = y1.shape[0]
@@ -760,7 +760,7 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use
                             sig_ceil=sig_ceil, fwhm=fwhm)
     if np.all(y1 == 0) or np.all(y2 == 0):
-        msgs.warning('No lines detected punting on shift/stretch')
+        log.warning('No lines detected punting on shift/stretch')
         return 0, None, None, None, None, None, None
     # Do the cross-correlation first and determine the initial shift
@@ -787,7 +787,7 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use
                 zerolag_shift_stretch, args=(y1,y2), x0=x0_guess, tol=toler,
                 bounds=bounds, disp=False, polish=True, seed=seed)
     except PypeItError:
-        msgs.warning("Differential evolution failed.")
+        log.warning("Differential evolution failed.")
         return 0, None, None, None, None, None, None
     corr_de = -result.fun
     shift_de = result.x[0]
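xcorr_shift_stretch refines the shift and stretch by minimizing the negative normalized cross-correlation with scipy's differential_evolution. The same calling pattern on a toy objective (not PypeIt's zerolag_shift_stretch; bounds and seed are invented):

    import numpy as np
    from scipy.optimize import differential_evolution

    def neg_corr(theta):
        # Toy objective with its minimum at shift=12, stretch=1.001 (synthetic)
        shift, stretch = theta
        return (shift - 12.0)**2 + 1e6*(stretch - 1.001)**2

    bounds = [(-50.0, 50.0),   # allowed shift, pixels
              (0.95, 1.05)]    # allowed stretch
    result = differential_evolution(neg_corr, bounds, tol=1e-8, seed=42,
                                    disp=False, polish=True)
    print(result.success, result.x)   # -> True, [~12, ~1.001]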
@@ -796,11 +796,11 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use
     if not result.success:
-        msgs.warning('Fit for shift and stretch did not converge!')
+        log.warning('Fit for shift and stretch did not converge!')
     if(corr_de < corr_cc):
         # Occasionally the differential evolution craps out and returns a value worse than the CC
         # value. In these cases just use the cc value
-        msgs.warning(
+        log.warning(
             'Shift/Stretch optimizer performed worse than simple x-correlation. '
             'Returning simple x-correlation shift and no stretch:\n'
             f'  Optimizer: corr={corr_de:5.3f}, shift={shift_de:5.3f}, stretch={stretch_de:7.5f}\n'
@@ -953,12 +953,12 @@ def write_template(nwwv, nwspec, binspec, outpath, outroot, det_cut=None,
     # Write
     outfile = os.path.join(outpath, outroot)
     tbl.write(outfile, overwrite=overwrite)
-    msgs.info(f"Your arxiv solution has been written to {outfile}\n")
+    log.info(f"Your arxiv solution has been written to {outfile}\n")
     if to_cache:
         # Also copy the file to the cache for direct use
         cache.write_file_to_cache(outroot, outroot, "arc_lines/reid_arxiv")
-        msgs.info(f"Your arxiv solution has also been cached.\n"
+        log.info(f"Your arxiv solution has also been cached.\n"
                   f"To utilize this wavelength solution, insert the\n"
                   f"following block in your PypeIt Reduction File:\n"
                   f" [calibrations]\n"
@@ -966,7 +966,7 @@ def write_template(nwwv, nwspec, binspec, outpath, outroot, det_cut=None,
                   f"   reid_arxiv = {outroot}\n"
                   f"   method = full_template\n")
     print("")  # Empty line for clarity
-    msgs.info(f"To use exactly the solutions created above\n"
+    log.info(f"To use exactly the solutions created above\n"
              f"disable the 2d fitting by adding the keyword ech_2dfit = False")
     print("")  # Empty line for clarity
-    msgs.info("Please consider sharing your solution with the PypeIt Developers.")
+    log.info("Please consider sharing your solution with the PypeIt Developers.")
diff --git a/pypeit/datamodel.py b/pypeit/datamodel.py
index be1a57924b..83fe59a057 100644
--- a/pypeit/datamodel.py
+++ b/pypeit/datamodel.py
@@ -468,7 +468,7 @@ def _validate(self):
 from astropy.table import Table
 from pypeit import io
-from pypeit import msgs, PypeItDataModelError
+from pypeit import log, PypeItDataModelError
# TODO: There are methods in, e.g., doc/scripts/build_specobj_rst.py that output
# datamodels for specific datacontainers. It would be useful if we had
@@ -977,7 +977,7 @@ def _parse(cls, hdu, ext=None, ext_pseudo=None, transpose_table_arrays=False,
         # DataContainers that have no data, although such a usage case should be
         # rare.
         if np.all([_hdu[e].data is None for e in _ext]):
-            msgs.warning(f'Extensions to be read by {cls.__name__} have no data!')
+            log.warning(f'Extensions to be read by {cls.__name__} have no data!')
             # This is so that the returned booleans for reading the
             # data are not tripped as false!
             found_data = True
@@ -1157,7 +1157,7 @@ def _check_parsed(cls, version_passed, type_passed, chk_version=True):
             if chk_version:
                 raise PypeItDataModelError(msg)
             else:
-                msgs.warning(msg)
+                log.warning(msg)
     def __getattr__(self, item):
         """Maps values to attributes.
@@ -1551,7 +1551,7 @@ def from_file(cls, ifile, verbose=True, chk_version=True, **kwargs):
             raise FileNotFoundError(f'{_ifile} does not exist!')
         if verbose:
-            msgs.info(f'Loading {cls.__name__} from {_ifile}')
+            log.info(f'Loading {cls.__name__} from {_ifile}')
         # Do it
         with io.fits_open(_ifile) as hdu:
diff --git a/pypeit/display/display.py b/pypeit/display/display.py
index 52e40b8b3e..cc6532c761 100644
--- a/pypeit/display/display.py
+++ b/pypeit/display/display.py
@@ -22,7 +22,7 @@
 from ginga.util import grc
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import io
 from pypeit import utils
@@ -83,7 +83,7 @@ def connect_to_ginga(host='localhost', port=grc.default_rc_port,
         if raise_err:
             raise ValueError
         else:
-            msgs.warning('Problem connecting to Ginga. Launch an RC Ginga viewer and '
+            log.warning('Problem connecting to Ginga. Launch an RC Ginga viewer and '
                         f'then continue: \n    ginga --rcport={port} --modules=RC,SlitWavelength')
     # Return
@@ -372,7 +372,7 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids=
         raise PypeItError('Input left and right traces must have the same shape if they have been '
                           'synchronized into slits.')
     if left_ids is not None or right_ids is not None:
-        msgs.warning('For showing synced edges, left and right ID numbers are ignored.')
+        log.warning('For showing synced edges, left and right ID numbers are ignored.')
     nslits = _left.shape[1]
     _left_ids = None
     _right_ids = None
diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py
index 4103d0d774..f92c0855eb 100644
--- a/pypeit/edgetrace.py
+++ b/pypeit/edgetrace.py
@@ -44,7 +44,7 @@
 from astropy import table
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import utils
 from pypeit import sampling
@@ -764,9 +764,9 @@ def auto_trace(self, bpm=None, debug=0):
         # the slit-mask design. If unexpected traces are found in the image, they
         # will be removed. If traces are missed, they will be added.
         if not self.is_empty and self.par['use_maskdesign']:
-            msgs.info('-' * 50)
-            msgs.info('{0:^50}'.format('Matching traces to the slit-mask design'))
-            msgs.info('-' * 50)
+            log.info('-' * 50)
+            log.info('{0:^50}'.format('Matching traces to the slit-mask design'))
+            log.info('-' * 50)
             self.maskdesign_matching(debug=debug > 1)
             if debug > 0:
                 self.show(title='After matching to slit-mask design metadata.')
@@ -779,7 +779,7 @@ def auto_trace(self, bpm=None, debug=0):
         if self.par['auto_pca'] and not self.can_pca() and not self.is_empty and self.par['sync_predict'] == 'pca':
             # TODO: This causes the code to fault. Maybe there's a way
             # to catch this earlier on?
-            msgs.warning(
+            log.warning(
                 'Sync predict cannot use PCA because too few edges were found. If you are '
                 'reducing multislit or echelle data, you may need a better trace image or '
                 'change the mode used to predict traces (see below). If you are reducing '
@@ -893,9 +893,9 @@ def initial_trace(self, bpm=None):
             same shape as `img`. If None, all pixels are assumed to be valid.
         """
-        msgs.info('-'*50)
-        msgs.info('{0:^50}'.format('Initialize Edge Tracing'))
-        msgs.info('-'*50)
+        log.info('-'*50)
+        log.info('{0:^50}'.format('Initialize Edge Tracing'))
+        log.info('-'*50)
         if self.traceid is not None:
             # Clear all pre-existing trace data
@@ -973,7 +973,7 @@ def initial_trace(self, bpm=None):
         # Check that edges were found
         if np.all(trace_id_img == 0):
-            msgs.warning('No edges found! Trace data will be empty.')
+            log.warning('No edges found! Trace data will be empty.')
             self._reinit_trace_data()
             self.log = [inspect.stack()[0][3]]
             return
@@ -1379,7 +1379,7 @@ def show(self, include_error=False, thin=10, in_ginga=False, include_img=True,
             # Use the provided SlitTraceSet
             _include_error = False
             if include_error:
-                msgs.warning('SlitTraceSet object has no errors.')
+                log.warning('SlitTraceSet object has no errors.')
             left, right, _ = slits.select_edges()  # original=original)
             cen = np.hstack((left,right))
             fit = cen
@@ -1443,7 +1443,7 @@ def show(self, include_error=False, thin=10, in_ginga=False, include_img=True,
             plt.ylim(-img_buffer, self.nspec+img_buffer)
         if self.is_empty:
-            msgs.info('No traces defined.')
+            log.info('No traces defined.')
             plt.show()
             return
@@ -1564,7 +1564,7 @@ def qa_plot(self, fileroot=None, min_spat=20):
         # Make plots
         j = 0
         page = 0
-        msgs.info('Constructing Trace QA plots')
+        log.info('Constructing Trace QA plots')
         for i in range(self.ntrace):
             # Plot index
@@ -1625,7 +1625,7 @@ def qa_plot(self, fileroot=None, min_spat=20):
                 page += 1
                 ofile = self.qa_path / 'PNGs' / f'{fileroot}_{str(page).zfill(ndig)}.png'
                 fig.canvas.print_figure(ofile, bbox_inches='tight')
-                msgs.info('Finished page {0}/{1}'.format(page, npages))
+                log.info('Finished page {0}/{1}'.format(page, npages))
                 fig.clear()
                 plt.close(fig)
                 fig = plt.figure(figsize=(1.5*w,1.5*h))
@@ -1774,13 +1774,13 @@ def centroid_refine(self, follow=True, start_indx=None, continuous=False, use_fi
         minimum_spec_length = self.par['det_min_spec_length']*self.nspec
         # Report
-        msgs.info('-'*50)
-        msgs.info('{0:^50}'.format('Edge Centroid Refinement'))
-        msgs.info('-'*50)
-        msgs.info('Width of window for centroiding the edges: {0:.1f}'.format(width))
-        msgs.info('Max shift between spectrally adjacent pixels: {0:.2f}'.format(maxshift_follow))
-        msgs.info('Max centroid error: {0}'.format(maxerror))
-        msgs.info('Minimum spectral pixels for a valid trace: {0}'.format(minimum_spec_length))
+        log.info('-'*50)
+        log.info('{0:^50}'.format('Edge Centroid Refinement'))
+        log.info('-'*50)
+        log.info('Width of window for centroiding the edges: {0:.1f}'.format(width))
+        log.info('Max shift between spectrally adjacent pixels: {0:.2f}'.format(maxshift_follow))
+        log.info('Max centroid error: {0}'.format(maxerror))
+        log.info('Minimum spectral pixels for a valid trace: {0}'.format(minimum_spec_length))
         # To improve performance, generate bogus ivar and mask once
         # here so that they don't have to be generated multiple times.
@@ -1819,7 +1819,7 @@ def centroid_refine(self, follow=True, start_indx=None, continuous=False, use_fi
             if not np.any(indx):
                 continue
-            msgs.info('Found {0} {1} edge trace(s) to refine'.format(np.sum(indx), side))
+            log.info('Found {0} {1} edge trace(s) to refine'.format(np.sum(indx), side))
             if follow:
                 # Find the bad trace positions
@@ -1839,7 +1839,7 @@ def centroid_refine(self, follow=True, start_indx=None, continuous=False, use_fi
                     raise PypeItError('Traces remain but could not select good starting position.')
                 ## TODO: row and column should not be used here in the output.
                 ## Adopt the PypeIt convention spec, spat
-                msgs.info('Following {0} {1} edge(s) '.format(np.sum(to_trace), side)
+                log.info('Following {0} {1} edge(s) '.format(np.sum(to_trace), side)
                           + 'from row {0}; '.format(_start_indx)
                           + '{0} trace(s) remain.'.format(np.sum(untraced)-np.sum(to_trace)))
                 # Follow the centroid of the Sobel-filtered image
@@ -1924,7 +1924,7 @@ def trace_pixels_off_detector(self, cen=None):
         """
         buff = 0 if self.par['det_buffer'] is None else self.par['det_buffer']
         if buff < 0:
-            msgs.warning('Detector buffer must be >=0 (input was {0}). Setting buffer to 0.'.format(
+            log.warning('Detector buffer must be >=0 (input was {0}). Setting buffer to 0.'.format(
                          self.par['det_buffer']))
             buff = 0
         if cen is None:
@@ -2001,7 +2001,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
         """
         if self.is_empty:
-            msgs.warning('No traces to check.')
+            log.warning('No traces to check.')
         # Indices of traces to check
         indx = np.ones(self.ntrace, dtype=bool) if subset is None else subset
@@ -2016,7 +2016,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
         # right), and be within the provided matching tolerance
         repeat = np.zeros_like(indx, dtype=bool)
         if subset is not None:
-            msgs.info('Tolerance for finding repeat traces: {0:.1f}'.format(self.par['match_tol']))
+            log.info('Tolerance for finding repeat traces: {0:.1f}'.format(self.par['match_tol']))
             side = -1 if np.all(self.traceid[indx] < 0) else 1
             compare = (side*self.traceid > 0) & np.logical_not(indx)
             if np.any(compare):
@@ -2032,17 +2032,17 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
                 repeat[indx] = (mindiff.data < self.par['match_tol']) & np.logical_not(mindiff.mask)
                 if np.any(repeat):
                     _msk[:,repeat] = self.bitmask.turn_on(_msk[:,repeat], 'DUPLICATE')
-                    msgs.info('Found {0} repeat trace(s).'.format(np.sum(repeat)))
+                    log.info('Found {0} repeat trace(s).'.format(np.sum(repeat)))
         # Find spectrally short traces
         short = np.zeros_like(indx, dtype=bool)
         if minimum_spec_length is not None:
-            msgs.info('Minimum spectral length of any trace (pixels): {0:.2f}'.format(
+            log.info('Minimum spectral length of any trace (pixels): {0:.2f}'.format(
                      minimum_spec_length))
             short[indx] = np.sum(np.logical_not(_bpm[:,indx]), axis=0) < minimum_spec_length
             if np.any(short):
                 _msk[:,short] = self.bitmask.turn_on(_msk[:,short], 'SHORTRANGE')
-                msgs.info('Found {0} short trace(s).'.format(np.sum(short)))
+                log.info('Found {0} short trace(s).'.format(np.sum(short)))
         # Find traces that are at the minimum column at the center
         # spectral row
@@ -2054,7 +2054,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
                   & np.logical_not(_bpm[self.nspec//2,indx])
         if np.any(hit_min):
             _msk[:,hit_min] = self.bitmask.turn_on(_msk[:,hit_min], 'HITMIN')
-            msgs.info('{0} trace(s) hit the minimum centroid value.'.format(np.sum(hit_min)))
+            log.info('{0} trace(s) hit the minimum centroid value.'.format(np.sum(hit_min)))
         # Find traces that are at the maximum column at the center
         # spectral row
@@ -2065,7 +2065,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
                   & np.logical_not(_bpm[self.nspec//2,indx])
         if np.any(hit_max):
             _msk[:,hit_max] = self.bitmask.turn_on(_msk[:,hit_max], 'HITMAX')
-            msgs.info('{0} trace(s) hit the maximum centroid value.'.format(np.sum(hit_max)))
+            log.info('{0} trace(s) hit the maximum centroid value.'.format(np.sum(hit_max)))
         # Find traces, or trace regions, that fall off the detector
         off_detector = np.zeros_like(indx, dtype=bool)
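check_traces records each failure mode by turning on a named bit in the per-trace mask. A minimal sketch of the underlying integer bitmask bookkeeping with invented flag values; PypeIt's BitMask class wraps this pattern with more machinery:

    import numpy as np

    # Hypothetical flag bits, mirroring names used above
    FLAGS = {'DUPLICATE': 1 << 0, 'SHORTRANGE': 1 << 1, 'HITMIN': 1 << 2}

    msk = np.zeros(5, dtype=np.int64)   # one mask value per trace
    short = np.array([False, True, False, True, False])

    msk[short] |= FLAGS['SHORTRANGE']            # "turn_on"
    flagged = (msk & FLAGS['SHORTRANGE']) > 0    # "flagged" test
    print(flagged)                               # -> [False  True False  True False]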
@@ -2078,7 +2078,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp
         # Good traces
         bad = indx & (repeat | short | hit_min | hit_max | off_detector)
-        msgs.info('Identified {0} bad trace(s) in all.'.format(np.sum(bad)))
+        log.info('Identified {0} bad trace(s) in all.'.format(np.sum(bad)))
         good = indx & np.logical_not(bad)
         return good, bad
@@ -2127,12 +2127,12 @@ def merge_traces(self, merge_frac=0.5, refit=True, debug=False):
             True.
         """
         if self.is_empty:
-            msgs.warning('No traces to merge.')
+            log.warning('No traces to merge.')
         if self.edge_fit is None:
             raise PypeItError('Trace merging requires model fits to the trace location; run fit_refine.')
         _refit = refit
         if refit and self.edge_fit is None:
-            msgs.warning('No previous fits existed, so fitting will not be redone.')
+            log.warning('No previous fits existed, so fitting will not be redone.')
             _refit = False
         # Construct the bad pixel mask depending on whether we are matching
@@ -2164,14 +2164,14 @@ def merge_traces(self, merge_frac=0.5, refit=True, debug=False):
                 continue
             rmtrace[indx[i+1:]] = merge
             merge = np.append(indx[i], indx[i+1:][merge])
-            msgs.info('Merging traces: {0}'.format(self.traceid[merge]))
+            log.info('Merging traces: {0}'.format(self.traceid[merge]))
             merged_trace = np.ma.mean(cen_merge[:,merge], axis=1)
             gpm = np.logical_not(np.ma.getmaskarray(merged_trace))
             self.edge_cen[gpm,indx[i]] = merged_trace[gpm]
             self.edge_msk[gpm,indx[i]] = 0
         if not np.any(rmtrace):
-            msgs.info('No traces merged.')
+            log.info('No traces merged.')
             return
         # Remove traces and resort them
@@ -2232,7 +2232,7 @@ def good_traces(self, include_box=False, good_orders=False):
             trace},)` flagging good traces.
         """
         if self.spectrograph.pypeline == 'Echelle' and good_orders and self.orderid is None:
-            msgs.warning('Orders undefined! Selecting all traces. To select good orders only, first '
+            log.warning('Orders undefined! Selecting all traces. To select good orders only, first '
                          'run match_order().')
         bad_flags = self.bitmask.bad_flags
         exclude = self.bitmask.insert_flags
@@ -2288,7 +2288,7 @@ def _masked_single_slit(self, trace_cen):
         if self.ntrace != 2:
             raise ValueError('Coding error: Should only get here if there are two traces.')
-        msgs.warning('The single slit found has been rejected because it is too short. If this '
+        log.warning('The single slit found has been rejected because it is too short. If this '
                      'was by mistake, re-run pypeit with a smaller `minimum_slit_length` parameter.'
                      ' Otherwise, we assume this is a long-slit with one edge off the detector '
                      'and with the current slit edges errantly isolating some feature in the data.')
         # TODO: May want to limit the number of columns included in this calculation.
         if np.mean(self.traceimg.image[:,int(np.ceil(np.max(trace_cen[:,1]))):]) \
                 > np.mean(self.traceimg.image[:,:int(np.floor(np.min(trace_cen[:,0])))]):
-            msgs.warning('The mean of the trace image to the right of the right trace is larger '
+            log.warning('The mean of the trace image to the right of the right trace is larger '
                          'than it is to the left of the left trace. Removing the right trace and '
                          're-synchronizing.')
             self.remove_traces(np.array([False,True]))
         else:
-            msgs.warning('The mean of the trace image to the left of the left trace is larger than '
+            log.warning('The mean of the trace image to the left of the left trace is larger than '
                          'it is to the right of the right trace. Removing the left trace and '
                          're-synchronizing.')
             self.remove_traces(np.array([True,False]))
@@ -2330,9 +2330,9 @@ def _flag_edges(self, trace_cen, indx, flg):
                 # the potential to yield an infinite loop, but it's
                 # also the simplest approach.
                 return self._masked_single_slit(trace_cen)
-            msgs.warning('All slits have been flagged!')
+            log.warning('All slits have been flagged!')
         if np.any(indx):
-            msgs.info(f'Flagging {np.sum(indx)//2} slits as {flg}!')
+            log.info(f'Flagging {np.sum(indx)//2} slits as {flg}!')
             self.edge_msk[:,indx] = self.bitmask.turn_on(self.edge_msk[:,indx], flg)
     def check_synced(self, rebuild_pca=False):
@@ -2403,13 +2403,13 @@ def check_synced(self, rebuild_pca=False):
             re-synchronized.
         """
         if self.is_empty:
-            msgs.warning('No traces to check.')
+            log.warning('No traces to check.')
             return
         # Decide if the PCA should be rebuilt
         _rebuild_pca = rebuild_pca and self.pcatype is not None and self.can_pca()
         if rebuild_pca and not _rebuild_pca:
-            msgs.warning('Rebuilding the PCA was requested but is not possible.')
+            log.warning('Rebuilding the PCA was requested but is not possible.')
         # Remove any fully masked traces and its synced counterpart;
         # force the removal of traces marked as SYNCERROR, even if
@@ -2447,8 +2447,8 @@ def check_synced(self, rebuild_pca=False):
                 or self.par['minimum_slit_gap'] is not None:
             platescale = parse.parse_binning(self.traceimg.detector.binning)[1] \
                             * self.traceimg.detector['platescale']
-            msgs.info('Binning: {0}'.format(self.traceimg.detector.binning))
-            msgs.info('Platescale per binned pixel: {0}'.format(platescale))
+            log.info('Binning: {0}'.format(self.traceimg.detector.binning))
+            log.info('Platescale per binned pixel: {0}'.format(platescale))
             if self.par['minimum_slit_dlength'] is not None:
                 dlength_atol = self.par['minimum_slit_dlength']/platescale
             if self.par['minimum_slit_length'] is not None:
@@ -2458,17 +2458,17 @@ def check_synced(self, rebuild_pca=False):
             if self.par['minimum_slit_gap'] is not None:
                 gap_atol = self.par['minimum_slit_gap']/platescale
-            msgs.info('Minimum slit gap (binned pixels): {0}'.format(gap_atol))
-            msgs.info('Minimum change in slit length (binned pixels): {0}'.format(dlength_atol))
-            msgs.info('Range in the change in slit length not limited' if dlength_rtol is None else
+            log.info('Minimum slit gap (binned pixels): {0}'.format(gap_atol))
+            log.info('Minimum change in slit length (binned pixels): {0}'.format(dlength_atol))
+            log.info('Range in the change in slit length not limited' if dlength_rtol is None else
                      f'Range in the change in slit length limited to +/-{dlength_rtol*100:.1f}%')
-            msgs.info('Minimum slit length (binned pixels): {0}'.format(length_atol))
-            msgs.info('Minimum science slit length (binned pixels): {0}'.format(length_atol_sci))
-            msgs.info('Range in slit length not limited' if length_rtol is None else
+            log.info('Minimum slit length (binned pixels): {0}'.format(length_atol))
+            log.info('Minimum science slit length (binned pixels): {0}'.format(length_atol_sci))
+            log.info('Range in slit length not limited' if length_rtol is None else
                      f'Range in slit length limited to +/-{length_rtol*100:.1f}%')
         if length_rtol is None and self.par['overlap']:
-            msgs.warning('Overlap keyword ignored! Must set length_range to identify abnormally '
+            log.warning('Overlap keyword ignored! Must set length_range to identify abnormally '
                          'short slits.')
         # TODO: Should here and below only use the unmasked parts of
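The slit-gap and slit-length tolerances above are supplied in arcsec and converted to binned pixels through the platescale. A worked example with invented numbers:

    spatial_binning = 2
    detector_platescale = 0.125    # arcsec per unbinned pixel (synthetic)
    platescale = spatial_binning * detector_platescale   # arcsec per binned pixel -> 0.25

    minimum_slit_gap = 1.0         # arcsec (synthetic parameter value)
    gap_atol = minimum_slit_gap / platescale
    print(gap_atol)                # -> 4.0 binned pixels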
Must set length_range to identify abnormally ' 'short slits.') # TODO: Should here and below only use the unmasked parts of @@ -2480,7 +2480,7 @@ def check_synced(self, rebuild_pca=False): indx = slit_gap < gap_atol if np.any(indx): # TODO: Allow for these traces to be flagged instead of just removed? - msgs.info('Found {0} slit(s) with gaps below {1} arcsec ({2:.2f} pixels).'.format( + log.info('Found {0} slit(s) with gaps below {1} arcsec ({2:.2f} pixels).'.format( np.sum(indx), self.par['minimum_slit_gap'], gap_atol)) rmtrace = np.concatenate(([False],np.repeat(indx,2),[False])) self.remove_traces(rmtrace, rebuild_pca=_rebuild_pca) @@ -2535,13 +2535,13 @@ def check_synced(self, rebuild_pca=False): short = np.repeat(np.log(med_slit_length/np.median(med_slit_length)) < np.log(1-length_rtol), 2) if np.any(short): - msgs.info(f'Flagging {np.sum(short)} abnormally short slit edges.') + log.info(f'Flagging {np.sum(short)} abnormally short slit edges.') self.edge_msk[:,short] \ = self.bitmask.turn_on(self.edge_msk[:,short], 'ABNORMALSLIT_SHORT') long = np.repeat(np.log(med_slit_length/np.median(med_slit_length)) > np.log(1+length_rtol), 2) if np.any(long): - msgs.info(f'Flagging {np.sum(long)} abnormally long slit edges.') + log.info(f'Flagging {np.sum(long)} abnormally long slit edges.') self.edge_msk[:,long] \ = self.bitmask.turn_on(self.edge_msk[:,long], 'ABNORMALSLIT_LONG') @@ -2550,7 +2550,7 @@ def check_synced(self, rebuild_pca=False): # # along the dispersion direction. # dl_flag = self.fully_masked_traces(flag='LARGELENGTHCHANGE') # if np.any(dl_flag): -# msgs.info(f'Removing {np.sum(dl_flag)} traces because of large spatial extent ' +# log.info(f'Removing {np.sum(dl_flag)} traces because of large spatial extent ' # ' changes along the dispersion direction.') # self.remove_traces(dl_flag, rebuild_pca=_rebuild_pca) @@ -2568,7 +2568,7 @@ def check_synced(self, rebuild_pca=False): exclude=self.bitmask.insert_flags) short = self.synced_selection(short, mode='neither', assume_synced=True) if self.par['overlap'] and np.any(short): - msgs.info('Assuming slits flagged as abnormally short are actually due to ' + log.info('Assuming slits flagged as abnormally short are actually due to ' 'overlapping slit edges.') rmtrace = np.zeros(self.ntrace, dtype=bool) # Find sets of adjacent short slits and assume they all select @@ -2594,7 +2594,7 @@ def check_synced(self, rebuild_pca=False): # the synchronization process over again, with the adjustments for # the "short" slits that are assumed to be overlap regions. if not self.is_synced: - msgs.info('Checking/cleaning traces for overlap led to de-syncronization.') + log.info('Checking/cleaning traces for overlap led to de-syncronization.') return False # TODO: Check that slit edges meet a minimum slit gap? @@ -2607,7 +2607,7 @@ def check_synced(self, rebuild_pca=False): # Remove 'em self.remove_traces(rmtrace, rebuild_pca=_rebuild_pca) if self.is_empty: - msgs.warning('Assuming a single long-slit and continuing.') + log.warning('Assuming a single long-slit and continuing.') self.bound_detector() return True @@ -2661,7 +2661,7 @@ def rm_user_traces(self, rm_traces): slit_to_remove = (lefts[y_spec,:] < xcen) & (rights[y_spec,:] > xcen) # Any slits found? if not np.any(slit_to_remove): - msgs.warning(f'No slit found to remove at pixel {y_spec}:{xcen} on ' + log.warning(f'No slit found to remove at pixel {y_spec}:{xcen} on ' f'{self.traceimg.detector.name}.') continue # More than one slit found? 
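The hunks above carry the existing eager str.format() messages over to the new logger unchanged, which works because logging accepts any pre-built string. The stdlib also supports deferred %-style arguments, which skip the string construction entirely when the record's level is filtered out. A minimal sketch of the difference, with illustrative values only (nothing below is part of this patch):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger('pypeit')

    nfound, gap_atol = 3, 2.5   # illustrative values, not taken from the code above
    # Eager: the message string is always built, even when INFO is disabled.
    log.info('Found {0} slit(s) with gaps below {1:.2f} pixels.'.format(nfound, gap_atol))
    # Deferred: logging interpolates only when the record is actually emitted.
    log.info('Found %d slit(s) with gaps below %.2f pixels.', nfound, gap_atol)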
@@ -2671,7 +2671,7 @@ def rm_user_traces(self, rm_traces): ' Refine your tracing parameters and try again.') idx = np.where(slit_to_remove)[0][0] indx[2*idx:2*idx+2] = True - msgs.info(f'Removing user-supplied slit at pixel {y_spec}:{xcen} on ' + log.info(f'Removing user-supplied slit at pixel {y_spec}:{xcen} on ' f'{self.traceimg.detector.name}.') # TODO: Bring this back? This is kind of useless because the traces @@ -2719,15 +2719,15 @@ def remove_traces(self, indx, resort=True, rebuild_pca=False): """ # Make sure there are traces to remove if not np.any(indx): - msgs.warning('No trace to remove.') + log.warning('No trace to remove.') return if np.all(indx): - msgs.warning('All traces removed!') + log.warning('All traces removed!') self._reinit_trace_data() return - msgs.info('Removing {0} edge traces.'.format(np.sum(indx))) + log.info('Removing {0} edge traces.'.format(np.sum(indx))) # Reset the trace data keep = np.logical_not(indx) @@ -2843,7 +2843,7 @@ def clean_traces(self, force_flag=None, rebuild_pca=True, sync_mode='ignore', :func:`synced_selection`. """ if self.is_empty: - msgs.warning('No traces to clean.') + log.warning('No traces to clean.') return # Traces to remove @@ -2900,7 +2900,7 @@ def spatial_sort(self, use_mean=False, use_fit=True): # Check input if use_fit and self.edge_fit is None: - msgs.warning('Fit data is not available; cannot use it for spatially sorting the edges.') + log.warning('Fit data is not available; cannot use it for spatially sorting the edges.') # Set up the coordinates to use bpm = self.bitmask.flagged(self.edge_msk, self.bitmask.bad_flags) @@ -2920,7 +2920,7 @@ def spatial_sort(self, use_mean=False, use_fit=True): reference_row = trace.most_common_trace_row(bpm) if self.pcatype is None \ else (self.left_pca.reference_row if self.par['left_right_pca'] else self.pca.reference_row) - msgs.info('Re-sorting edges based on where they cross row {0}'.format(reference_row)) + log.info('Re-sorting edges based on where they cross row {0}'.format(reference_row)) srt = np.argsort(cen[reference_row,:]) # Resort the arrays @@ -3024,18 +3024,18 @@ def fit_refine(self, weighting='uniform', debug=False, idx=None): xmin = 0. xmax = self.nspec-1. 
-        msgs.info('-'*50)
-        msgs.info('{0:^50}'.format('Fitting Polynomial to Edge Trace'))
-        msgs.info('-'*50)
-        msgs.info('Max shift btwn input and remeasured edge centroids: {0:.2f}'.format(maxshift))
-        msgs.info('Max centroid error: {0}'.format(maxerror))
-        msgs.info('Trace fitting function: {0}'.format(function))
-        msgs.info('Trace fitting order: {0}'.format(order))
-        msgs.info('Weighting for remeasuring edge centroids: {0}'.format(weighting))
-        msgs.info('FWHM parameter for remeasuring edge centroids: {0:.1f}'.format(fwhm))
-        msgs.info('Maximum deviation for fitted data: {0:.1f}'.format(maxdev))
-        msgs.info('Maximum number of rejection iterations: {0}'.format(maxiter))
-        msgs.info('Number of remeasuring and refitting iterations: {0}'.format(niter))
+        log.info('-'*50)
+        log.info('{0:^50}'.format('Fitting Polynomial to Edge Trace'))
+        log.info('-'*50)
+        log.info('Max shift btwn input and remeasured edge centroids: {0:.2f}'.format(maxshift))
+        log.info('Max centroid error: {0}'.format(maxerror))
+        log.info('Trace fitting function: {0}'.format(function))
+        log.info('Trace fitting order: {0}'.format(order))
+        log.info('Weighting for remeasuring edge centroids: {0}'.format(weighting))
+        log.info('FWHM parameter for remeasuring edge centroids: {0:.1f}'.format(fwhm))
+        log.info('Maximum deviation for fitted data: {0:.1f}'.format(maxdev))
+        log.info('Maximum number of rejection iterations: {0}'.format(maxiter))
+        log.info('Number of remeasuring and refitting iterations: {0}'.format(niter))
 
         # Check the traces to make sure they meet the minimum length.
         # This modifies self.edge_msk directly.
@@ -3125,7 +3125,7 @@ def can_pca(self):
         # Set and report the minimum length needed for the PCA in
         # pixels
         minimum_spec_length = self.par['fit_min_spec_length']*self.nspec
-        msgs.info('Minimum length of traces to include in the PCA: {0}'.format(minimum_spec_length))
+        log.info('Minimum length of traces to include in the PCA: {0}'.format(minimum_spec_length))
 
         # This call to check_traces will flag any trace with a length
         # below minimum_spec_length as SHORTRANGE
@@ -3254,27 +3254,27 @@ def build_pca(self, use_center=False, debug=False):
         maxrej = self.par['pca_maxrej']
         maxiter = self.par['pca_maxiter']
 
-        msgs.info('-'*50)
-        msgs.info('{0:^50}'.format('Constructing PCA interpolator'))
-        msgs.info('-'*50)
-        msgs.info('PCA composition of the left and right traces is done {0}.'.format(
+        log.info('-'*50)
+        log.info('{0:^50}'.format('Constructing PCA interpolator'))
+        log.info('-'*50)
+        log.info('PCA composition of the left and right traces is done {0}.'.format(
             'separately' if left_right_pca else 'simultaneously'))
         if npca is not None:
-            msgs.info('Restricted number of PCA components: {0}'.format(npca))
+            log.info('Restricted number of PCA components: {0}'.format(npca))
         if pca_explained_var is not None:
-            msgs.info('Requested pecentage of variance explained by PCA: {0:.1f}'.format(
+            log.info('Requested percentage of variance explained by PCA: {0:.1f}'.format(
                 pca_explained_var))
-        msgs.info('Function fit to PCA coefficients: {0}'.format(function))
-        msgs.info('Lower sigma rejection: {0:.1f}'.format(lower))
-        msgs.info('Upper sigma rejection: {0:.1f}'.format(upper))
-        msgs.info('Maximum number of rejections per iteration: {0}'.format(maxrej))
-        msgs.info('Maximum number of rejection iterations: {0}'.format(maxiter))
+        log.info('Function fit to PCA coefficients: {0}'.format(function))
+        log.info('Lower sigma rejection: {0:.1f}'.format(lower))
+        log.info('Upper sigma rejection: {0:.1f}'.format(upper))
+        log.info('Maximum number of rejections per iteration: {0}'.format(maxrej))
+        log.info('Maximum number of rejection iterations: {0}'.format(maxiter))
 
         # Check the state of the current object
         if self.pcatype is not None:
-            msgs.warning('PCA model already exists and will be overwritten.')
+            log.warning('PCA model already exists and will be overwritten.')
         if self.edge_fit is None and not use_center:
-            msgs.warning('No trace fits exits. PCA based on trace centroid measurements.')
+            log.warning('No trace fits exist. PCA based on trace centroid measurements.')
 
         # Check if the PCA decomposition can be performed
         if not self.can_pca():
@@ -3307,7 +3307,7 @@ def build_pca(self, use_center=False, debug=False):
         pcaindx = [None, None]
         for i, side in enumerate(['left', 'right']):
             pcaindx[i] = (self.is_left if side == 'left' else self.is_right) & use_trace
-            msgs.info('Using {0}/{1} of the {2} traces in the PCA analysis.'.format(
+            log.info('Using {0}/{1} of the {2} traces in the PCA analysis.'.format(
                 np.sum(pcaindx[i]), self.ntrace, side))
 
         # Run the PCA decomposition and construct its interpolator
@@ -3327,7 +3327,7 @@ def build_pca(self, use_center=False, debug=False):
             # order for components that account for a smaller
             # percentage of the variance.
             _order = np.clip(order - np.arange(_pca[i].npca), 1, None).astype(int)
-            msgs.info('Order of function fit to each component: {0}'.format(_order))
+            log.info('Order of function fit to each component: {0}'.format(_order))
 
             # Apply a 10% relative error to each coefficient. This
             # performs better than use_mad, since larger coefficients
@@ -3484,23 +3484,23 @@ def peak_refine(self, rebuild_pca=False, show_fits=False, show_peaks=False):
         maxdev = self.par['fit_maxdev']
         maxiter = self.par['fit_maxiter']
 
-        msgs.info('-'*50)
-        msgs.info('{0:^50}'.format('Refining traces using collapsed Sobel image'))
-        msgs.info('-'*50)
-        msgs.info('Threshold for peak detection: {0:.1f}'.format(peak_thresh))
-        msgs.info('Detector range (spectral axis) collapsed: {0}'.format(smash_range))
-        msgs.info('Image fraction for trace mask filter: {0}'.format(trace_median_frac))
-        msgs.info('Threshold for trace masking: {0}'.format(trace_thresh))
-        msgs.info('Trace fitting function: {0}'.format(function))
-        msgs.info('Trace fitting order: {0}'.format(order))
-        msgs.info('FWHM parameter for uniform-weighted centroids: {0:.1f}'.format(fwhm_uniform))
-        msgs.info('Number of uniform-weighted iterations: {0:.1f}'.format(niter_uniform))
-        msgs.info('FWHM parameter for Gaussian-weighted centroids: {0:.1f}'.format(fwhm_gaussian))
-        msgs.info('Number of Gaussian-weighted iterations: {0:.1f}'.format(niter_gaussian))
-        msgs.info('Minimum separation between any two subsequent edges of the same side: '
+        log.info('-'*50)
+        log.info('{0:^50}'.format('Refining traces using collapsed Sobel image'))
+        log.info('-'*50)
+        log.info('Threshold for peak detection: {0:.1f}'.format(peak_thresh))
+        log.info('Detector range (spectral axis) collapsed: {0}'.format(smash_range))
+        log.info('Image fraction for trace mask filter: {0}'.format(trace_median_frac))
+        log.info('Threshold for trace masking: {0}'.format(trace_thresh))
+        log.info('Trace fitting function: {0}'.format(function))
+        log.info('Trace fitting order: {0}'.format(order))
+        log.info('FWHM parameter for uniform-weighted centroids: {0:.1f}'.format(fwhm_uniform))
+        log.info('Number of uniform-weighted iterations: {0:.1f}'.format(niter_uniform))
+        log.info('FWHM parameter for Gaussian-weighted centroids: {0:.1f}'.format(fwhm_gaussian))
+        log.info('Number of Gaussian-weighted 
iterations: {0:.1f}'.format(niter_gaussian)) + log.info('Minimum separation between any two subsequent edges of the same side: ' f'{fwhm_gaussian * min_edge_side_sep:.1f} pixels') - msgs.info('Maximum deviation for fitted data: {0:.1f}'.format(maxdev)) - msgs.info('Maximum number of rejection iterations: {0}'.format(maxiter)) + log.info('Maximum deviation for fitted data: {0:.1f}'.format(maxdev)) + log.info('Maximum number of rejection iterations: {0}'.format(maxiter)) # Generate bogus ivar and mask once here so that they don't # have to be generated multiple times. @@ -3567,7 +3567,7 @@ def peak_refine(self, rebuild_pca=False, show_fits=False, show_peaks=False): # Assess the output ntrace = fit.shape[1] if ntrace < self.ntrace: - msgs.warning('Found fewer traces using peak finding than originally available. ' + log.warning('Found fewer traces using peak finding than originally available. ' 'May want to reset peak threshold.') if self.par['trace_rms_tol'] is not None: @@ -3592,14 +3592,14 @@ def peak_refine(self, rebuild_pca=False, show_fits=False, show_peaks=False): rms = np.sqrt(np.mean((diff - np.mean(diff, axis=0)[None,:])**2, axis=0)) # Report - msgs.info('-'*30) - msgs.info('Matched spatial locations and RMS difference along spectral direction') - msgs.info(f' {"OLD":>8} {"NEW":>8} {"RMS":>8}') - msgs.info(' '+'-'*8+' '+'-'*8+' '+'-'*8) + log.info('-'*30) + log.info('Matched spatial locations and RMS difference along spectral direction') + log.info(f' {"OLD":>8} {"NEW":>8} {"RMS":>8}') + log.info(' '+'-'*8+' '+'-'*8+' '+'-'*8) for i in range(len(peak_indx)): if peak_indx[i] < 0: continue - msgs.info(f' {self.edge_fit[reference_row][gpm][peak_indx[i]]:8.1f}' + log.info(f' {self.edge_fit[reference_row][gpm][peak_indx[i]]:8.1f}' f' {fit[reference_row][i]:8.1f} {rms[i]:8.3f}') # Select traces below the RMS tolerance or that were newly @@ -3608,7 +3608,7 @@ def peak_refine(self, rebuild_pca=False, show_fits=False, show_peaks=False): # constrained! indx = (rms < self.par['trace_rms_tol']) | (peak_indx == -1) if not np.all(indx): - msgs.info(f'Removing {indx.size - np.sum(indx)} trace(s) due to large RMS ' + log.info(f'Removing {indx.size - np.sum(indx)} trace(s) due to large RMS ' 'difference with previous trace locations.') fit = fit[:,indx] cen = cen[:,indx] @@ -3727,10 +3727,10 @@ def _get_reference_locations(self, trace_cen, add_edge): to_edge = self.par['sync_to_edge'] gap_offset = self.par['gap_offset'] - msgs.info('Mode used to set spatial position of new traces: {0}'.format(center_mode)) - msgs.info('For first left and last right, set trace to the edge: {0}'.format(to_edge)) + log.info('Mode used to set spatial position of new traces: {0}'.format(center_mode)) + log.info('For first left and last right, set trace to the edge: {0}'.format(to_edge)) if center_mode == 'gap': - msgs.info('Gap offset for adjacent slits: {0}'.format(gap_offset)) + log.info('Gap offset for adjacent slits: {0}'.format(gap_offset)) # Get the reference row for the placement calculation; allow # the use of inserted traces. 
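fit_refine, build_pca, and peak_refine (above) each open their parameter report with the same three-line banner. If that pattern keeps spreading, a small helper would remove the repetition; a sketch under the assumption that a shared utility is acceptable (log_banner is hypothetical and is not defined anywhere in this patch):

    import logging

    log = logging.getLogger('pypeit')

    def log_banner(logger, title, width=50):
        """Emit a dash-framed, centered section title through the given logger."""
        logger.info('-' * width)
        logger.info(f'{title:^{width}}')
        logger.info('-' * width)

    # Equivalent to the three log.info calls that open build_pca:
    log_banner(log, 'Constructing PCA interpolator')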
@@ -3814,7 +3814,7 @@ def _get_reference_locations(self, trace_cen, add_edge): trace_ref[indx[too_hi]] = trace_ref[indx[too_hi]+1] - gap_offset noffset += np.sum(too_hi) if noffset > 0: - msgs.warning('Reference locations for {0} slit edges adjusted '.format(noffset) + log.warning('Reference locations for {0} slit edges adjusted '.format(noffset) + 'to have a slit gap of {0} pixel(s).'.format(gap_offset)) return trace_ref @@ -3852,11 +3852,11 @@ def nudge_traces(self, trace_cen): raise PypeItError('Traces have incorrect length.') _buffer = self.par['det_buffer'] if _buffer < 0: - msgs.warning('Buffer must be greater than 0; ignoring.') + log.warning('Buffer must be greater than 0; ignoring.') _buffer = 0 if self.par['max_nudge'] is not None: - msgs.info('Nudging traces, by at most {0} pixel(s)'.format(self.par['max_nudge']) + log.info('Nudging traces, by at most {0} pixel(s)'.format(self.par['max_nudge']) + ', to be no closer than {0} pixel(s) from the detector edge.'.format(_buffer)) # NOTE: Should never happen, but this makes a compromise if a @@ -3954,7 +3954,7 @@ def sync(self, rebuild_pca=True, debug=False): if self.is_empty: if not self.par['bound_detector']: return False - msgs.warning('No traces left! Left and right edges placed at detector boundaries.') + log.warning('No traces left! Left and right edges placed at detector boundaries.') self.bound_detector() # Make sure that the traces are sorted spatially @@ -3983,10 +3983,10 @@ def sync(self, rebuild_pca=True, debug=False): return True # Report - msgs.info('-'*50) - msgs.info('{0:^50}'.format('Synchronizing left and right traces')) - msgs.info('-'*50) - msgs.info('Found {0} left and {1} right trace(s) to add.'.format( + log.info('-'*50) + log.info('{0:^50}'.format('Synchronizing left and right traces')) + log.info('-'*50) + log.info('Found {0} left and {1} right trace(s) to add.'.format( np.sum((side == -1) & add_edge), np.sum((side == 1) & add_edge))) # Allow the edges to be synced, even if a fit hasn't been done yet @@ -3994,9 +3994,9 @@ def sync(self, rebuild_pca=True, debug=False): # If there was only one edge, just add the other one if side.size == 2: - msgs.warning('Only one edge traced. Ignoring center_mode and adding edge at the ' + log.warning('Only one edge traced. Ignoring center_mode and adding edge at the ' 'opposite edge of the detector.') - msgs.info('Detector edge buffer: {0}'.format(self.par['det_buffer'])) + log.info('Detector edge buffer: {0}'.format(self.par['det_buffer'])) # TODO: PCA would have failed because there needs to be at # least two traces. Get rid of this test once satisfied # that this exception is never raised... @@ -4016,7 +4016,7 @@ def sync(self, rebuild_pca=True, debug=False): maxiter = 3 i = 0 while i < maxiter: - msgs.info(f'Beginning syncing iteration : {i+1} (of at most {maxiter})') + log.info(f'Beginning syncing iteration : {i+1} (of at most {maxiter})') # Get the traces trace_cen = self.edge_cen if self.edge_fit is None else self.edge_fit @@ -4067,7 +4067,7 @@ def sync(self, rebuild_pca=True, debug=False): raise PypeItError('Catastrophic error in left-right synchronization. Edge order is not ' 'correctly sorted.') if np.any(indx): - msgs.warning('Synchronized traces are not properly ordered, likely because they ' + log.warning('Synchronized traces are not properly ordered, likely because they ' 'have been placed close to the detector edges. 
Flagging ' '{0} traces that are not properly sorted for removal.'.format(np.sum(indx))) # Mask the traces as due to a synchronization error @@ -4077,7 +4077,7 @@ def sync(self, rebuild_pca=True, debug=False): self.edge_msk[:,indx] = self.bitmask.turn_on(self.edge_msk[:,indx], 'SYNCERROR') if debug: - msgs.info('Show instance includes inserted traces but before checking the sync.') + log.info('Show instance includes inserted traces but before checking the sync.') self.show(title='includes inserted traces before checking the sync', flag='any') # Check the full synchronized list and log completion of the @@ -4155,7 +4155,7 @@ def add_user_traces(self, add_traces, method='straight'): lindx = (x_start < lefts[y_spec,:]) & (x_end > lefts[y_spec,:]) rindx = (x_start < rights[y_spec,:]) & (x_end > rights[y_spec,:]) if any(lindx) or any(rindx): - msgs.warning(f'Inserted slit at {y_spec}:{x_start}:{x_end} on ' + log.warning(f'Inserted slit at {y_spec}:{x_start}:{x_end} on ' f'{self.traceimg.detector.name} overlaps with an existing slit! ' 'New slit will *not* be added.') keep[i] = False @@ -4292,7 +4292,7 @@ def insert_traces(self, side, trace_cen, loc=None, mode='user', resort=True, nud # remove any existing array and warn the user they they'll need to # rematch the orders. if self.orderid is not None: - msgs.warning('Inserting traces invalidates order matching. Removing.') + log.warning('Inserting traces invalidates order matching. Removing.') self.orderid = None # Check input @@ -4307,7 +4307,7 @@ def insert_traces(self, side, trace_cen, loc=None, mode='user', resort=True, nud if loc.size != ntrace: raise PypeItError('Number of sides does not match the number of insertion locations.') - msgs.info(f'Inserting {ntrace} new traces.') + log.info(f'Inserting {ntrace} new traces.') # Nudge the traces if nudge: @@ -4452,7 +4452,7 @@ def maskdesign_matching(self, debug=False): # Check that there are still traces to match! if self.is_empty: - msgs.warning('No edges traced. Slitmask matching cannot be performed') + log.warning('No edges traced. 
Slitmask matching cannot be performed')
+            log.warning('No edges traced. Slitmask matching cannot be performed')
             return
 
         # `traceimg` must have knowledge of the flat frame that built it
@@ -4468,7 +4468,7 @@ def maskdesign_matching(self, debug=False):
                                                            debug=debug)
 
         if omodel_bspat[omodel_bspat!=-1].size < 3:
-            msgs.warning('Less than 3 slits are expected on this detector, slitmask matching cannot be performed')
+            log.warning('Fewer than 3 slits are expected on this detector; slitmask matching cannot be performed')
             # update minimum_slit_gap and minimum_slit_length_sci par
             # this will allow to catch the boxslit, since in this case slitmask matching is not performed
             self.par = self.spectrograph.update_edgetracepar(self.par)
@@ -4530,7 +4530,7 @@ def maskdesign_matching(self, debug=False):
             plt.ylim(0, self.traceimg.shape[1] + 20)
             plt.legend()
             plt.show()
-        msgs.info('SLIT_MATCH: RMS residuals for left and right edges: {}, {} pixels'.format(sigres_b, sigres_t))
+        log.info('SLIT_MATCH: RMS residuals for left and right edges: {}, {} pixels'.format(sigres_b, sigres_t))
 
         # We compute the predicted edge positions from the optical model after the x-correlation with the traced edges
 
@@ -4562,7 +4562,7 @@ def maskdesign_matching(self, debug=False):
         needind_t = np.where(needadd_t)[0]   # edges we are missing
 
         if (needind_b.size > 0) | (needind_t.size > 0):
-            msgs.warning('Missing edge traces: {} left and {} right'.format(needind_b.shape[0], needind_t.shape[0]))
+            log.warning('Missing edge traces: {} left and {} right'.format(needind_b.shape[0], needind_t.shape[0]))
             if debug:
                 slitdesign_matching.plot_matches(self.edge_fit[:,self.is_left], ind_b, bot_edge_pred, reference_row,
@@ -4581,7 +4581,7 @@ def maskdesign_matching(self, debug=False):
         edges_dupl[self.is_right] = dupl_t
 
         # Remove duplicate match
-        msgs.info('Removing duplicate matches: {} left and {} right'.format(ind_b[dupl_b].size, ind_t[dupl_t].size))
+        log.info('Removing duplicate matches: {} left and {} right'.format(ind_b[dupl_b].size, ind_t[dupl_t].size))
         self.remove_traces(edges_dupl, rebuild_pca=True)
         ind_b = ind_b[np.logical_not(dupl_b)]
         ind_t = ind_t[np.logical_not(dupl_t)]
@@ -4600,7 +4600,7 @@ def maskdesign_matching(self, debug=False):
 
         # The code below is to add traces that are predicted but not found.
# Left edges if needind_b.size > 0: - msgs.info('Adding {} left missing edge(s)'.format(needind_b.size)) + log.info('Adding {} left missing edge(s)'.format(needind_b.size)) # Append the missing indices and re-sort all ind_b = np.append(ind_b, needind_b) sortind_b = np.argsort(utils.index_of_x_eq_y(self.slitmask.slitid[sortindx], @@ -4633,7 +4633,7 @@ def maskdesign_matching(self, debug=False): if needind_t.size > 0: # Right edges - msgs.info('Adding {} right missing edge(s)'.format(needind_t.size)) + log.info('Adding {} right missing edge(s)'.format(needind_t.size)) # Append the missing indices and re-sort all ind_t = np.append(ind_t, needind_t) sortind_t = np.argsort(utils.index_of_x_eq_y(self.slitmask.slitid[sortindx], @@ -4714,7 +4714,7 @@ def maskdesign_matching(self, debug=False): slit_length = np.median(np.squeeze(np.diff(self.edge_fit.reshape(self.nspec, -1, 2), axis=-1)), axis=0) ind_short = np.repeat(slit_length < min_slitlen, 2) if np.any(ind_short): - msgs.info('Rejecting {0} traces that are too short.'.format(np.sum(ind_short))) + log.info('Rejecting {0} traces that are too short.'.format(np.sum(ind_short))) self.edge_msk[:, ind_short] = self.bitmask.turn_on(self.edge_msk[:, ind_short], 'SHORTSLIT') # force clean_traces for certain flags, because if a slit has one of these flags @@ -4724,9 +4724,9 @@ def maskdesign_matching(self, debug=False): sync_mode='both', assume_synced=True) if self.is_synced: - msgs.info('LEFT AND RIGHT EDGES SYNCHRONIZED AFTER MASK DESIGN MATCHING') + log.info('LEFT AND RIGHT EDGES SYNCHRONIZED AFTER MASK DESIGN MATCHING') else: - msgs.warning('LEFT AND RIGHT EDGES *NOT* SYNCHRONIZED AFTER MASK DESIGN MATCHING') + log.warning('LEFT AND RIGHT EDGES *NOT* SYNCHRONIZED AFTER MASK DESIGN MATCHING') def _fill_design_table(self, maskdef_id, cc_params_b, cc_params_t, omodel_bspat, omodel_tspat, spat_id): """ @@ -4878,7 +4878,7 @@ def order_refine(self, debug=False): present in the current set of edges. """ if self.spectrograph.pypeline != 'Echelle': - msgs.warning('Parameter add_missed_orders only valid for Echelle spectrographs.') + log.warning('Parameter add_missed_orders only valid for Echelle spectrographs.') return if not self.can_pca(): @@ -4906,7 +4906,7 @@ def order_refine(self, debug=False): = self.order_refine_free_format(reference_row, bracket=bracket, debug=debug) if add_left is None or add_right is None: - msgs.info('No additional orders found to add') + log.info('No additional orders found to add') return if rmtraces is not None: @@ -5077,7 +5077,7 @@ def order_refine_free_format(self, reference_row, combined_order_tol=1.8, bracke order_cen, order_missing \ = trace.find_missing_orders(cen[good_order], width_fit, gap_fit) if np.sum(order_missing) > order_missing.size // 2: - msgs.warning('Found more missing orders than detected orders. Check the order ' + log.warning('Found more missing orders than detected orders. Check the order ' 'refinement QA file! The code will continue, but you likely need to adjust ' 'your edge-tracing parameters.') @@ -5430,7 +5430,7 @@ def order_refine_free_format_qa(self, cen, bad_order, width, gap, width_fit, gap plt.show() else: fig.canvas.print_figure(ofile, bbox_inches='tight') - msgs.info(f'Missing order QA written to: {ofile}') + log.info(f'Missing order QA written to: {ofile}') fig.clear() plt.close(fig) @@ -5569,12 +5569,12 @@ def match_order(self, reference_row=None): fnd = slit_indx > -1 missed_orders = self.spectrograph.orders[np.logical_not(fnd)] if not np.all(fnd): - msgs.warning(f'Did not find all orders! 
Missing orders: {missed_orders}')
+            log.warning(f'Did not find all orders! Missing orders: {missed_orders}')
 
         # Flag paired edges that were not matched to a known order
         nomatch = np.setdiff1d(np.arange(np.sum(good_sync)), slit_indx[fnd])
         if nomatch.size > 0:
-            msgs.warning(f'Flagging {nomatch.size} trace pairs as not being matched to an order.')
+            log.warning(f'Flagging {nomatch.size} trace pairs as not being matched to an order.')
         # Create a vector that selects the appropriate traces. This
         # *assumes* that the traces are left-right syncronized and the order
         # has not changed between the order of the traces in the relevant
@@ -5592,16 +5592,16 @@ def match_order(self, reference_row=None):
         med_offset = np.median(sep[fnd])
 
         # Report
-        msgs.info(f'Median offset is {med_offset:.3f}.')
-        msgs.info('After offsetting, order-matching separations are:')
-        msgs.info(f' {"ORDER":>6} {"PAIR":>4} {"SEP":>6}')
-        msgs.info(f' {"-"*6} {"-"*4} {"-"*6}')
+        log.info(f'Median offset is {med_offset:.3f}.')
+        log.info('After offsetting, order-matching separations are:')
+        log.info(f' {"ORDER":>6} {"PAIR":>4} {"SEP":>6}')
+        log.info(f' {"-"*6} {"-"*4} {"-"*6}')
         for i in range(self.spectrograph.norders):
             if fnd[i]:
-                msgs.info(f' {self.spectrograph.orders[i]:>6} {i+1:>4} {sep[i]-med_offset:6.3f}')
+                log.info(f' {self.spectrograph.orders[i]:>6} {i+1:>4} {sep[i]-med_offset:6.3f}')
             else:
-                msgs.info(f' {self.spectrograph.orders[i]:>6} {"N/A":>4} {"MISSED":>6}')
-        msgs.info(f' {"-"*6} {"-"*4} {"-"*6}')
+                log.info(f' {self.spectrograph.orders[i]:>6} {"N/A":>4} {"MISSED":>6}')
+        log.info(f' {"-"*6} {"-"*4} {"-"*6}')
 
         # Instantiate the order ID; 0 means the order is unassigned
         self.orderid = np.zeros(self.nslits*2, dtype=int)
@@ -5713,7 +5713,7 @@ def get_slits(self):
         # Check for mismatched `maskdef_id` in the left and right edges
         mkd_id_mismatch = self.maskdef_id[self.is_left] != self.maskdef_id[self.is_right]
         if np.any(mkd_id_mismatch):
-            msgs.warning("Mismatched `maskdefId` in left and right traces for {}/{} slits. ".format(
+            log.warning("Mismatched `maskdefId` in left and right traces for {}/{} slits. ".format(
                 self.maskdef_id[self.is_left][mkd_id_mismatch].size, self.nslits) +
                       "Choosing the left edge `maskdefId` if it is not -99, otherwise choosing right one")
             _maskdef_id = self.maskdef_id[gpm & self.is_left]
@@ -5722,7 +5722,7 @@ def get_slits(self):
             # this may not work if the corresponding right edge is also -99. Assuming this is not the case
             _maskdef_id[mkd_id_bad] = self.maskdef_id[gpm & self.is_right][mkd_id_bad]
         if np.any(_maskdef_id == -99):
-            msgs.warning("{} slits do not have `maskdefId` assigned.".format(_maskdef_id[_maskdef_id == -99].size) +
+            log.warning("{} slits do not have `maskdefId` assigned. ".format(_maskdef_id[_maskdef_id == -99].size) +
                       "They will not be included in the design table")
 
         # Store the matched slit-design and object information in a table.
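The extraction.py diff below, like the find_objects.py and flatfield.py diffs after it, swaps `from pypeit import msgs` for `from pypeit import log`, so every module shares a single package-level logger instance. A minimal sketch of the shape of that export; the handler and formatter details here are assumptions for illustration, not the package's actual configuration:

    import logging

    def get_logger(name='pypeit', level=logging.INFO):
        """Return the shared package logger, configuring it only on first use."""
        logger = logging.getLogger(name)
        if not logger.handlers:   # guard against duplicate handlers on re-import
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter('[%(levelname)s] - %(message)s'))
            logger.addHandler(handler)
            logger.setLevel(level)
        return logger

    # Exported at the package level so client modules can do `from pypeit import log`.
    log = get_logger()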
diff --git a/pypeit/extraction.py b/pypeit/extraction.py index d72ba6c7d7..96ffe62109 100644 --- a/pypeit/extraction.py +++ b/pypeit/extraction.py @@ -13,7 +13,7 @@ from astropy import stats from abc import ABCMeta -from pypeit import msgs, utils +from pypeit import log, utils from pypeit import PypeItError from pypeit.display import display from pypeit.core import skysub, extract, flexure, flat @@ -168,7 +168,7 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ self.spat_flexure_shift = None # Initialize the slits - msgs.info("Initializing slits") + log.info("Initializing slits") self.initialize_slits(slits) # Internal bpm mask @@ -221,14 +221,14 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ tilt_flexure_shift = _spat_flexure - self.waveTilts.spat_flexure else: tilt_flexure_shift = self.spat_flexure_shift - msgs.info("Generating tilts image from fit in waveTilts") + log.info("Generating tilts image from fit in waveTilts") self.tilts = self.waveTilts.fit2tiltimg(self.slitmask, flexure=tilt_flexure_shift) elif waveTilts is None and tilts is not None: - msgs.info("Using user input tilts image") + log.info("Using user input tilts image") self.tilts = tilts # Now generate the wavelength image - msgs.info("Generating wavelength image") + log.info("Generating wavelength image") if wv_calib is None and waveimg is None: raise PypeItError("Must provide either wv_calib or waveimg to Extract") if wv_calib is not None and waveimg is not None: @@ -239,12 +239,12 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ elif wv_calib is None and waveimg is not None: self.waveimg = waveimg - msgs.info("Generating spectral FWHM image") + log.info("Generating spectral FWHM image") self.fwhmimg = None if wv_calib is not None: self.fwhmimg = wv_calib.build_fwhmimg(self.tilts, self.slits, initial=True, spat_flexure=self.spat_flexure_shift) else: - msgs.warning("Spectral FWHM image could not be generated") + log.warning("Spectral FWHM image could not be generated") # get flatfield image for blaze function self.flatimg = None @@ -256,7 +256,7 @@ def __init__(self, sciImg, slits, sobjs_obj, spectrograph, par, objtype, global_ # TODO: Can we just use flat_raw if flatimages.pixelflat_norm is None? self.flatimg, _ = flat.flatfield(flat_raw, flatimages.pixelflat_norm) if self.flatimg is None: - msgs.warning("No flat image was found. A spectrum of the flatfield will not be extracted!") + log.warning("No flat image was found. 
A spectrum of the flatfield will not be extracted!") # Now apply a global flexure correction to each slit provided it's not a standard star if self.par['flexure']['spec_method'] != 'skip' and not self.std_redux: @@ -360,7 +360,7 @@ def extract(self, global_sky, bkg_redux_global_sky=None, model_noise=None, spat_ #self.sobjs_obj = sobjs_obj if self.par['reduce']['extraction']['skip_optimal']: # Boxcar only with global sky subtraction - msgs.info("Skipping optimal extraction") + log.info("Skipping optimal extraction") # This will hold the extracted objects self.sobjs = self.sobjs_obj.copy() @@ -402,10 +402,10 @@ def extract(self, global_sky, bkg_redux_global_sky=None, model_noise=None, spat_ # Find them if sobj.OPT_COUNTS is None and sobj.BOX_COUNTS is None: remove_idx.append(idx) - msgs.warning(f'Removing object at pixel {sobj.SPAT_PIXPOS} because ' + log.warning(f'Removing object at pixel {sobj.SPAT_PIXPOS} because ' f'both optimal and boxcar extraction could not be performed') elif sobj.OPT_COUNTS is None: - msgs.warning(f'Optimal extraction could not be performed for object at pixel {sobj.SPAT_PIXPOS}') + log.warning(f'Optimal extraction could not be performed for object at pixel {sobj.SPAT_PIXPOS}') # Remove them if len(remove_idx) > 0: @@ -514,7 +514,7 @@ def spec_flexure_correct(self, mode="local", sobjs=None): Spectrally extracted objects """ if self.par['flexure']['spec_method'] == 'skip': - msgs.info('Skipping flexure correction.') + log.info('Skipping flexure correction.') return # Perform some checks @@ -528,7 +528,7 @@ def spec_flexure_correct(self, mode="local", sobjs=None): # Prepare a list of slit spectra, if required. if mode == "global": - msgs.info('Performing global spectral flexure correction') + log.info('Performing global spectral flexure correction') gd_slits = np.logical_not(self.extract_bpm) trace_spat = 0.5 * (self.slits_left + self.slits_right) _global_sky = self.global_sky if self.bkg_redux_global_sky is None else self.bkg_redux_global_sky @@ -542,12 +542,12 @@ def spec_flexure_correct(self, mode="local", sobjs=None): if gd_slits[islit] and len(flex_list[islit]['shift']) > 0: self.slitshift[islit] = flex_list[islit]['shift'][0] # Apply flexure to the new wavelength solution - msgs.info("Regenerating wavelength image") + log.info("Regenerating wavelength image") self.waveimg = self.wv_calib.build_waveimg(self.tilts, self.slits, spat_flexure=self.spat_flexure_shift, spec_flexure=self.slitshift) elif mode == "local": - msgs.info('Performing local spectral flexure correction') + log.info('Performing local spectral flexure correction') # Measure flexure: # If mode == local: specobjs != None and slitspecs = None flex_list = flexure.spec_flexure_slit(self.slits, self.slits.slitord_id, self.extract_bpm, @@ -658,7 +658,7 @@ def show(self, attr, image=None, showmask=False, sobjs=None, ch_name = chname if chname is not None else 'image' viewer, ch = display.show_image(image, chname=ch_name, clear=clear, wcs_match=True) else: - msgs.warning("Not an option for show") + log.warning("Not an option for show") if sobjs is not None: for spec in sobjs: @@ -764,7 +764,7 @@ def local_skysub_extract(self, global_sky, sobjs, bkg_redux_global_sky=None, # Loop on slits for slit_idx in gdslits: slit_spat = self.slits.spat_id[slit_idx] - msgs.info("Local sky subtraction and extraction for slit: {:d}".format(slit_spat)) + log.info("Local sky subtraction and extraction for slit: {:d}".format(slit_spat)) thisobj = self.sobjs.SLITID == slit_spat # indices of objects for this slit if not 
np.any(thisobj): continue diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index 3bcfd1051a..38ef090a22 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -14,7 +14,7 @@ from abc import ABCMeta from pypeit import specobjs -from pypeit import msgs, utils +from pypeit import log, utils from pypeit import PypeItError from pypeit.display import display from pypeit.core import skysub, qa, parse, flat, flexure @@ -160,7 +160,7 @@ def __init__(self, sciImg, slits, spectrograph, par, objtype, wv_calib=None, wav self.spat_flexure_shift = None # Initialise the slits - msgs.info("Initializing slits") + log.info("Initializing slits") self.initialize_slits(slits) # Internal bpm mask @@ -228,10 +228,10 @@ def __init__(self, sciImg, slits, spectrograph, par, objtype, wv_calib=None, wav tilt_flexure_shift = _spat_flexure - self.waveTilts.spat_flexure else: tilt_flexure_shift = self.spat_flexure_shift - msgs.info("Generating tilts image from fit in waveTilts") + log.info("Generating tilts image from fit in waveTilts") self.tilts = self.waveTilts.fit2tiltimg(self.slitmask, flexure=tilt_flexure_shift) elif waveTilts is None and tilts is not None: - msgs.info("Using user input tilts image") + log.info("Using user input tilts image") self.tilts = tilts # Show? @@ -264,7 +264,7 @@ def create_skymask(self, sobjs_obj): gdslits = np.where(np.logical_not(self.reduce_bpm))[0] for slit_idx in gdslits: slit_spat = self.slits.spat_id[slit_idx] - msgs.info(f'Generating skymask for slit # {slit_spat}') + log.info(f'Generating skymask for slit # {slit_spat}') thismask = self.slitmask == slit_spat this_sobjs = sobjs_obj.SLITID == slit_spat # Boxcar mask? @@ -348,7 +348,7 @@ def run(self, std_trace=None, show_peaks=False, show_skysub_fit=False): # If the skip_skysub is set (i.e. image is already sky-subtracted), simply find objects if self.par['reduce']['findobj']['skip_skysub']: - msgs.info("Skipping global sky sub as per user request") + log.info("Skipping global sky sub as per user request") sobjs_obj, self.nobj = self.find_objects(self.sciImg.image, self.sciImg.ivar, std_trace=std_trace, show=self.findobj_show, show_peaks=show_peaks) @@ -369,7 +369,7 @@ def run(self, std_trace=None, show_peaks=False, show_skysub_fit=False): if self.nobj == 0 or self.initial_skymask is not None: # Either no objects were found, or the initial sky mask was provided by the user. # Either way, don't don't redo global sky subtraction - msgs.info('Either no objects were found or a user-provided sky mask was used. ' + log.info('Either no objects were found or a user-provided sky mask was used. 
' 'Skipping second pass of sky-subtraction and object finding.') return initial_sky0, sobjs_obj @@ -386,7 +386,7 @@ def run(self, std_trace=None, show_peaks=False, show_skysub_fit=False): self.sciImg.ivar, std_trace=std_trace, show=self.findobj_show, show_peaks=show_peaks) else: - msgs.info("Skipping 2nd run of finding objects") + log.info("Skipping 2nd run of finding objects") # TODO I think the final global should go here as well from the pypeit.py class lines 837 return initial_sky, sobjs_obj @@ -449,7 +449,7 @@ def find_objects(self, image, ivar, std_trace=None, # Find negative objects if self.find_negative: - msgs.info("Finding objects in the negative image") + log.info("Finding objects in the negative image") # Parses manual_extract_dict = self.manual.dict_for_objfind(self.detname, neg=True) if self.manual is not None else None sobjs_obj_single_neg, nobj_single_neg = \ @@ -551,7 +551,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, sigrej = 7.0 update_crmask = False if not self.par['reduce']['skysub']['global_sky_std']: - msgs.info('Skipping global sky-subtraction for standard star.') + log.info('Skipping global sky-subtraction for standard star.') return global_sky else: sigrej = 3.0 @@ -579,12 +579,12 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, # Loop on slits for slit_idx in gdslits: slit_spat = self.slits.spat_id[slit_idx] - msgs.info("Global sky subtraction for slit: {:d}".format(slit_spat)) + log.info("Global sky subtraction for slit: {:d}".format(slit_spat)) thismask = self.slitmask == slit_spat inmask = self.sciImg.select_flag(invert=True) & thismask & skymask_now # All masked? if not np.any(inmask): - msgs.warning("No pixels for fitting sky. If you are using mask_by_boxcar=True, your radius may be too large.") + log.warning("No pixels for fitting sky. If you are using mask_by_boxcar=True, your radius may be too large.") self.reduce_bpm[slit_idx] = True continue @@ -603,7 +603,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, # Mask if something went wrong if np.sum(global_sky[thismask]) == 0.: - msgs.warning("Bad fit to sky. Rejecting slit: {:d}".format(slit_spat)) + log.warning("Bad fit to sky. 
Rejecting slit: {:d}".format(slit_spat)) self.reduce_bpm[slit_idx] = True if update_crmask and self.par['scienceframe']['process']['mask_cr']: @@ -670,7 +670,7 @@ def show(self, attr, image=None, global_sky=None, showmask=False, sobjs=None, ch_name = chname if chname is not None else 'image' viewer, ch = display.show_image(image, chname=ch_name, clear=clear, wcs_match=True) else: - msgs.warning("Not an option for show") + log.warning("Not an option for show") if sobjs is not None: for spec in sobjs: @@ -771,7 +771,7 @@ def find_objects_pypeline(self, image, ivar, std_trace=None, for slit_idx in gdslits: slit_spat = self.slits.spat_id[slit_idx] qa_title ="Finding objects on slit # {:d}".format(slit_spat) - msgs.info(qa_title) + log.info(qa_title) thismask = self.slitmask == slit_spat inmask = self.sciImg.select_flag(invert=True) & thismask specobj_dict = {'SLITID': slit_spat, @@ -1018,7 +1018,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, update_crmask=True, if self.wv_calib is None: raise PypeItError("A wavelength calibration is needed (wv_calib) if a joint sky fit is requested.") - msgs.info("Generating wavelength image") + log.info("Generating wavelength image") # Generate the waveimg which is needed if flexure is being computed self.waveimg = self.wv_calib.build_waveimg(self.tilts, self.slits, spat_flexure=self.spat_flexure_shift) @@ -1032,7 +1032,7 @@ def global_skysub(self, skymask=None, bkg_redux_sciimg=None, update_crmask=True, if method in ['slitcen']: self.slitshift = self.calculate_flexure(global_sky_sep) # Recalculate the wavelength image, and the global sky taking into account the spectral flexure - msgs.info("Generating wavelength image, accounting for spectral flexure") + log.info("Generating wavelength image, accounting for spectral flexure") self.waveimg = self.wv_calib.build_waveimg(self.tilts, self.slits, spec_flexure=self.slitshift, spat_flexure=self.spat_flexure_shift) @@ -1053,7 +1053,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), """ Perform a joint sky model fit to the data. See Reduce.global_skysub() for parameter definitions. """ - msgs.info("Performing joint global sky subtraction") + log.info("Performing joint global sky subtraction") # Mask objects using the skymask? 
If skymask has been set by objfinding, and masking is requested, then do so skymask_now = skymask if (skymask is not None) else np.ones_like(self.sciImg.image, dtype=bool) _global_sky = np.zeros_like(self.sciImg.image) @@ -1070,7 +1070,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), sigrej = 7.0 update_crmask = False if not self.par['reduce']['skysub']['global_sky_std']: - msgs.info('Skipping global sky-subtraction for standard star.') + log.info('Skipping global sky-subtraction for standard star.') return _global_sky # Use the FWHM map determined from the arc lines to convert the science frame @@ -1087,7 +1087,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), slitmask = self.slits.slit_img(pad=0, flexure=self.spat_flexure_shift) slitmask_trim = self.slits.slit_img(pad=-3, flexure=self.spat_flexure_shift) for nn in range(numiter): - msgs.info("Performing iterative joint sky subtraction - ITERATION {0:d}/{1:d}".format(nn+1, numiter)) + log.info("Performing iterative joint sky subtraction - ITERATION {0:d}/{1:d}".format(nn+1, numiter)) # TODO trim_edg is in the parset so it should be passed in here via trim_edg=tuple(self.par['reduce']['trim_edge']), _global_sky[thismask] = skysub.global_skysub(sciimg, model_ivar, tilt_wave, thismask, self.slits_left, self.slits_right, inmask=inmask, @@ -1105,7 +1105,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), sciimg /= scaleImg # Update the ivar image used in the sky fit - msgs.info("Updating sky noise model") + log.info("Updating sky noise model") # Choose the highest counts out of sky and object counts = _global_sky _scale = None if self.sciImg.img_scale is None else self.sciImg.img_scale[thismask] @@ -1148,7 +1148,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0), show_fit=show_fit) # Update the ivar image used in the sky fit - msgs.info("Updating sky noise model") + log.info("Updating sky noise model") # Choose the highest counts out of sky and object counts = _global_sky _scale = None if self.sciImg.img_scale is None else self.sciImg.img_scale[thismask] @@ -1205,7 +1205,7 @@ def calculate_flexure(self, global_sky): maxwave=self.par['flexure']['maxwave']) this_slitshift = np.zeros(self.slits.nslits) if flex_dict_ref is not None: - msgs.warning("Only a relative spectral flexure correction will be performed") + log.warning("Only a relative spectral flexure correction will be performed") this_slitshift = np.ones(self.slits.nslits) * flex_dict_ref['shift'] # Now loop through all slits to calculate the additional shift relative to the reference slit flex_list = [] @@ -1231,12 +1231,12 @@ def calculate_flexure(self, global_sky): new_slitshift = self.slitshift + this_slitshift # Now report the flexure values for slit_idx, slit_spat in enumerate(self.slits.spat_id): - msgs.info("Flexure correction, slit {0:d} (spat id={1:d}): {2:.3f} pixels".format(1+slit_idx, slit_spat, + log.info("Flexure correction, slit {0:d} (spat id={1:d}): {2:.3f} pixels".format(1+slit_idx, slit_spat, self.slitshift[slit_idx])) # Save QA # TODO :: Need to implement QA once the flexure code has been tidied up, and this routine has been moved # out of the find_objects() class. 
- msgs.debug("QA is not currently implemented for the flexure correction") + log.debug("QA is not currently implemented for the flexure correction") if False:#flex_list is not None: basename = f'{self.basename}_global_{self.spectrograph.get_det_name(self.det)}' out_dir = os.path.join(self.par['rdx']['redux_path'], 'QA') @@ -1255,7 +1255,7 @@ def apply_relative_scale(self, scaleImg): if self.scaleimg.size == 1: self.scaleimg = np.ones_like(self.sciImg.image) # Correct the relative illumination of the science frame - msgs.info("Correcting science frame for relative illumination") + log.info("Correcting science frame for relative illumination") self.scaleimg *= scaleImg.copy() self.sciImg.image, _bpm, varImg = flat.flatfield(self.sciImg.image, scaleImg, varframe=utils.inverse(self.sciImg.ivar)) diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index 7fe1950309..0db73e7ec6 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -19,7 +19,7 @@ from IPython import embed -from pypeit import msgs +from pypeit import log from pypeit import PypeItError, PypeItDataModelError from pypeit import utils from pypeit import bspline @@ -208,7 +208,7 @@ def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None, ** try: d[key] = np.array([bspline.bspline.from_hdu(hdu[k]) for k in ext_bspl]) except Exception as e: - msgs.warning('Error in bspline extension read:\n {0}: {1}'.format( + log.warning('Error in bspline extension read:\n {0}: {1}'.format( e.__class__.__name__, str(e))) # Assume this is because the type failed type_passed = False @@ -234,7 +234,7 @@ def _parse(cls, hdu, ext=None, transpose_table_arrays=False, hdu_prefix=None, ** allfit.append(fitting.PypeItFit.from_hdu(hdu[k])) d[key] = np.array(allfit) except Exception as e: - msgs.warning('Error in finecorr extension read:\n {0}: {1}'.format( + log.warning('Error in finecorr extension read:\n {0}: {1}'.format( e.__class__.__name__, str(e))) # Assume this is because the type failed type_passed = False @@ -286,17 +286,17 @@ def get_bpmflats(self, frametype='pixel'): """ # Check if both BPMs are none if self.pixelflat_bpm is None and self.illumflat_bpm is None: - msgs.warning("FlatImages contains no BPM - trying to generate one") + log.warning("FlatImages contains no BPM - trying to generate one") return np.zeros(self.shape, dtype=int) # Now return the requested case, checking for None if frametype == 'illum': if self.illumflat_bpm is not None: return self.illumflat_bpm - msgs.warning("illumflat has no BPM - using the pixelflat BPM") + log.warning("illumflat has no BPM - using the pixelflat BPM") return self.pixelflat_bpm if self.pixelflat_bpm is not None: return self.pixelflat_bpm - msgs.warning("pixelflat has no BPM - using the illumflat BPM") + log.warning("pixelflat has no BPM - using the illumflat BPM") return self.illumflat_bpm def get_spat_bsplines(self, frametype='illum', finecorr=False): @@ -331,17 +331,17 @@ def get_spat_bsplines(self, frametype='illum', finecorr=False): illum_bsplines = self.illumflat_spat_bsplines # Ensure that at least one has been generated if pixel_bsplines is None and illum_bsplines is None: - msgs.warning(f'FlatImages contains no {fctxt}spatial bspline fit.') + log.warning(f'FlatImages contains no {fctxt}spatial bspline fit.') return None # Now return the requested case, checking for None if frametype == 'illum': if illum_bsplines is not None: return illum_bsplines - msgs.warning(f'illumflat has no {fctxt}spatial bspline fit - using the pixelflat.') + log.warning(f'illumflat has no 
{fctxt}spatial bspline fit - using the pixelflat.') return pixel_bsplines if pixel_bsplines is not None: return pixel_bsplines - msgs.warning(f'pixelflat has no {fctxt}spatial bspline fit - using the illumflat.') + log.warning(f'pixelflat has no {fctxt}spatial bspline fit - using the illumflat.') return illum_bsplines def fit2illumflat(self, slits, frametype='illum', finecorr=False, initial=False, @@ -432,7 +432,7 @@ def show(self, frametype='all', slits=None, wcs_match=True, chk_version=True): try: slits = slittrace.SlitTraceSet.from_file(slits_file, chk_version=chk_version) except (FileNotFoundError, PypeItDataModelError): - msgs.warning('Could not load slits to include when showing flat-field images. File ' + log.warning('Could not load slits to include when showing flat-field images. File ' 'was either not provided directly, or it could not be read based on its ' f'expected name: {slits_file}.') @@ -561,7 +561,7 @@ def __init__(self, rawflatimg, spectrograph, flatpar, slits, wavetilts=None, wv_ # get waveimg here if available if self.wavetilts is None or self.wv_calib is None: - msgs.warning("Wavelength calib or tilts are not available. Wavelength image not generated.") + log.warning("Wavelength calib or tilts are not available. Wavelength image not generated.") else: self.build_waveimg() # this set self.waveimg @@ -605,7 +605,7 @@ def run(self, doqa=False, debug=False, show=False): # check if self.wavetilts is available. It can be None if the flat is slitless, but it's needed otherwise if self.wavetilts is None and not self.slitless: - msgs.warning("Wavelength tilts are not available. Cannot generate this flat image.") + log.warning("Wavelength tilts are not available. Cannot generate this flat image.") return None # Fit it @@ -635,7 +635,7 @@ def run(self, doqa=False, debug=False, show=False): # has already been divided out by the pixel flat. if self.spat_illum_only: break - msgs.info("Iteration {0:d}/{1:d} of 2D detector response extraction".format(ff+1, niter)) + log.info("Iteration {0:d}/{1:d} of 2D detector response extraction".format(ff+1, niter)) # Extract a detector response image det_resp = self.extract_structure(rawflat_orig) # Trim the slits to avoid edge effects @@ -701,7 +701,7 @@ def build_waveimg(self): """ Generate an image of the wavelength of each pixel. """ - msgs.info("Generating wavelength image") + log.info("Generating wavelength image") if self.wavetilts is None or self.wv_calib is None: raise PypeItError("Wavelength calib or tilts are not available. Cannot generate wavelength image.") else: @@ -909,19 +909,19 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): for slit_idx, slit_spat in enumerate(self.slits.spat_id): # Is this a good slit?? 
if self.slits.bitmask.flagged(self.slits.mask[slit_idx], flag=['SHORTSLIT', 'USERIGNORE', 'BADTILTCALIB']): - msgs.info('Skipping bad slit: {}'.format(slit_spat)) + log.info('Skipping bad slit: {}'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') continue elif self.slits.bitmask.flagged(self.slits.mask[slit_idx], flag=['BOXSLIT']): - msgs.info('Skipping alignment slit: {}'.format(slit_spat)) + log.info('Skipping alignment slit: {}'.format(slit_spat)) continue elif self.slits.bitmask.flagged(self.slits.mask[slit_idx], flag=['BADWVCALIB']) and \ (self.flatpar['pixelflat_min_wave'] is not None or self.flatpar['pixelflat_max_wave'] is not None): - msgs.info('Skipping slit with bad wavecalib: {}'.format(slit_spat)) + log.info('Skipping slit with bad wavecalib: {}'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') continue - msgs.info('Modeling the flat-field response for slit spat_id={}: {}/{}'.format( + log.info('Modeling the flat-field response for slit spat_id={}: {}/{}'.format( slit_spat, slit_idx+1, self.slits.nslits)) # Find the pixels on the initial slit @@ -945,14 +945,14 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): + common_message) elif saturated_slits == 'mask': self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') - msgs.warning('Only {:4.2f}'.format(100*good_frac) + log.warning('Only {:4.2f}'.format(100*good_frac) + '% of the pixels on slit {0} are not saturated. '.format(slit_spat) + 'Selected behavior was to mask this slit and continue with the ' + 'remainder of the reduction, meaning no science data will be ' + 'extracted from this slit. ' + common_message) elif saturated_slits == 'continue': self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'SKIPFLATCALIB') - msgs.warning('Only {:4.2f}'.format(100*good_frac) + log.warning('Only {:4.2f}'.format(100*good_frac) + '% of the pixels on slit {0} are not saturated. '.format(slit_spat) + 'Selected behavior was to simply continue, meaning no ' + 'field-flatting correction will be applied to this slit but ' @@ -1005,12 +1005,12 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): spec_gpm = onslit_trimmed & gpm_log # & (rawflat < nonlinear_counts) spec_nfit = np.sum(spec_gpm) spec_ntot = np.sum(onslit_init) - msgs.info('Spectral fit of flatfield for {0}/{1} '.format(spec_nfit, spec_ntot) + log.info('Spectral fit of flatfield for {0}/{1} '.format(spec_nfit, spec_ntot) + ' pixels in the slit.') # Set this to a parameter? if spec_nfit/spec_ntot < 0.5: # TODO: Shouldn't this raise an exception or continue to the next slit instead? - msgs.warning('Spectral fit includes only {:.1f}'.format(100*spec_nfit/spec_ntot) + log.warning('Spectral fit includes only {:.1f}'.format(100*spec_nfit/spec_ntot) + '% of the pixels on this slit.\n' + ' Either the slit has many bad pixels or the number of ' 'trimmed pixels is too large.') @@ -1044,7 +1044,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): if exit_status > 1: # TODO -- MAKE A FUNCTION - msgs.warning('Flat-field spectral response bspline fit failed! Not flat-fielding ' + log.warning('Flat-field spectral response bspline fit failed! 
Not flat-fielding ' 'slit {0} and continuing!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') continue @@ -1094,18 +1094,18 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): # Report spat_nfit = np.sum(spat_gpm) spat_ntot = np.sum(onslit_padded) - msgs.info('Spatial fit of flatfield for {0}/{1} '.format(spat_nfit, spat_ntot) + log.info('Spatial fit of flatfield for {0}/{1} '.format(spat_nfit, spat_ntot) + ' pixels in the slit.') if spat_nfit/spat_ntot < 0.5: # TODO: Shouldn't this raise an exception or continue to the next slit instead? - msgs.warning('Spatial fit includes only {:.1f}'.format(100*spat_nfit/spat_ntot) + log.warning('Spatial fit includes only {:.1f}'.format(100*spat_nfit/spat_ntot) + '% of the pixels on this slit.\n' + ' Either the slit has many bad pixels, the model of the ' 'spectral shape is poor, or the illumination profile is very irregular.') # First fit -- With initial slits if not np.any(spat_gpm): - msgs.warning('Flat-field failed during normalization! Not flat-fielding ' + log.warning('Flat-field failed during normalization! Not flat-fielding ' 'slit {0} and continuing!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on( self.slits.mask[slit_idx], 'BADFLATCALIB') @@ -1208,14 +1208,14 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): continue else: # Save the nada - msgs.warning('Slit illumination profile bspline fit failed! Spatial profile not ' + log.warning('Slit illumination profile bspline fit failed! Spatial profile not ' 'included in flat-field model for slit {0}!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') continue # ---------------------------------------------------------- # Fit the 2D residuals of the 1D spectral and spatial fits. - msgs.info('Performing 2D illumination + scattered light flat field fit') + log.info('Performing 2D illumination + scattered light flat field fit') # Construct the spectrally and spatially normalized flat norm_spec_spat[...] = 1. @@ -1300,7 +1300,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): # Save the 2D residual model twod_model[...] = 1. if exit_status > 1: - msgs.warning('Two-dimensional fit to flat-field data failed! No higher order ' + log.warning('Two-dimensional fit to flat-field data failed! No higher order ' 'flat-field corrections included in model of slit {0}!'.format(slit_spat)) self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADFLATCALIB') else: @@ -1317,7 +1317,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): # Check for infinities and NaNs in the flat-field model winfnan = np.where(np.logical_not(np.isfinite(self.flat_model[onslit_tweak]))) if winfnan[0].size != 0: - msgs.warning( + log.warning( f'There are {winfnan[0].size} pixels with non-finite values in the flat-field ' f'model for slit {slit_spat}!\nThese model pixel values will be set to the ' 'raw pixel value.' @@ -1327,7 +1327,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): whilo = np.where((self.flat_model[onslit_tweak] >= nonlinear_counts) | (self.flat_model[onslit_tweak] <= 0.0)) if whilo[0].size != 0: - msgs.warning( + log.warning( f'There are {whilo[0].size} pixels with unrealistically high or low values in ' f'the flat-field model for slit {slit_spat}!\nThese model pixel values will ' 'be set to the raw pixel value.' 
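The flatfield.py hunks above route conditions the code detects itself through log.warning(). Warnings raised by library code via warnings.warn() travel a separate channel; if those should land in the same handlers, the stdlib provides a hook for it. A sketch of assumed usage (not something this patch enables):

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    # Route warnings.warn(...) into the 'py.warnings' logger so both channels
    # share the same handlers and formatting.
    logging.captureWarnings(True)

    warnings.warn('illustrative library warning')   # now emitted as a log record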
@@ -1468,13 +1468,13 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm,
         """
         # check if self.waveimg is available
         if self.waveimg is None:
-            msgs.warning("Cannot perform the fine correction to the spatial illumination without the wavelength image.")
+            log.warning("Cannot perform the fine correction to the spatial illumination without the wavelength image.")
             return
         # TODO :: Include fit_order in the parset??
         fit_order = np.array([3, 6])
         slit_txt = self.slits.slitord_txt
         slit_ordid = self.slits.slitord_id[slit_idx]
-        msgs.info(f"Performing a fine correction to the spatial illumination ({slit_txt} {slit_ordid})")
+        log.info(f"Performing a fine correction to the spatial illumination ({slit_txt} {slit_ordid})")
         # initialise
         illumflat_finecorr = np.ones_like(self.rawflatimg.image)
         # Trim the edges by a few pixels to avoid edge effects
@@ -1523,7 +1523,7 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm,
             self.list_of_finecorr_fits[slit_idx] = fullfit
             illumflat_finecorr[this_slit] = fullfit.eval(xpos, ypos)
         else:
-            msgs.warning(f"Fine correction to the spatial illumination failed for {slit_txt} {slit_ordid}")
+            log.warning(f"Fine correction to the spatial illumination failed for {slit_txt} {slit_ordid}")
             return illumflat_finecorr
         # If corrections exceed the tolerance, then clip them to the level of the tolerance
@@ -1563,7 +1563,7 @@ def extract_structure(self, rawflat_orig, slit_trim=3):
             An image containing the detector structure (i.e. the raw flatfield image
             divided by the spectral and spatial illumination profile fits).
         """
-        msgs.info("Extracting flatfield structure")
+        log.info("Extracting flatfield structure")
         # check if the waveimg is available
         if self.waveimg is None:
@@ -1637,12 +1637,12 @@ def spectral_illumination(self, gpm=None, debug=False):
         scale_model: `numpy.ndarray`_
             An image containing the appropriate scaling
         """
-        msgs.info("Deriving spectral illumination profile")
+        log.info("Deriving spectral illumination profile")
         # check if the waveimg is available
         if self.waveimg is None:
-            msgs.warning("Cannot perform the spectral illumination without the wavelength image.")
+            log.warning("Cannot perform the spectral illumination without the wavelength image.")
             return None
-        msgs.info('Performing a joint fit to the flat-field response')
+        log.info('Performing a joint fit to the flat-field response')
         # Grab some parameters
         trim = self.flatpar['slit_trim']
         rawflat = self.rawflatimg.image / (self.msillumflat * self.mspixelflat)
@@ -1836,7 +1836,7 @@ def make_slitless_pixflat(self, msbias=None, msdark=None, calib_dir=None, write_
             in_file = np.array([d in file_detnames for d in detnames])
             # if all detectors are in the file, return
             if np.all(in_file):
-                msgs.info(f"Both slitless_pixflat frames and user-defined file found. "
+                log.info(f"Both slitless_pixflat frames and user-defined file found. "
                           f"The user-defined file will be used: {self.par['flatfield']['pixelflat_file']}")
                 # return unchanged self.par['flatfield']['pixelflat_file']
                 return self.par['flatfield']['pixelflat_file']
@@ -1844,7 +1844,7 @@ def make_slitless_pixflat(self, msbias=None, msdark=None, calib_dir=None, write_
                 # get the detectors that are not in the file
                 _detectors = _detectors[np.logical_not(in_file)]
                 detnames = detnames[np.logical_not(in_file)]
-                msgs.info(f'Both slitless_pixflat frames and user-defined file found, but the '
+                log.info(f'Both slitless_pixflat frames and user-defined file found, but the '
                           f'following detectors are not in the file: {detnames}. Using the '
                          f'slitless_pixflat frames to generate the missing detectors.')
@@ -1857,14 +1857,14 @@ def make_slitless_pixflat(self, msbias=None, msdark=None, calib_dir=None, write_
             this_raw_idx = self.spectrograph.parse_raw_files(self.fitstbl[self.slitless_rows], det=_det,
                                                              ftype='slitless_pixflat')
             if len(this_raw_idx) == 0:
-                msgs.warning(f'No raw slitless_pixflat frames found for {self.spectrograph.get_det_name(_det)}. '
+                log.warning(f'No raw slitless_pixflat frames found for {self.spectrograph.get_det_name(_det)}. '
                              f'Continuing...')
                 continue
             this_raw_files = self.fitstbl.frame_paths(self.slitless_rows[this_raw_idx])
-            msgs.info(f'Creating slitless pixel-flat calibration frame '
+            log.info(f'Creating slitless pixel-flat calibration frame '
                       f'for {self.spectrograph.get_det_name(_det)} using files: ')
             for f in this_raw_files:
-                msgs.info(f'  {Path(f).name}')
+                log.info(f'  {Path(f).name}')
             # Reset the BPM
             msbpm = self.spectrograph.bpm(this_raw_files[0], _det,
                                           msbias=msbias if self.par['bpm_usebias'] else None)
@@ -1961,7 +1961,7 @@ def spatillum_finecorr_qa(normed, finecorr, left, right, ypos, cut, outfile=None
     plt.rcdefaults()
     plt.rcParams['font.family'] = 'serif'
-    msgs.info("Generating QA for spatial illumination fine correction")
+    log.info("Generating QA for spatial illumination fine correction")
     # Setup some plotting variables
     nseg = 10  # Number of segments to plot in QA - needs to be large enough so the fine correction is approximately linear in between adjacent segments
     colors = plt.cm.jet(np.linspace(0, 1, nseg))
@@ -2042,7 +2042,7 @@ def spatillum_finecorr_qa(normed, finecorr, left, right, ypos, cut, outfile=None
         plt.show()
     else:
         plt.savefig(outfile, dpi=400)
-        msgs.info("Saved QA:\n"+outfile)
+        log.info("Saved QA:\n"+outfile)
     plt.close()
     plt.rcdefaults()
@@ -2066,7 +2066,7 @@ def detector_structure_qa(det_resp, det_resp_model, outfile=None, title="Detecto
     """
     plt.rcdefaults()
     plt.rcParams['font.family'] = 'serif'
-    msgs.info("Generating QA for flat field structure correction")
+    log.info("Generating QA for flat field structure correction")
     # Calculate the scale to be used in the plot
     # med = np.median(det_resp)
     # mad = 1.4826*np.median(np.abs(det_resp-med))
@@ -2110,7 +2110,7 @@ def detector_structure_qa(det_resp, det_resp_model, outfile=None, title="Detecto
         plt.show()
     else:
         plt.savefig(outfile, dpi=400)
-        msgs.info("Saved QA:\n" + outfile)
+        log.info("Saved QA:\n" + outfile)
     plt.close()
     plt.rcdefaults()
@@ -2197,11 +2197,11 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_
     scale_model: `numpy.ndarray`_
         An image containing the appropriate scaling
     """
-    msgs.info("Performing relative spectral sensitivity correction (reference slit = {0:d})".format(slit_illum_ref_idx))
+    log.info("Performing relative spectral sensitivity correction (reference slit = {0:d})".format(slit_illum_ref_idx))
     if polydeg is not None:
-        msgs.info("Using polynomial of degree {0:d} for relative spectral sensitivity".format(polydeg))
+        log.info("Using polynomial of degree {0:d} for relative spectral sensitivity".format(polydeg))
     else:
-        msgs.info("Using 'smooth_weights' algorithm for relative spectral sensitivity")
+        log.info("Using 'smooth_weights' algorithm for relative spectral sensitivity")
     # Setup some helpful parameters
     skymask_now = skymask if (skymask is not None) else np.ones_like(rawimg, dtype=bool)
     gpm = gpmask if (gpmask is not None) else np.ones_like(rawimg, dtype=bool)
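The converted calls above keep eager `str.format` interpolation. If `log` behaves like a standard `logging.Logger` (an assumption; the patch does not show its type here), the deferred %-style argument form is also available, which skips formatting entirely when the record is filtered out. A small illustrative sketch:

    import logging

    log = logging.getLogger("pypeit.example")  # stand-in for the package logger

    # Eager: the string is always built, even if INFO is filtered out
    log.info("Iteration {0:d} :: Minimum/Maximum scales = {1:.5f}, {2:.5f}".format(1, 0.9, 1.1))

    # Deferred: args are only interpolated if the record is actually emitted
    log.info("Iteration %d :: Minimum/Maximum scales = %.5f, %.5f", 1, 0.9, 1.1)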
@@ -2295,7 +2295,7 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_
             break
         else:
             lo_prev, hi_prev = 1/minv, maxv
-        msgs.info("Iteration {0:d} :: Minimum/Maximum scales = {1:.5f}, {2:.5f}".format(rr + 1, minv, maxv))
+        log.info("Iteration {0:d} :: Minimum/Maximum scales = {1:.5f}, {2:.5f}".format(rr + 1, minv, maxv))
         # Store rescaling
         scaleImg *= relscl_model
         #rawimg_copy /= relscl_model
@@ -2404,7 +2404,7 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi
     """
-    msgs.info("Writing the pixel-to-pixel flat-field images to a FITS file.")
+    log.info("Writing the pixel-to-pixel flat-field images to a FITS file.")
     # Check that the number of detectors matches the number of pixelflat_norm arrays
     if len(pixflat_norm_list) != len(detname_list):
@@ -2419,7 +2419,7 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi
     old_detnames = []
     old_hdr = None
     if pixelflat_file.exists():
-        msgs.warning("The pixelflat file already exists. It will be overwritten/updated.")
+        log.warning("The pixelflat file already exists. It will be overwritten/updated.")
         old_hdus = fits.open(pixelflat_file)
         old_detnames = [h.name.split('-')[0] for h in old_hdus]  # this has also 'PRIMARY'
         old_hdr = old_hdus[0].header
@@ -2458,11 +2458,11 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi
     if not pixelflat_file.parent.is_dir():
         pixelflat_file.parent.mkdir(parents=True)
     new_hdulist.writeto(pixelflat_file, overwrite=True)
-    msgs.info(f'A slitless Pixel Flat file for detectors {detname_list} has been saved to\n'
+    log.info(f'A slitless Pixel Flat file for detectors {detname_list} has been saved to\n'
              f'{pixelflat_file}')
     # common msg
-    add_msgs = (
+    add_log = (
         f"add the following to your PypeIt Reduction File:\n"
         f"    [calibrations]\n"
         f"        [[flatfield]]\n"
@@ -2474,15 +2474,15 @@ def write_pixflat_to_fits(pixflat_norm_list, detname_list, spec_name, outdir, pi
         # NOTE that the file saved in the cache is gzipped, while the one saved in the outdir is not
         # This prevents `dataPaths.pixelflat.get_file_path()` from returning the file saved in the outdir
         cache.write_file_to_cache(pixelflat_file, pixelflat_name+'.gz', f"pixelflats")
-        msgs.info(
+        log.info(
             f"The slitless Pixel Flat file has also been saved to the PypeIt cache directory\n"
             f"{str(dataPaths.pixelflat)}\n"
             f"It will be automatically used in this run. "
-            f"If you want to use this file in future runs, {add_msgs}")
+            f"If you want to use this file in future runs, {add_log}")
     else:
-        msgs.info(
+        log.info(
             f"To use this file, move it to the PypeIt data directory\n"
-            f"{str(dataPaths.pixelflat)}\n and {add_msgs}"
+            f"{str(dataPaths.pixelflat)}\n and {add_log}"
         )
@@ -2558,7 +2558,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None,
         if pixflat_msc.shape != traceimg.image.shape:
             raise PypeItError('The constructed pixel flat mosaic does not have the correct shape. '
                               'Cannot load this pixel flat as a mosaic!')
-        msgs.info(f'Using pixelflat file: {pixel_flat_file} '
+        log.info(f'Using pixelflat file: {pixel_flat_file} '
                   f'for {spectrograph.get_det_name(det)}.')
         nrm_image = FlatImages(pixelflat_norm=pixflat_msc)
@@ -2575,7 +2575,7 @@ def load_pixflat(pixel_flat_file, spectrograph, det, flatimages, calib_dir=None,
             # get the index of the current detector
             idx = file_detnames.index(detname)
             # get the pixel flat image
-            msgs.info(f'Using pixelflat file: {pixel_flat_file} for {detname}.')
+            log.info(f'Using pixelflat file: {pixel_flat_file} for {detname}.')
             nrm_image = FlatImages(pixelflat_norm=hdu[idx].data)
         else:
             raise PypeItError(f'{detname} not found in the pixel flat file: '
diff --git a/pypeit/fluxcalibrate.py b/pypeit/fluxcalibrate.py
index 1cc6336baa..f5cafa2342 100644
--- a/pypeit/fluxcalibrate.py
+++ b/pypeit/fluxcalibrate.py
@@ -4,7 +4,7 @@
 import matplotlib.pyplot as plt
 from astropy.io import fits
-from pypeit import msgs
+from pypeit import log
 from pypeit.spectrographs.util import load_spectrograph
 from pypeit import specobjs
 from pypeit import sensfunc
diff --git a/pypeit/images/bitmaskarray.py b/pypeit/images/bitmaskarray.py
index 0cf3543496..46f03bc059 100644
--- a/pypeit/images/bitmaskarray.py
+++ b/pypeit/images/bitmaskarray.py
@@ -17,7 +17,7 @@
 from pypeit.datamodel import DataContainer
 from pypeit.bitmask import BitMask
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 class BitMaskArray(DataContainer):
diff --git a/pypeit/images/buildimage.py b/pypeit/images/buildimage.py
index aa658adadf..74748b1d07 100644
--- a/pypeit/images/buildimage.py
+++ b/pypeit/images/buildimage.py
@@ -8,7 +8,7 @@
 import numpy as np
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.par import pypeitpar
 from pypeit.images import rawimage
diff --git a/pypeit/images/combineimage.py b/pypeit/images/combineimage.py
index 37a5f764dc..772f66174d 100644
--- a/pypeit/images/combineimage.py
+++ b/pypeit/images/combineimage.py
@@ -8,7 +8,7 @@
 import numpy as np
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.core import combine
 from pypeit.core import procimg
@@ -192,7 +192,7 @@ def run(self, ignore_saturation=False, maxiters=5):
         # TODO: JFH suggests that we move this to calibrations.check_calibrations
         if np.any(np.absolute(np.diff(exptime)) > 0):
             # TODO: This should likely throw an error instead!
-            msgs.warning('Exposure time is not consistent for all images being combined! '
+            log.warning('Exposure time is not consistent for all images being combined! '
                          'Using the average.')
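The in-line TODO above suggests raising an error rather than warning and averaging when exposure times disagree. A hedged sketch of that stricter alternative (a hypothetical helper, not part of this patch):

    import numpy as np

    def combined_exptime(exptime, rtol=1e-6):
        """Return the common exposure time, raising if the stack is inconsistent."""
        exptime = np.asarray(exptime, dtype=float)
        if np.any(np.absolute(np.diff(exptime)) > rtol * exptime[0]):
            # Stricter alternative to warning and averaging
            raise ValueError(f'Exposure times are not consistent: {exptime}')
        return exptime[0]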
             comb_texp = np.mean(exptime)
         else:
@@ -205,15 +205,15 @@ def run(self, ignore_saturation=False, maxiters=5):
             no_nan = np.logical_not(np.isnan(spat_flex))
             if np.sum(no_nan) > 0:
                 if np.any(np.absolute(np.diff(spat_flex[no_nan])) > 0.1):
-                    msgs.warning(f'Spatial flexure is not consistent for all images being combined: {spat_flex}.')
+                    log.warning(f'Spatial flexure is not consistent for all images being combined: {spat_flex}.')
                     comb_spat_flex = np.round(np.mean(spat_flex[no_nan]),3)
-                    msgs.warning(f'Using the average: {comb_spat_flex}.')
+                    log.warning(f'Using the average: {comb_spat_flex}.')
                 else:
                     comb_spat_flex = spat_flex[no_nan][0]
         # scale the images to their mean, if requested, before combining
         if self.par['scale_to_mean']:
-            msgs.info("Scaling images to have the same mean before combining")
+            log.info("Scaling images to have the same mean before combining")
             # calculate the mean of the images
             [mean_img], _, mean_gpm, _ = combine.weighted_combine(np.ones(self.nimgs, dtype=float)/self.nimgs,
                                                                   [img_stack],
diff --git a/pypeit/images/detector_container.py b/pypeit/images/detector_container.py
index b236fd2d38..5e2a9fc015 100644
--- a/pypeit/images/detector_container.py
+++ b/pypeit/images/detector_container.py
@@ -11,7 +11,7 @@
 import numpy as np
 from pypeit import datamodel
-from pypeit import msgs
+from pypeit import log
 from pypeit.core import procimg
diff --git a/pypeit/images/mosaic.py b/pypeit/images/mosaic.py
index f496f5c06d..973139960f 100644
--- a/pypeit/images/mosaic.py
+++ b/pypeit/images/mosaic.py
@@ -15,7 +15,7 @@
 from pypeit import datamodel
 from pypeit import io
 from pypeit.images.detector_container import DetectorContainer
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
@@ -168,7 +168,7 @@ def _parse(cls, hdu, hdu_prefix=None, **kwargs):
             # version and type checking.
             _d, vp, tp, ph = DetectorContainer._parse(_hdu)
             if not vp:
-                msgs.warning('Detector datamodel version is incorrect. May cause a fault.')
+                log.warning('Detector datamodel version is incorrect. May cause a fault.')
             version_passed &= vp
             d['detectors'] += [DetectorContainer.from_dict(d=_d) if tp else None]
             type_passed &= tp
diff --git a/pypeit/images/pypeitimage.py b/pypeit/images/pypeitimage.py
index 27abdbac2c..ccff798729 100644
--- a/pypeit/images/pypeitimage.py
+++ b/pypeit/images/pypeitimage.py
@@ -11,7 +11,7 @@
 from astropy.io import fits
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.images.imagebitmask import ImageBitMaskArray
 from pypeit.images.detector_container import DetectorContainer
@@ -784,7 +784,7 @@ def sub(self, other):
         spat_flexure = self.spat_flexure
         if other.spat_flexure is not None and spat_flexure is not None \
                 and other.spat_flexure != spat_flexure:
-            msgs.warning(f'Spatial flexure different for images being subtracted ({spat_flexure} '
+            log.warning(f'Spatial flexure different for images being subtracted ({spat_flexure} '
                          f'vs. {other.spat_flexure}). Adopting {np.max(np.abs([spat_flexure, other.spat_flexure]))}.')
         # Create a copy of the detector, if it is defined, to be used when
diff --git a/pypeit/images/rawimage.py b/pypeit/images/rawimage.py
index abce8f3239..8db965e243 100644
--- a/pypeit/images/rawimage.py
+++ b/pypeit/images/rawimage.py
@@ -13,7 +13,7 @@
 import numpy as np
 from astropy import stats
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.core import arc
 from pypeit.core import parse
@@ -256,7 +256,7 @@ def apply_gain(self, force=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already applied
-            msgs.warning('Gain was already applied.')
+            log.warning('Gain was already applied.')
             return
         # Have the images been trimmed?
@@ -317,7 +317,7 @@ def correct_nonlinear(self):
         step = inspect.stack()[0][3]
         if self.steps[step]:
             # Already applied
-            msgs.warning('Non-linear correction was already applied.')
+            log.warning('Non-linear correction was already applied.')
             return
         inim = self.image.copy()
@@ -356,7 +356,7 @@ def estimate_readnoise(self):
                 gain = 1. if self.steps['apply_gain'] else self.detector[i]['gain'][amp]
                 biaspix = self.image[i,self.oscansec_img[i]==amp+1] * gain
                 self.ronoise[i,amp] = stats.sigma_clipped_stats(biaspix, sigma=5)[-1]
-                msgs.info(f'Estimated readnoise of amplifier {amp+1} = '
+                log.info(f'Estimated readnoise of amplifier {amp+1} = '
                           f'{self.ronoise[i,amp]:.3f} e-')
     def build_rn2img(self, units='e-', digitization=False):
@@ -559,9 +559,9 @@ def process(self, par, bpm=None, scattlight=None, flatimages=None, bias=None, sl
             raise PypeItError('Mosaicing must be performed if multiple detectors are processed and '
                               'either flat-fielding or spatial flexure corrections are applied.')
         if self.nimg == 1 and mosaic:
-            msgs.warning('Only processing a single detector; mosaicing is ignored.')
+            log.warning('Only processing a single detector; mosaicing is ignored.')
-        msgs.info(f'Performing basic image processing on {os.path.basename(self.filename)}.')
+        log.info(f'Performing basic image processing on {os.path.basename(self.filename)}.')
         # TODO: Checking for bit saturation should be done here.
         # - Convert from ADU to electron counts.
@@ -624,7 +624,7 @@ def process(self, par, bpm=None, scattlight=None, flatimages=None, bias=None, sl
             raise PypeItError(f'CODING ERROR: From-scratch BPM has incorrect shape!')
         # If the above was successful, the code can continue, but first warn
         # the user that the code ignored the provided bpm.
-        msgs.warning(f'Bad-pixel mask has incorrect shape: found {bpm_shape}, expected '
+        log.warning(f'Bad-pixel mask has incorrect shape: found {bpm_shape}, expected '
                      f'{self.image.shape}. Assuming this is because different binning used for '
                      'various frames. Recreating BPM specifically for this frame '
                      f'({os.path.basename(self.filename)}) and assuming the difference in the '
@@ -787,7 +787,7 @@ def spatial_flexure_shift(self, slits, force=False, debug=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already field flattened
-            msgs.warning('Spatial flexure shift already calculated.')
+            log.warning('Spatial flexure shift already calculated.')
             return
         if self.nimg > 1:
             raise PypeItError('CODING ERROR: Must use a single image (single detector or detector '
@@ -846,7 +846,7 @@ def flatfield(self, flatimages, slits=None, force=False, debug=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already field flattened
-            msgs.warning('Image was already flat fielded.')
+            log.warning('Image was already flat fielded.')
             return
         # Check input
@@ -904,7 +904,7 @@ def orient(self, force=False):
         step = inspect.stack()[0][3]
         # Check if already oriented
         if self.steps[step] and not force:
-            msgs.warning('Image was already oriented.')
+            log.warning('Image was already oriented.')
             return
         # Orient the image to have blue/red run bottom to top
         self.image = np.array([self.spectrograph.orient_image(d, i)
@@ -937,7 +937,7 @@ def subtract_bias(self, bias_image, force=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already bias subtracted
-            msgs.warning('Image was already bias subtracted.')
+            log.warning('Image was already bias subtracted.')
             return
         _bias = bias_image.image if self.nimg > 1 else np.expand_dims(bias_image.image, 0)
         if self.image.shape != _bias.shape:
@@ -1026,7 +1026,7 @@ def build_dark(self, dark_image=None, expscale=False):
                                      separator=',')
             drk_str = np.array2string(0.5*self.dark, formatter={'float_kind':lambda x: "%.2f" % x},
                                       separator=',')
-            msgs.warning(f'Dark-subtracted dark frame has significant signal remaining. Median '
+            log.warning(f'Dark-subtracted dark frame has significant signal remaining. Median '
                         f'counts are {med_str}; warning threshold = +/- {drk_str}.')
         # Combine the tabulated and observed dark values
@@ -1053,7 +1053,7 @@ def subtract_dark(self, force=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already bias subtracted
-            msgs.warning('Image was already dark subtracted.')
+            log.warning('Image was already dark subtracted.')
             return
         if self.dark is None:
@@ -1079,7 +1079,7 @@ def subtract_overscan(self, force=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already overscan subtracted
-            msgs.warning("Image was already overscan subtracted!")
+            log.warning("Image was already overscan subtracted!")
             return
         # NOTE: procimg.subtract_overscan checks that the provided images all
@@ -1114,7 +1114,7 @@ def subtract_pattern(self):
         step = inspect.stack()[0][3]
         if self.steps[step]:
             # Already pattern subtracted
-            msgs.warning("Image was already pattern subtracted!")
+            log.warning("Image was already pattern subtracted!")
             return
         # The image cannot have already been trimmed
@@ -1151,7 +1151,7 @@ def subtract_continuum(self, force=False):
         step = inspect.stack()[0][3]
         if self.steps[step] and not force:
             # Already bias subtracted
-            msgs.warning('Image was already continuum subtracted.')
+            log.warning('Image was already continuum subtracted.')
             return
         # Generate the continuum image
@@ -1183,11 +1183,11 @@ def subtract_scattlight(self, msscattlight, slits, debug=False):
         step = inspect.stack()[0][3]
         if self.steps[step]:
             # Already pattern subtracted
-            msgs.warning("The scattered light has already been subtracted from the image!")
+            log.warning("The scattered light has already been subtracted from the image!")
             return
         if self.par["scattlight"]["method"] == "model" and msscattlight.scattlight_param is None:
-            msgs.warning("Scattered light parameters are not set. Cannot perform scattered light subtraction.")
+            log.warning("Scattered light parameters are not set. Cannot perform scattered light subtraction.")
             return
         # Obtain some information that is needed for the scattered light
@@ -1266,16 +1266,16 @@ def subtract_scattlight(self, msscattlight, slits, debug=False):
             # If failure, revert back to the Scattered Light calibration frame model parameters
             if not success:
                 if msscattlight is not None:
-                    msgs.warning("Scattered light model failed - using predefined model parameters")
+                    log.warning("Scattered light model failed - using predefined model parameters")
                     scatt_img = scattlight.scattered_light_model(this_modpar, _img)
                 else:
-                    msgs.warning("Scattered light model failed - using archival model parameters")
+                    log.warning("Scattered light model failed - using archival model parameters")
                     # Use archival model parameters
                     arx_modpar, _ = self.spectrograph.scattered_light_archive(binning, dispname)
                     arx_modpar[8] = 0.0
                     scatt_img = scattlight.scattered_light_model(arx_modpar, _img)
         else:
-            msgs.warning("Scattered light not performed")
+            log.warning("Scattered light not performed")
             scatt_img = np.zeros(self.image[ii, ...].shape)
             do_finecorr = False
         # Check if a fine correction to the scattered light should be applied
@@ -1315,11 +1315,11 @@ def trim(self, force=False):
             # Image *must* have been trimmed already because shape does not
             # match raw image
             self.steps[step] = True
-            msgs.warning('Image shape does not match raw image. Assuming it was already trimmed.')
+            log.warning('Image shape does not match raw image. Assuming it was already trimmed.')
             return
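Most of these RawImage hunks share the same guard: each processing step records itself in a `steps` dictionary and warns-and-returns if re-invoked without `force`. A minimal sketch of the pattern with simplified names (the patch itself derives the step name via `inspect.stack()`):

    import logging

    log = logging.getLogger("pypeit.example")

    class Image:
        def __init__(self):
            # One flag per processing step, mirroring the guards in the hunks above
            self.steps = {'trim': False, 'orient': False}

        def trim(self, force=False):
            if self.steps['trim'] and not force:
                log.warning('Image was already trimmed.')
                return
            # ... do the actual trimming here ...
            self.steps['trim'] = True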
         if self.steps[step] and not force:
             # Already trimmed
-            msgs.warning('Image was already trimmed.')
+            log.warning('Image was already trimmed.')
             return
         self.image = np.array([procimg.trim_frame(i, d < 1)
                                for i, d in zip(self.image, self.datasec_img)])
@@ -1354,7 +1354,7 @@ def build_mosaic(self):
         if self.nimg == 1:
             # NOTE: This also catches cases where the mosaicing has already been
             # performed.
-            msgs.warning('There is only one image, so there is nothing to mosaic!')
+            log.warning('There is only one image, so there is nothing to mosaic!')
             return
         # Check that the mosaicing is allowed
diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py
index 57469f2902..25a90053db 100644
--- a/pypeit/inputfiles.py
+++ b/pypeit/inputfiles.py
@@ -17,7 +17,7 @@
 from pypeit import utils
 from pypeit.io import files_from_extension
-from pypeit import msgs, __version__
+from pypeit import log, __version__
 from pypeit import PypeItError
 from pypeit.spectrographs.util import load_spectrograph
 from pypeit.par.pypeitpar import PypeItPar
@@ -133,7 +133,7 @@ def from_file(cls, input_file:str, vet:bool=True, preserve_comments:bool=False):
             :class:`InputFile`: An instance of the InputFile class
         """
         # Read in the pypeit reduction file
-        msgs.info('Loading the reduction file')
+        log.info('Loading the reduction file')
         lines = cls.readlines(input_file)
         if not preserve_comments:
@@ -211,7 +211,7 @@ def from_file(cls, input_file:str, vet:bool=True, preserve_comments:bool=False):
             is_config[data_end+1:] = False
         # vet
-        msgs.info(f'{cls.flavor} input file loaded successfully.')
+        log.info(f'{cls.flavor} input file loaded successfully.')
         # Instantiate
         return cls(config=list(lines[is_config]),
@@ -589,7 +589,7 @@ def write(self, input_file, version_override=None, date_override=None):
                 f.write(f"{self.data_block} end\n")
             f.write("\n")
-        msgs.info(f'{self.flavor} input file written to: {input_file}')
+        log.info(f'{self.flavor} input file written to: {input_file}')
     def get_spectrograph(self):
         """
@@ -669,7 +669,7 @@ def vet(self):
                 raise PypeItError("Setup does not appear in your setup block! Add it")
         # Done
-        msgs.info('PypeIt file successfully vetted.')
+        log.info('PypeIt file successfully vetted.')
     @property
     def frametypes(self):
@@ -756,7 +756,7 @@ def vet(self):
         # This is allowed if using an archived sensitivity function
         # And the checking has to be done in the script as the spectrograph must be known..
         if 'sensfile' not in self.data.keys():
-            msgs.warning("sensfile column not provided. Fluxing will crash if an archived sensitivity function does not exist")
+            log.warning("sensfile column not provided. Fluxing will crash if an archived sensitivity function does not exist")
             self.data['sensfile'] = ''
     @property
@@ -866,7 +866,7 @@ def vet(self):
             raise PypeItError(f"Missing spectrograph in the Parameter block of your .coadd2d file. Add it!")
         # Done
-        msgs.info('.coadd2d file successfully vetted.')
+        log.info('.coadd2d file successfully vetted.')
 class Coadd3DFile(InputFile):
@@ -889,7 +889,7 @@ def vet(self):
             raise PypeItError(f"Missing spectrograph in the Parameter block of your .coadd2d file. Add it!")
         # Done
-        msgs.info('.coadd3d file successfully vetted.')
+        log.info('.coadd3d file successfully vetted.')
     @property
     def options(self):
@@ -1046,7 +1046,7 @@ def vet(self):
             raise PypeItError(f"Missing spectrograph in the Parameter block of your .flex file. Add it!")
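The inputfiles hunks above all follow one vetting convention: `vet()` raises `PypeItError` for fatal problems and only logs on success. A condensed sketch of that convention (standard exceptions stand in for PypeItError; names are illustrative):

    import logging

    log = logging.getLogger("pypeit.example")

    class InputFile:
        flavor = 'example'

        def __init__(self, config=None):
            self.config = config or {}

        def vet(self):
            # Fatal problems raise; success is only logged
            if 'spectrograph' not in self.config:
                raise ValueError(f'Missing spectrograph in your .{self.flavor} file. Add it!')
            log.info(f'.{self.flavor} file successfully vetted.')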
Add it!") # Done - msgs.info('.flex file successfully vetted.') + log.info('.flex file successfully vetted.') class Collate1DFile(InputFile): """Child class for collate 1D script @@ -1094,7 +1094,7 @@ def vet(self): super().vet() # Done - msgs.info('.rawfiles file successfully vetted.') + log.info('.rawfiles file successfully vetted.') # NOTE: I originally had this in pypeit/io.py, but I think it was causing a diff --git a/pypeit/io.py b/pypeit/io.py index 4f6a318dd8..f741bad864 100644 --- a/pypeit/io.py +++ b/pypeit/io.py @@ -36,7 +36,7 @@ # favor of specutils.Spectrum1D (or whatever it is in specutils>2.0). from linetools.spectra import xspectrum1d -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import dataPaths from pypeit import __version__ @@ -672,9 +672,9 @@ def write_to_fits(d, ofile, name=None, hdr=None, overwrite=False, checksum=True) # this is slow but still faster than if you have astropy.io.fits do # it directly if _ofile is not ofile: - msgs.info('Compressing file: {0}'.format(_ofile)) + log.info('Compressing file: {0}'.format(_ofile)) compress_file(_ofile, overwrite=True) - msgs.info('File written to: {0}'.format(ofile)) + log.info('File written to: {0}'.format(ofile)) def hdu_iter_by_ext(hdu, ext=None, hdu_prefix=None): @@ -788,7 +788,7 @@ def fits_open(filename, **kwargs): try: return fits.open(filename, **kwargs) except OSError as e: - msgs.warning(f'Error opening {filename} ({e}). Trying again by setting ' + log.warning(f'Error opening {filename} ({e}). Trying again by setting ' 'ignore_missing_end=True, assuming the error was a header problem.') try: return fits.open(filename, ignore_missing_end=True, **kwargs) diff --git a/pypeit/manual_extract.py b/pypeit/manual_extract.py index a3401894f9..b261baa680 100644 --- a/pypeit/manual_extract.py +++ b/pypeit/manual_extract.py @@ -9,7 +9,7 @@ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import datamodel from pypeit.core import parse diff --git a/pypeit/metadata.py b/pypeit/metadata.py index 60cd753a74..f5911aa157 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -17,7 +17,7 @@ from astropy import table, time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import inputfiles from pypeit.core import framematch @@ -98,7 +98,7 @@ def __init__(self, spectrograph, par, files=None, data=None, usrdata=None, if data is None and files is None: # Warn that table will be empty - msgs.warning('Both data and files are None in the instantiation of PypeItMetaData.' + log.warning('Both data and files are None in the instantiation of PypeItMetaData.' ' The table will be empty!') # Initialize internals @@ -173,11 +173,11 @@ def _vet_instrument(self, meta_tbl): # An empty table is allowed if len(instr_names) > 0: if len(instr_names) != 1: - msgs.warning(f'More than one instrument in your dataset! {instr_names} \n' + log.warning(f'More than one instrument in your dataset! {instr_names} \n' 'Proceed with great caution...') # Check the name if not instr_names[0].startswith(self.spectrograph.header_name): - msgs.warning('The instrument name in the headers of the raw files does not match the ' + log.warning('The instrument name in the headers of the raw files does not match the ' f'expected one! Found {instr_names[0]}, expected {self.spectrograph.header_name}. 
                              'You may have chosen the wrong PypeIt spectrograph name!')
@@ -203,7 +203,7 @@ def _build(self, files, strict=True, usrdata=None):
         # Allow for single files
         _files = files if hasattr(files, '__len__') else [files]
-        msgs.info(f"Building metadata for {len(_files)} files.")
+        log.info(f"Building metadata for {len(_files)} files.")
         # Build lists to fill
         data = {k:[] for k in self.spectrograph.meta.keys()}
         data['directory'] = ['None']*len(_files)
@@ -232,7 +232,7 @@ def _build(self, files, strict=True, usrdata=None):
             # Read the fits headers.  NOTE: If the file cannot be opened,
             # headarr will be None, and the subsequent loop over the meta keys
             # will fill the data dictionary with None values.
-            msgs.info(f'Adding metadata for {data["filename"][idx]}')
+            log.info(f'Adding metadata for {data["filename"][idx]}')
             headarr = self.spectrograph.get_headarr(_ifile, strict=strict)
             # Grab Meta
@@ -244,7 +244,7 @@ def _build(self, files, strict=True, usrdata=None):
                                              self.par['rdx']['ignore_bad_headers'] or strict))
                 if isinstance(value, str) and '#' in value:
                     value = value.replace('#', '')
-                    msgs.warning('Removing troublesome # character from {0}. Returning {1}.'.format(
+                    log.warning('Removing troublesome # character from {0}. Returning {1}.'.format(
                         meta_key, value))
                 data[meta_key].append(value)
@@ -264,7 +264,7 @@ def _build(self, files, strict=True, usrdata=None):
                    'frames either could not be opened, are empty, or have corrupt headers:\n'
             for file in bad_files:
                 msg += f'    {file}\n'
-            msgs.warning(msg)
+            log.warning(msg)
         # Return
         return data
@@ -720,20 +720,20 @@ def unique_configurations(self, force=False, copy=False, rm_none=False):
             return self._get_cfgs(copy=copy, rm_none=rm_none)
         if 'setup' in self.keys():
-            msgs.info('Setup column already set. Finding unique configurations.')
+            log.info('Setup column already set. Finding unique configurations.')
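The configuration logic below keys off `np.unique(..., return_index=True)` to map each unique setup to the first row that defines it. A small worked example of that idiom:

    import numpy as np

    setups = np.array(['A', 'A', 'B', 'None', 'B'])
    uniq, indx = np.unique(setups, return_index=True)
    # uniq -> ['A', 'B', 'None'], indx -> first occurrence of each value
    ignore = uniq == 'None'   # frames with no configuration are skipped
    configs = {u: int(i) for u, i in zip(uniq, indx) if u != 'None'}
    print(configs)  # {'A': 0, 'B': 2}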
             uniq, indx = np.unique(self['setup'], return_index=True)
             ignore = uniq == 'None'
             if np.sum(ignore) > 0:
-                msgs.warning(f'Ignoring {np.sum(ignore)} frames with configuration set to None.')
+                log.warning(f'Ignoring {np.sum(ignore)} frames with configuration set to None.')
             self.configs = {}
             for i in range(len(uniq)):
                 if ignore[i]:
                     continue
                 self.configs[uniq[i]] = self.get_configuration(indx[i])
-            msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
+            log.info('Found {0} unique configurations.'.format(len(self.configs)))
             return self._get_cfgs(copy=copy, rm_none=rm_none)
-        msgs.info('Using metadata to determine unique configurations.')
+        log.info('Using metadata to determine unique configurations.')
         # sort self.table
         mjd = self.table['mjd'].copy()
@@ -760,7 +760,7 @@ def unique_configurations(self, force=False, copy=False, rm_none=False):
         if len(self.spectrograph.configuration_keys()) == 0:
             self.configs = {}
             self.configs[next(cfg_gen)] = {}
-            msgs.info('All files assumed to be from a single configuration.')
+            log.info('All files assumed to be from a single configuration.')
             return self._get_cfgs(copy=copy, rm_none=rm_none)
         # Use the first file to set the first unique configuration
@@ -790,7 +790,7 @@ def unique_configurations(self, force=False, copy=False, rm_none=False):
             # Add the configuration
             self.configs[setup] = cfg
-        msgs.info(f'Found {len(self.configs)} unique configuration(s).')
+        log.info(f'Found {len(self.configs)} unique configuration(s).')
         return self._get_cfgs(copy=copy, rm_none=rm_none)
     def set_configurations(self, configs=None, force=False, fill=None):
@@ -927,7 +927,7 @@ def set_configurations(self, configs=None, force=False, fill=None):
                     # Warn the user that the matching meta values are not
                     # unique for this configuration.
                     if uniq_meta.size != 1:
-                        msgs.warning('When setting the instrument configuration for {0} '.format(ftype)
+                        log.warning('When setting the instrument configuration for {0} '.format(ftype)
                                      + 'frames, configuration {0} does not have unique '.format(cfg_key)
                                      + '{0} values.'.format(mkey))
                     # Find the frames of this type that match any of the
@@ -952,7 +952,7 @@ def set_configurations(self, configs=None, force=False, fill=None):
             cfg_gen = self.configuration_generator(start=len(np.unique(self.table['setup'][np.logical_not(not_setup)])))
             nw_setup = next(cfg_gen)
             self.configs[nw_setup] = {}
-            msgs.warning('All files that did not match any setup are grouped into a single configuration.')
+            log.warning('All files that did not match any setup are grouped into a single configuration.')
             self.table['setup'][not_setup] = nw_setup
     def clean_configurations(self):
@@ -982,7 +982,7 @@ def clean_configurations(self):
             # Check that the metadata are valid for this column.
             indx = np.isin(self[key], cfg_limits[key])
             if not np.all(indx):
-                msgs.warning('Found frames with invalid {0}.'.format(key))
+                log.warning('Found frames with invalid {0}.'.format(key))
                 good &= indx
         if np.all(good):
@@ -996,7 +996,7 @@ def clean_configurations(self):
         indx = np.where(np.logical_not(good))[0]
         for i in indx:
             msg += '    {0}\n'.format(self['filename'][i])
-        msgs.warning(msg)
+        log.warning(msg)
         # And remove 'em
         self.table = self.table[good]
@@ -1227,7 +1227,7 @@ def ignore_frames(self):
         if 'frametype' not in self.keys():
             raise PypeItError('To ignore frames, types must have been defined; run get_frame_types.')
         list_ignore_frames = list(ignore_frames.keys())
-        msgs.info('Unique configurations ignore frames with type: {0}'.format(list_ignore_frames))
+        log.info('Unique configurations ignore frames with type: {0}'.format(list_ignore_frames))
         for ftype in list_ignore_frames:
             ignmsk |= self.find_frames(ftype)
         # Isolate the frames to be ignored
@@ -1408,14 +1408,14 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True):
         """
         # Checks
         if 'frametype' in self.keys() or 'framebit' in self.keys():
-            msgs.warning('Removing existing frametype and framebit columns.')
+            log.warning('Removing existing frametype and framebit columns.')
         if 'frametype' in self.keys():
             del self.table['frametype']
         if 'framebit' in self.keys():
             del self.table['framebit']
         # Start
-        msgs.info("Typing files")
+        log.info("Typing files")
         type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())
         # Use the user-defined frame types from the input dictionary
@@ -1425,7 +1425,7 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True):
                 raise PypeItError('Your pypeit file has duplicate filenames which is not allowed.')
             else:
                 raise PypeItError('The user-provided dictionary does not match table length.')
-            msgs.info('Using user-provided frame types.')
+            log.info('Using user-provided frame types.')
         for ifile,ftypes in user.items():
             indx = self['filename'] == ifile
             try:
@@ -1456,19 +1456,19 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True):
         # Find the files without any types
         indx = np.logical_not(self.type_bitmask.flagged(type_bits))
         if np.any(indx):
-            msgs.info("Couldn't identify the following files:")
+            log.info("Couldn't identify the following files:")
             for f in self['filename'][indx]:
-                msgs.info(f)
+                log.info(f)
             if not flag_unknown:
                 raise PypeItError("Check these files before continuing")
-            msgs.warning("These files are commented out and will be ignored during the reduction.")
+            log.warning("These files are commented out and will be ignored during the reduction.")
             # Comment out the frames that could not be identified
             # first change the dtype of the filename column to be able to add a
             # self['filename'] = self['filename'].value.astype(f" 0:
-#                msgs.warning('PypeIt executed in quicklook mode. Only reducing science frames '
#                            'in the first combination group!')
#            break
#
@@ -437,13 +437,13 @@ def reduce_all(self):
                     self.save_exposure(frames[0], sci_spec2d, sci_sobjs, self.basename, history,
                                        skip_write_2d=self.par['scienceframe']['process']['skip_write_2d'])
                 else:
-                    msgs.warning('No spec2d and spec1d saved to file because the '
+                    log.warning('No spec2d and spec1d saved to file because the '
                                  'calibration/reduction was not successful for all the detectors')
             else:
-                msgs.warning(f'Output file: {self.fitstbl.construct_basename(frames[0])} already '
+                log.warning(f'Output file: {self.fitstbl.construct_basename(frames[0])} already '
                              'exists. Set overwrite=True to recreate and overwrite.')
-            msgs.info(f'Finished calibration group {calib_ID}')
+            log.info(f'Finished calibration group {calib_ID}')
         # Finish
         self.print_end_time()
@@ -540,33 +540,33 @@ def reduce_exposure(self, frames, bg_frames=None, std_outfile=None):
         objFind_list = []
         # Print status message
-        msgs_string = f'Reducing target {self.fitstbl['target'][frames[0]]}\n'
+        log_string = f'Reducing target {self.fitstbl['target'][frames[0]]}\n'
         # TODO: Print these when the frames are actually combined,
         # backgrounds are used, etc?
-        msgs_string += 'Combining frames:\n'
+        log_string += 'Combining frames:\n'
         for iframe in frames:
-            msgs_string += f'{self.fitstbl['filename'][iframe]}\n'
-        msgs.info(msgs_string)
+            log_string += f'{self.fitstbl['filename'][iframe]}\n'
+        log.info(log_string)
         if has_bg:
-            bg_msgs_string = ''
+            bg_log_string = ''
             for iframe in bg_frames:
-                bg_msgs_string += f'{self.fitstbl['filename'][iframe]}\n'
-            bg_msgs_string = '\nUsing background from frames:\n' + bg_msgs_string
-            msgs.info(bg_msgs_string)
+                bg_log_string += f'{self.fitstbl['filename'][iframe]}\n'
+            bg_log_string = '\nUsing background from frames:\n' + bg_log_string
+            log.info(bg_log_string)
         # Find the detectors to reduce
         detectors = self.select_detectors(self.spectrograph, self.par['rdx']['detnum'],
                                           slitspatnum=self.par['rdx']['slitspatnum'])
-        msgs.info(f'Detectors to work on: {detectors}')
+        log.info(f'Detectors to work on: {detectors}')
         # Loop on Detectors -- Calibrate, process image, find objects
         # TODO: Attempt to put in a multiprocessing call here?
         for self.det in detectors:
-            msgs.info(f'Reducing detector {self.det}')
+            log.info(f'Reducing detector {self.det}')
             # run calibration
             self.caliBrate = self.calib_one(frames, self.det)
             if not self.caliBrate.success:
-                msgs.warning(f'Calibrations for detector {self.det} were unsuccessful! The step '
+                log.warning(f'Calibrations for detector {self.det} were unsuccessful! The step '
                              f'that failed was {self.caliBrate.failed_step}. Continuing by '
                              'skipping this detector.')
                 continue
@@ -704,7 +704,7 @@ def calib_one(self, frames, det, stop_at_step:str=None):
         """
-        msgs.info(f'Building/loading calibrations for detector {det}')
+        log.info(f'Building/loading calibrations for detector {det}')
         # Instantiate Calibrations class
         user_slits = slittrace.merge_user_slit(self.par['rdx']['slitspatnum'],
                                                self.par['rdx']['maskIDs'])
@@ -764,7 +764,7 @@ def objfind_one(self, frames, det, bg_frames=None, std_outfile=None):
         self.objtype, self.setup, self.obstime, self.basename, self.binning \
                 = self.get_sci_metadata(frames[0], det)
-        msgs.info("Object finding begins for {} on det={}".format(self.basename, det))
+        log.info("Object finding begins for {} on det={}".format(self.basename, det))
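The reduce_exposure hunk above builds one multi-line status string and emits it with a single log call, so the frame list arrives as one record instead of many. A sketch of the same pattern (placeholder target and file names):

    import logging

    log = logging.getLogger("pypeit.example")
    filenames = ['frame_001.fits', 'frame_002.fits']  # stand-in file list

    log_string = 'Reducing target EXAMPLE\n'
    log_string += 'Combining frames:\n'
    for name in filenames:
        log_string += f'{name}\n'
    # One record keeps the block together in both console and logfile output
    log.info(log_string)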
         # Is this a standard star?
         self.std_redux = self.objtype == 'standard'
@@ -823,13 +823,13 @@ def objfind_one(self, frames, det, bg_frames=None, std_outfile=None):
                 (self.objtype == 'standard' and self.par['calibrations']['standardframe']['process']['spat_flexure_correct']) or \
                 manual_flexure:
             if (manual_flexure or manual_flexure == 0) and not (np.issubdtype(self.fitstbl[frames[0]]["shift"], np.integer)):
-                msgs.info(f'Implementing manual flexure of {manual_flexure}')
+                log.info(f'Implementing manual flexure of {manual_flexure}')
                 spat_flexure = np.float64(manual_flexure)
                 sciImg.spat_flexure = spat_flexure
             else:
-                msgs.info(f'Using auto-computed flexure')
+                log.info(f'Using auto-computed flexure')
                 spat_flexure = sciImg.spat_flexure
-            msgs.info(f'Flexure being used is: {spat_flexure}')
+            log.info(f'Flexure being used is: {spat_flexure}')
         # Build the initial sky mask
         initial_skymask = self.load_skyregions(initial_slits=self.spectrograph.pypeline != 'SlicerIFU',
                                                scifile=sciImg.files[0], frame=frames[0], spat_flexure=spat_flexure)
@@ -925,13 +925,13 @@ def load_skyregions(self, initial_slits=False, scifile=None, frame=None, spat_fl
                 raise PypeItError(f'Unable to find SkyRegions file: {regfile} . Create a SkyRegions '
                                   'frame using pypeit_skysub_regions, or change the user_regions to '
                                   'the percentage format. See documentation.')
-            msgs.info(f'Loading SkyRegions file: {regfile}')
+            log.info(f'Loading SkyRegions file: {regfile}')
             return buildimage.SkyRegions.from_file(regfile).image.astype(bool)
         skyregtxt = self.par['reduce']['skysub']['user_regions']
         if isinstance(skyregtxt, list):
             skyregtxt = ",".join(skyregtxt)
-        msgs.info(f'Generating skysub mask based on the user defined regions: {skyregtxt}')
+        log.info(f'Generating skysub mask based on the user defined regions: {skyregtxt}')
         # NOTE : Do not include spatial flexure here!
         # It is included when generating the mask in the return statement below
         slits_left, slits_right, _ \
@@ -1035,7 +1035,7 @@ def extract_one(self, frames, det, sciImg, bkg_redux_sciimg, objFind, initial_sk
             slits.bitmask.turn_on(slits.mask[flagged_slits], 'BADSKYSUB')
         if not self.par['reduce']['extraction']['skip_extraction']:
-            msgs.info(f"Extraction begins for {self.basename} on det={det}")
+            log.info(f"Extraction begins for {self.basename} on det={det}")
             # Instantiate Reduce object
             # Required for pipeline specific object
             # At instantiation, the fullmask in self.sciImg is modified
@@ -1051,7 +1051,7 @@ def extract_one(self, frames, det, sciImg, bkg_redux_sciimg, objFind, initial_sk
             slitgpm = np.logical_not(self.exTract.extract_bpm)
             slitshift = self.exTract.slitshift
         else:
-            msgs.info(f"Extraction skipped for {self.basename} on det={det}")
+            log.info(f"Extraction skipped for {self.basename} on det={det}")
             # Since the extraction was not performed, fill the arrays with the best available information
             skymodel, bkg_redux_skymodel, objmodel, ivarmodel, outmask, sobjs, waveImg, tilts = \
                 final_global_sky, \
@@ -1144,7 +1144,7 @@ def refframe_correct(self, slits, ra, dec, obstime, slitgpm=None, waveimg=None,
         vel_corr = 0.0
         if refframe in ['heliocentric', 'barycentric'] \
                 and self.par['calibrations']['wavelengths']['reference'] != 'pixel':
-            msgs.info("Performing a {0} correction".format(self.par['calibrations']['wavelengths']['refframe']))
+            log.info("Performing a {0} correction".format(self.par['calibrations']['wavelengths']['refframe']))
             # Calculate correction
             radec = ltu.radec_to_coord((ra, dec))
             vel, vel_corr = wave.geomotion_correct(radec, obstime,
                                                    self.spectrograph.telescope['elevation'],
                                                    refframe)
             # Apply correction to objects
-            msgs.info('Applying {0} correction = {1:0.5f} km/s'.format(refframe, vel))
+            log.info('Applying {0} correction = {1:0.5f} km/s'.format(refframe, vel))
             if (sobjs is not None) and (sobjs.nobj != 0):
                 # Loop on slits to apply
                 gd_slitord = slits.slitord_id[slitgpm]
@@ -1170,7 +1170,7 @@ def refframe_correct(self, slits, ra, dec, obstime, slitgpm=None, waveimg=None,
             if waveimg is not None:
                 waveimg *= vel_corr
         else:
-            msgs.info('A wavelength reference frame correction will not be performed.')
+            log.info('A wavelength reference frame correction will not be performed.')
         # Return the value of the correction and the corrected wavelength image
         return vel_corr, waveimg
@@ -1263,7 +1263,7 @@ def print_end_time(self):
         Print the elapsed time
         """
         # Capture the end time and print it to user
-        msgs.info(utils.get_time_string(time.perf_counter()-self.tstart))
+        log.info(utils.get_time_string(time.perf_counter()-self.tstart))
     # TODO: Move this to fitstbl?
     def show_science(self):
diff --git a/pypeit/pypeitdata.py b/pypeit/pypeitdata.py
index 27ef29220d..971092d35e 100644
--- a/pypeit/pypeitdata.py
+++ b/pypeit/pypeitdata.py
@@ -58,7 +58,7 @@
 from IPython import embed
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError, PypeItPathError
 from pypeit import cache
@@ -297,7 +297,7 @@ def get_file_path(self, data_file, force_update=False, to_pkg=None, return_forma
         # If it does not, inform the user and download it into the cache.
         # NOTE: This should not be required for from-source (dev) installations.
         if not quiet:
-            msgs.info(f'{data_file} does not exist in the expected package directory '
+            log.info(f'{data_file} does not exist in the expected package directory '
                       f'({self.path}). Checking cache or downloading the file now.')
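The pypeitdata hunk above implements a package-directory-first fallback: look for the file in the installed package, and only then consult or populate the download cache. A schematic sketch of that flow (the `fetch_remote_file` argument is a stand-in for the cache machinery, not a real signature):

    import logging
    from pathlib import Path

    log = logging.getLogger("pypeit.example")

    def get_file_path(package_dir, data_file, fetch_remote_file):
        """Return a local path, falling back to a download cache."""
        local = Path(package_dir) / data_file
        if local.exists():
            return local
        log.info(f'{data_file} does not exist in the expected package directory '
                 f'({package_dir}). Checking cache or downloading the file now.')
        cached = fetch_remote_file(data_file)  # hypothetical cache fetcher
        if cached is None:
            log.warning(f'File {data_file} not found in the cache.')
        return cached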
         # Get the path to the cached file
@@ -307,7 +307,7 @@ def get_file_path(self, data_file, force_update=False, to_pkg=None, return_forma
         _cached_file = cache.fetch_remote_file(data_file, subdir, remote_host=self.host,
                                                force_update=force_update, return_none=return_none)
         if _cached_file is None:
-            msgs.warning(f'File {data_file} not found in the cache.')
+            log.warning(f'File {data_file} not found in the cache.')
             return None
         # If we've made it this far, the file is being pulled from the cache.
diff --git a/pypeit/pypeitsetup.py b/pypeit/pypeitsetup.py
index 44bc3d652d..3690f6f5fc 100644
--- a/pypeit/pypeitsetup.py
+++ b/pypeit/pypeitsetup.py
@@ -10,7 +10,7 @@
 from IPython import embed
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit.metadata import PypeItMetaData
 from pypeit import inputfiles
@@ -194,7 +194,7 @@ def from_file_root(cls, root, spectrograph, extension=None):
         if nfiles == 0:
             raise PypeItError(f'Unable to find any raw files for {spec.name} in {root}!')
         else:
-            msgs.info(f'Found {nfiles} {spec.name} raw files.')
+            log.info(f'Found {nfiles} {spec.name} raw files.')
         return cls.from_rawfiles(files, spectrograph)
     @classmethod
@@ -253,7 +253,7 @@ def append_user_cfg(self, user_cfg:list=None):
     def nfiles(self):
         """The number of files to reduce."""
         if self.fitstbl is None:
-            msgs.warning('No fits files have been read!')
+            log.warning('No fits files have been read!')
         return 0 if self.fitstbl is None else len(self.fitstbl)
     def __repr__(self):
diff --git a/pypeit/scattlight.py b/pypeit/scattlight.py
index 6ccd416236..6e541c527b 100644
--- a/pypeit/scattlight.py
+++ b/pypeit/scattlight.py
@@ -11,7 +11,7 @@
 import numpy as np
-from pypeit import msgs
+from pypeit import log
 from pypeit import datamodel
 from pypeit import calibframe
 from pypeit.display import display
@@ -100,9 +100,9 @@ def get_model(self, image):
         model : `numpy.ndarray`_
             A model of the expected scattered light in the input image. Shape is (nspec, nspat).
         """
-        msgs.info("Generating a scattered light image")
+        log.info("Generating a scattered light image")
         if self.scattlight_param is None:
-            msgs.warning("No scattered light parameters are available")
+            log.warning("No scattered light parameters are available")
             return np.zeros_like(image)
         # Return the model of the scattered light
         return scattlight.scattered_light_model_pad(self.scattlight_param, image)
diff --git a/pypeit/scripts/arxiv_solution.py b/pypeit/scripts/arxiv_solution.py
index e1ea2d3b55..81f43729b2 100644
--- a/pypeit/scripts/arxiv_solution.py
+++ b/pypeit/scripts/arxiv_solution.py
@@ -6,7 +6,7 @@
 .. include:: ../include/links.rst
 """
 import time
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import par
 from pypeit import inputfiles
@@ -39,7 +39,7 @@ def main(args):
         chk_version = not args.try_old
         # Set the verbosity, and create a logfile if verbosity == 2
-#        msgs.set_logfile_and_verbosity('arxiv_solution', args.verbosity)
         # Check that a file has been provided
         if args.file is None:
diff --git a/pypeit/scripts/cache_github_data.py b/pypeit/scripts/cache_github_data.py
index b90a0035f4..82c4ed98ee 100644
--- a/pypeit/scripts/cache_github_data.py
+++ b/pypeit/scripts/cache_github_data.py
@@ -51,7 +51,7 @@ def main(args):
     import github
-    from pypeit import msgs
+    from pypeit import log
     from pypeit import cache
     from pypeit.pypeitdata import PypeItDataPath
@@ -79,7 +79,7 @@ def main(args):
     # Access the repo; use a token if one is available
     if os.getenv('GITHUB_TOKEN') is None:
-        msgs.warning('GITHUB_TOKEN environmental variable is not defined, meaning script will '
+        log.warning('GITHUB_TOKEN environmental variable is not defined, meaning script will '
                      'not authenticate a GitHub user via an OAuth token. Beware of rate limits!')
         auth = None
     else:
@@ -88,7 +88,7 @@ def main(args):
     # Cycle through all the data paths that use github as their host and
     # collect the contents of the directory
-    msgs.info('Searching github repository ... ')
+    log.info('Searching github repository ... ')
     contents = {}
     for datadir, meta in github_paths.items():
         if datadir not in selected_paths:
@@ -96,7 +96,7 @@ def main(args):
         # Recursively get the directory contents
         contents[meta['path']] \
                 = cache.github_contents(repo, branch, f'pypeit/data/{meta["path"]}')
-    msgs.info('Searching github repository ... done.')
+    log.info('Searching github repository ... done.')
     # Determine which files should be in the cache (or in the repo)
     # TODO: This is currently broken because not all spectrograph-dependent
@@ -106,7 +106,7 @@ def main(args):
     # a major effort to rename all those files.  The other (better?) option
     # would be to put all these reid_arxiv files into subdirectories named
     # after each spectrograph.
-    msgs.info('Parsing which files to include in cache.')
+    log.info('Parsing which files to include in cache.')
     to_download = {}
     for path, files in contents.items():
         nfiles = len(files)
@@ -124,9 +124,9 @@ def main(args):
     # Use the `get_file_path`` function to find each file locally or pull it
     # into the cache
-    msgs.info(f'Number of files to check against package installation/cache:')
+    log.info(f'Number of files to check against package installation/cache:')
     for path in contents.keys():
-        msgs.info(f'    {path}: {np.sum(to_download[path])}')
+        log.info(f'    {path}: {np.sum(to_download[path])}')
         files = np.array(contents[path])[to_download[path]]
         if len(files) == 0:
             continue
diff --git a/pypeit/scripts/chk_edges.py b/pypeit/scripts/chk_edges.py
index c482d543a0..6d1f791002 100644
--- a/pypeit/scripts/chk_edges.py
+++ b/pypeit/scripts/chk_edges.py
@@ -33,7 +33,7 @@ def main(args):
         from pathlib import Path
-        from pypeit import edgetrace, slittrace, msgs
+        from pypeit import edgetrace, slittrace, log
         chk_version = not args.try_old
@@ -51,7 +51,7 @@ def main(args):
             slit_filename = Path(args.slits_file).absolute()
             if not slit_filename.exists():
                 # But doesn't exist
-                msgs.warning(f'{slit_filename} does not exist!')
+                log.warning(f'{slit_filename} does not exist!')
                 # Set the file name to None so that the code will try to find
                 # the default file
                 slit_filename = None
@@ -59,7 +59,7 @@ def main(args):
             slit_filename = slittrace.SlitTraceSet.construct_file_name(
                 edges.traceimg.calib_key, calib_dir=edges.traceimg.calib_dir)
             if not slit_filename.exists():
-                msgs.warning(f'{slit_filename} does not exist!')
+                log.warning(f'{slit_filename} does not exist!')
         # NOTE: At this point, slit_filename *must* be a Path object
         slits = slittrace.SlitTraceSet.from_file(slit_filename, chk_version=chk_version) \
diff --git a/pypeit/scripts/chk_flexure.py b/pypeit/scripts/chk_flexure.py
index 54ef522acb..dec368e2af 100644
--- a/pypeit/scripts/chk_flexure.py
+++ b/pypeit/scripts/chk_flexure.py
@@ -31,7 +31,7 @@ def main(args):
     from IPython import embed
     from astropy.io import fits
-    from pypeit import msgs
+    from pypeit import log
     from pypeit import PypeItError
     from pypeit import specobjs
     from pypeit import spec2dobj
@@ -42,7 +42,7 @@ def main(args):
     # Loop over the input files
     for in_file in args.input_file:
-        msgs.info(f'Checking fluxure for file: {in_file}')
+        log.info(f'Checking flexure for file: {in_file}')
         # What kind of file are we??
         hdul = fits.open(in_file)
diff --git a/pypeit/scripts/chk_for_calibs.py b/pypeit/scripts/chk_for_calibs.py
index 0a76ba1d70..c8c126a223 100644
--- a/pypeit/scripts/chk_for_calibs.py
+++ b/pypeit/scripts/chk_for_calibs.py
@@ -54,7 +54,7 @@ def main(args):
     from pypeit.pypeitsetup import PypeItSetup
     from pypeit import calibrations
-    from pypeit import msgs
+    from pypeit import log
     from pypeit.par import PypeItPar
     import shutil
@@ -77,7 +77,7 @@ def main(args):
     ps.run(setup_only=True)
     is_science = ps.fitstbl.find_frames('science')
-    msgs.info('Loaded spectrograph {0}'.format(ps.spectrograph.name))
+    log.info('Loaded spectrograph {0}'.format(ps.spectrograph.name))
     # Unique configurations
     uniq_cfg = ps.fitstbl.unique_configurations(copy=True)
@@ -104,10 +104,10 @@ def main(args):
             answers['scifiles'][i] = None
             continue
-        msgs.info('=======================================================================')
-        msgs.info('Working on setup: {}'.format(setup))
-        msgs.info(str(uniq_cfg[setup]))
-        msgs.info('=======================================================================')
+        log.info('=======================================================================')
+        log.info('Working on setup: {}'.format(setup))
+        log.info(str(uniq_cfg[setup]))
+        log.info('=======================================================================')
         # TODO: Make the snippet below, which is also in the init of
         # PypeIt a method somewhere
@@ -121,10 +121,10 @@ def main(args):
             if 'science' in row['frametype'] or 'standard' in row['frametype']:
                 config_specific_file = data_files[idx]
         if config_specific_file is not None:
-            msgs.info('Setting configuration-specific parameters using {0}'.format(
+            log.info('Setting configuration-specific parameters using {0}'.format(
                 os.path.split(config_specific_file)[1]))
         else:
-            msgs.warning('No science or standard frame. Punting..')
+            log.warning('No science or standard frame. Punting..')
             answers['pass'][i] = False
             answers['scifiles'][i] = None
             continue
@@ -137,19 +137,19 @@ def main(args):
         par = PypeItPar.from_cfg_lines(cfg_lines=spectrograph_cfg_lines)
         # Print science frames
         if np.any(in_cfg & is_science):
-            msgs.info('Your science frames are: {0}'.format(
+            log.info('Your science frames are: {0}'.format(
                 ps.fitstbl['filename'][in_cfg & is_science].tolist()))
             answers['scifiles'][i] \
                     = ', '.join(ps.fitstbl['filename'][in_cfg & is_science].tolist())
         else:
-            msgs.warning("This setup has no science frames!")
+            log.warning("This setup has no science frames!")
             answers['scifiles'][i] = ''
         # Check!
         answers['pass'][i] = calibrations.check_for_calibs(par, ps.fitstbl, raise_error=False,
                                                            cut_cfg=in_cfg)
         if not answers['pass'][i]:
-            msgs.warning("Setup {} did not pass the calibration check!".format(setup))
+            log.warning("Setup {} did not pass the calibration check!".format(setup))
     print('= RESULTS ============================================')
     # Print
diff --git a/pypeit/scripts/chk_noise_1dspec.py b/pypeit/scripts/chk_noise_1dspec.py
index 252ed627ec..93bc916df0 100644
--- a/pypeit/scripts/chk_noise_1dspec.py
+++ b/pypeit/scripts/chk_noise_1dspec.py
@@ -16,7 +16,7 @@
 from pypeit.scripts import scriptbase
 from pypeit import utils
-from pypeit import msgs
+from pypeit import log
 from pypeit import specobjs
 from pypeit.onespec import OneSpec
@@ -254,7 +254,7 @@ def main(args):
             input_mask &= lbda < args.wavemax
         if lbda[input_mask].size < 10:
-            msgs.warning("The spectrum was cut down to <10 pixels. Skipping")
+            log.warning("The spectrum was cut down to <10 pixels. Skipping")
             continue
Skipping") continue # determine if plotting the shaded area in the plot that shows the diff --git a/pypeit/scripts/chk_noise_2dspec.py b/pypeit/scripts/chk_noise_2dspec.py index eb35aa0f21..e8360093fe 100644 --- a/pypeit/scripts/chk_noise_2dspec.py +++ b/pypeit/scripts/chk_noise_2dspec.py @@ -16,7 +16,7 @@ from astropy.table import Table from pypeit import spec2dobj -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import io from pypeit import utils @@ -275,7 +275,7 @@ def main(args): # Cut down chi_select = chi_slit * input_mask if np.all(chi_select == 0): - msgs.warning(f"All of the chi values are masked in slit {pypeit_id} of {basename}!") + log.warning(f"All of the chi values are masked in slit {pypeit_id} of {basename}!") continue # Flux to show @@ -294,7 +294,7 @@ def main(args): # Wavelengths if spec2DObj.waveimg[input_mask].size == 0: - msgs.warning(f"None of the wavelength values work in slit {pypeit_id} of {basename}!") + log.warning(f"None of the wavelength values work in slit {pypeit_id} of {basename}!") continue lbda_1darray = spec2DObj.waveimg[:, mid_spat] diff --git a/pypeit/scripts/chk_plugins.py b/pypeit/scripts/chk_plugins.py index bd612df597..e576812d88 100644 --- a/pypeit/scripts/chk_plugins.py +++ b/pypeit/scripts/chk_plugins.py @@ -12,12 +12,12 @@ class ChkPlugins(scriptbase.ScriptBase): def main(args): from pypeit.display import required_plugins, plugins_available - from pypeit import msgs + from pypeit import log from pypeit import PypeItError success, report = plugins_available(return_report=True) if not success: raise PypeItError(report) - msgs.info('All required plugins found: {0}'.format(', '.join(required_plugins))) + log.info('All required plugins found: {0}'.format(', '.join(required_plugins))) diff --git a/pypeit/scripts/chk_scattlight.py b/pypeit/scripts/chk_scattlight.py index 1a8b093aa4..5767d9bfc3 100644 --- a/pypeit/scripts/chk_scattlight.py +++ b/pypeit/scripts/chk_scattlight.py @@ -35,7 +35,7 @@ def get_parser(cls, width=None): def main(args): from pypeit import scattlight, spec2dobj, slittrace - from pypeit import msgs + from pypeit import log from pypeit import PypeItError, PypeItDataModelError from pypeit.images.detector_container import DetectorContainer from pypeit import io @@ -65,7 +65,7 @@ def main(args): spec2D = spec2dobj.Spec2DObj.from_file(args.spec2d, detname, chk_version=chk_version) except PypeItDataModelError: - msgs.warning(f"Error loading spec2d file {args.spec2d} - attempting to load science image from fits") + log.warning(f"Error loading spec2d file {args.spec2d} - attempting to load science image from fits") spec2D = None # Now set the frame to be displayed diff --git a/pypeit/scripts/chk_wavecalib.py b/pypeit/scripts/chk_wavecalib.py index 8372b069cf..83ac86d2bb 100644 --- a/pypeit/scripts/chk_wavecalib.py +++ b/pypeit/scripts/chk_wavecalib.py @@ -27,7 +27,7 @@ def main(args): from IPython import embed from astropy.io import fits - from pypeit import wavecalib, spec2dobj, msgs + from pypeit import wavecalib, spec2dobj, log from pypeit import PypeItError chk_version = not args.try_old diff --git a/pypeit/scripts/clean_cache.py b/pypeit/scripts/clean_cache.py index 2de81a4917..794e778358 100644 --- a/pypeit/scripts/clean_cache.py +++ b/pypeit/scripts/clean_cache.py @@ -34,7 +34,7 @@ def main(args): from IPython import embed import astropy.utils.data - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import cache @@ -42,7 +42,7 @@ def main(args): # 
Print the full contents contents = cache.search_cache(None, path_only=False) if len(contents) == 0: - msgs.info('Cache is empty!') + log.info('Cache is empty!') return cache.list_cache_contents(contents) return @@ -52,7 +52,7 @@ def main(args): if args.clear: # Removes the entire cache - msgs.info('Clearing the cache!') + log.info('Clearing the cache!') astropy.utils.data.clear_download_cache(pkgname='pypeit') return @@ -78,13 +78,13 @@ def main(args): # For now, we only need the urls. contents = list(contents.keys()) if len(contents) == 0: - msgs.warning('No files to remove.') + log.warning('No files to remove.') return # Report - msgs.info('Removing the following files from the cache:') + log.info('Removing the following files from the cache:') for c in contents: - msgs.info(f' {c}') + log.info(f' {c}') # TODO: Require confirmation? # Remove the selected contents. cache_url argument must be a list diff --git a/pypeit/scripts/coadd_1dspec.py b/pypeit/scripts/coadd_1dspec.py index 8489548754..9529fe5fbb 100644 --- a/pypeit/scripts/coadd_1dspec.py +++ b/pypeit/scripts/coadd_1dspec.py @@ -13,7 +13,7 @@ from astropy.io import fits from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import inputfiles from pypeit import coadd1d @@ -152,7 +152,7 @@ def main(args): """ Runs the 1d coadding steps """ # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('coadd_1dspec', args.verbosity) +# log.set_logfile_and_verbosity('coadd_1dspec', args.verbosity) # Load the file #config_lines, spec1dfiles, objids = read_coaddfile(args.coadd1d_file) @@ -202,7 +202,7 @@ def main(args): coAdd1d.run() # Save to file coAdd1d.save(coaddfile) - msgs.info('Coadding complete') + log.info('Coadding complete') diff --git a/pypeit/scripts/coadd_2dspec.py b/pypeit/scripts/coadd_2dspec.py index d57edbda64..20603dd7ce 100644 --- a/pypeit/scripts/coadd_2dspec.py +++ b/pypeit/scripts/coadd_2dspec.py @@ -57,7 +57,7 @@ def main(args): from astropy.io import fits - from pypeit import msgs + from pypeit import log from pypeit import coadd2d from pypeit import inputfiles from pypeit import specobjs @@ -65,7 +65,7 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('coadd_2dspec', args.verbosity) +# log.set_logfile_and_verbosity('coadd_2dspec', args.verbosity) # Load the file coadd2dFile = inputfiles.Coadd2DFile.from_file(args.coadd2d_file) @@ -74,16 +74,16 @@ def main(args): # Check some of the parameters # TODO Heliocentric for coadd2d needs to be thought through. Currently turning it off. if par['calibrations']['wavelengths']['refframe'] != 'observed': - msgs.warning('Wavelength reference frame shift (e.g., heliocentric correction) not yet ' + log.warning('Wavelength reference frame shift (e.g., heliocentric correction) not yet ' 'fully developed. Ignoring input and setting "refframe = observed".') par['calibrations']['wavelengths']['refframe'] = 'observed' # TODO Flexure correction for coadd2d needs to be thought through. Currently turning it off. if par['flexure']['spec_method'] != 'skip': - msgs.warning('Spectroscopic flexure correction not yet fully developed. Skipping.') + log.warning('Spectroscopic flexure correction not yet fully developed. 
Skipping.') par['flexure']['spec_method'] = 'skip' # TODO This is currently the default for 2d coadds, but we need a way to toggle it on/off if not par['reduce']['findobj']['skip_skysub']: - msgs.warning('Must skip sky subtraction when finding objects (i.e., sky should have ' + log.warning('Must skip sky subtraction when finding objects (i.e., sky should have ' 'been subtracted during primary reduction procedure). Skipping.') par['reduce']['findobj']['skip_skysub'] = True @@ -110,13 +110,13 @@ def main(args): find_negative = head2d['FINDOBJ'] == 'POS_NEG' # Print status message - msgs_string = f'Reducing target {basename}\n' - msgs_string += f"Coadding frame sky-subtracted with {head2d['SKYSUB']}\n" - msgs_string += f"Searching for objects that are {head2d['FINDOBJ']}\n" - msgs_string += 'Combining frames in 2d coadd:\n' + log_string = f'Reducing target {basename}\n' + log_string += f"Coadding frame sky-subtracted with {head2d['SKYSUB']}\n" + log_string += f"Searching for objects that are {head2d['FINDOBJ']}\n" + log_string += 'Combining frames in 2d coadd:\n' for f, file in enumerate(spec2d_files): - msgs_string += f'Exp {f}: {Path(file).name}\n' - msgs.info(msgs_string) + log_string += f'Exp {f}: {Path(file).name}\n' + log.info(log_string) # Instantiate the sci_dict # TODO Why do we need this sci_dict at all?? JFH @@ -130,7 +130,7 @@ def main(args): detectors = spectrograph.select_detectors(subset=par['rdx']['detnum'] if par['coadd2d']['only_slits'] is None else par['coadd2d']['only_slits']) - msgs.info(f'Detectors to work on: {detectors}') + log.info(f'Detectors to work on: {detectors}') # container for specobjs all_specobjs = specobjs.SpecObjs() @@ -146,14 +146,14 @@ def main(args): only_dets, only_spat_ids = parse.parse_slitspatnum(par['coadd2d']['only_slits']) if par['coadd2d']['exclude_slits'] is not None: if par['coadd2d']['only_slits'] is not None: - msgs.warning('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. ' + log.warning('Both `only_slits` and `exclude_slits` are provided. They are mutually exclusive. 
' 'Using `only_slits` and ignoring `exclude_slits`') else: exclude_dets, exclude_spat_ids = parse.parse_slitspatnum(par['coadd2d']['exclude_slits']) # Loop on detectors for det in detectors: - msgs.info("Working on detector {0}".format(det)) + log.info("Working on detector {0}".format(det)) detname = spectrograph.get_det_name(det) this_only_slits = only_spat_ids[only_dets == detname] if np.any(only_dets == detname) else None @@ -173,7 +173,7 @@ def main(args): # Create the pseudo images pseudo_dict = coadd.create_pseudo_image(coadd_dict_list) # Reduce - msgs.info('Running the extraction') + log.info('Running the extraction') # TODO -- This should mirror what is in pypeit.extract_one diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 7220898acf..47011b59f8 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -27,7 +27,7 @@ def get_parser(cls, width=None): def main(args): import time - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import par from pypeit import inputfiles @@ -36,7 +36,7 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('coadd_datacube', args.verbosity) +# log.set_logfile_and_verbosity('coadd_datacube', args.verbosity) # Check that a file has been provided if args.file is None: @@ -53,7 +53,7 @@ def main(args): # If detector was passed as an argument override whatever was in the coadd3d file if args.det is not None: - msgs.info("Restricting to detector={}".format(args.det)) + log.info("Restricting to detector={}".format(args.det)) parset['rdx']['detnum'] = int(args.det) # Extract the options @@ -73,4 +73,4 @@ def main(args): # Coadd the files coadd.run() - msgs.info(utils.get_time_string(time.time()-tstart)) + log.info(utils.get_time_string(time.time()-tstart)) diff --git a/pypeit/scripts/collate_1d.py b/pypeit/scripts/collate_1d.py index 8fbfc12a46..a1604cdad2 100644 --- a/pypeit/scripts/collate_1d.py +++ b/pypeit/scripts/collate_1d.py @@ -20,7 +20,7 @@ from pypeit.par import pypeitpar from pypeit.spectrographs.util import load_spectrograph from pypeit import coadd1d -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import par from pypeit.utils import is_float @@ -172,25 +172,25 @@ def exclude_source_objects(source_objects, exclude_map, par): if par['collate1d']['exclude_serendip'] and sobj.MASKDEF_OBJNAME == 'SERENDIP': msg = f'Excluding SERENDIP object from {sobj.NAME} in {spec1d_file}' - msgs.info(msg) + log.info(msg) excluded_messages.append(msg) continue if par['collate1d']['wv_rms_thresh'] is not None and sobj.WAVE_RMS > par['collate1d']['wv_rms_thresh']: msg = f'Excluding {sobj.NAME} in {spec1d_file} due to wave_rms {sobj.WAVE_RMS} > threshold {par["collate1d"]["wv_rms_thresh"]}' - msgs.info(msg) + log.info(msg) excluded_messages.append(msg) continue if sobj.MASKDEF_ID in exclude_map: msg = f'Excluding {sobj.NAME} with mask id: {sobj.MASKDEF_ID} in {spec1d_file} because of flags {exclude_map[sobj.MASKDEF_ID]}' - msgs.info(msg) + log.info(msg) excluded_messages.append(msg) continue if sobj.OPT_COUNTS is None and sobj.BOX_COUNTS is None: msg = f'Excluding {sobj.NAME} in {spec1d_file} because of missing both OPT_COUNTS and BOX_COUNTS' - msgs.warning(msg) + log.warning(msg) excluded_messages.append(msg) continue @@ -205,7 +205,7 @@ def exclude_source_objects(source_objects, exclude_map, par): msg = 
f'Excluding {sobj.NAME} in {spec1d_file} because all of OPT_COUNTS was masked out. Consider changing ex_value to "BOX".' if msg is not None: - msgs.warning(msg) + log.warning(msg) excluded_messages.append(msg) continue @@ -220,14 +220,14 @@ def exclude_source_objects(source_objects, exclude_map, par): msg = f'Excluding {sobj.NAME} in {spec1d_file} because all of BOX_COUNTS was masked out. Consider changing ex_value to "OPT".' if msg is not None: - msgs.warning(msg) + log.warning(msg) excluded_messages.append(msg) continue filtered_objects.append(source_object) return (filtered_objects, excluded_messages) -def read_spec1d_files(par, spec1d_files, failure_msgs): +def read_spec1d_files(par, spec1d_files, failure_log): """ Read spec1d files. @@ -236,7 +236,7 @@ def read_spec1d_files(par, spec1d_files, failure_msgs): Parameters for collating, fluxing, and coadding. spec1d_files (list of str): List of spec1d files to read. - failure_msgs(list of str): + failure_log(list of str): Return parameter describing any failures that occurred when reading. Returns: @@ -253,14 +253,14 @@ def read_spec1d_files(par, spec1d_files, failure_msgs): good_spec1d_files.append(spec1d_file) except Exception as e: formatted_exception = traceback.format_exc() - msgs.warning(formatted_exception) - msgs.warning(f"Failed to read {spec1d_file}, skipping it.") - failure_msgs.append(f"Failed to read {spec1d_file}, skipping it.") - failure_msgs.append(formatted_exception) + log.warning(formatted_exception) + log.warning(f"Failed to read {spec1d_file}, skipping it.") + failure_log.append(f"Failed to read {spec1d_file}, skipping it.") + failure_log.append(formatted_exception) return specobjs_list, good_spec1d_files -def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): +def flux(par, spectrograph, spec1d_files, failed_fluxing_log): """ Flux calibrate spec1d files using archived sens func files. @@ -271,7 +271,7 @@ def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): Spectrograph for the files to flux. spec1d_files (list of str): List of spec1d files to flux calibrate. - failed_fluxing_msgs(list of str): + failed_fluxing_log(list of str): Return parameter describing any failures that occurred when fluxing. 
Returns: @@ -293,25 +293,25 @@ def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): sens_file = sf_archive.get_archived_sensfile(spec1d_file) except Exception: formatted_exception = traceback.format_exc() - msgs.warning(formatted_exception) - msgs.warning(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") - failed_fluxing_msgs.append(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") - failed_fluxing_msgs.append(formatted_exception) + log.warning(formatted_exception) + log.warning(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") + failed_fluxing_log.append(f"Could not find archived sensfunc to flux {spec1d_file}, skipping it.") + failed_fluxing_log.append(formatted_exception) continue # Flux calibrate the spec1d file try: - msgs.info(f"Running flux calibrate on {spec1d_file}") + log.info(f"Running flux calibrate on {spec1d_file}") FxCalib = fluxcalibrate.flux_calibrate([spec1d_file], [sens_file], par=par['fluxcalib'], chk_version=par['rdx']['chk_version']) flux_calibrated_files.append(spec1d_file) except Exception: formatted_exception = traceback.format_exc() - msgs.warning(formatted_exception) - msgs.warning(f"Failed to flux calibrate {spec1d_file}, skipping it.") - failed_fluxing_msgs.append(f"Failed to flux calibrate {spec1d_file}, skipping it.") - failed_fluxing_msgs.append(formatted_exception) + log.warning(formatted_exception) + log.warning(f"Failed to flux calibrate {spec1d_file}, skipping it.") + failed_fluxing_log.append(f"Failed to flux calibrate {spec1d_file}, skipping it.") + failed_fluxing_log.append(formatted_exception) continue # Return the succesfully fluxed files @@ -346,10 +346,10 @@ def build_coadd_file_name(source_object): return f'{coord_portion}_{instrument_name}_{date_portion}.fits' -def refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs): +def refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_log): refframe = par['collate1d']['refframe'] - msgs.info(f"Performing a {refframe} correction") + log.info(f"Performing a {refframe} correction") for spec1d in spec1d_files: # Get values from the fits header needed to calculate the correction @@ -361,8 +361,8 @@ def refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs): obstime = Time(sobjs.header['MJD'], format='mjd') except Exception as e: msg = f'Failed to perform {refframe} correction on {spec1d}: {e}' - msgs.info(msg) - spec1d_failure_msgs.append(msg) + log.info(msg) + spec1d_failure_log.append(msg) continue corrected_at_least_one = False @@ -370,8 +370,8 @@ def refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs): if sobj['VEL_CORR'] is not None: # Don't double correct msg = f"Not performing {refframe} correction for {spec1d} object {sobj['NAME']} because it has already been corrected." 
- msgs.info(msg) - spec1d_failure_msgs.append(msg) + log.info(msg) + spec1d_failure_log.append(msg) continue # Use the SpecObj RA/DEC if it's available, otherwise use the value from the header if sobj['RA'] is not None and sobj['DEC'] is not None: @@ -385,7 +385,7 @@ def refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs): spectrograph.telescope['elevation'], refframe) # Apply correction to objects - msgs.info(f'Applying {refframe} correction to {spec1d} object {sobj["NAME"]} = {vel} km/s, {vel_corr}') + log.info(f'Applying {refframe} correction to {spec1d} object {sobj["NAME"]} = {vel} km/s, {vel_corr}') sobj.apply_helio(vel_corr, refframe) corrected_at_least_one = True if corrected_at_least_one: @@ -432,17 +432,17 @@ def coadd(par, coaddfile, source): if par['collate1d']['ignore_flux'] is True: # Use non fluxed if asked to - msgs.info(f"Ignoring flux for {coaddfile}.") + log.info(f"Ignoring flux for {coaddfile}.") par['coadd1d']['flux_value'] = False elif False in [x[flux_key] is not None for x in source.spec_obj_list]: # Do not use fluxed data if one or more objects have not been flux calibrated - msgs.info(f"Not all spec1ds for {coaddfile} are flux calibrated, using counts instead.") + log.info(f"Not all spec1ds for {coaddfile} are flux calibrated, using counts instead.") par['coadd1d']['flux_value'] = False else: # Use fluxed data - msgs.info(f"Using flux for {coaddfile}.") + log.info(f"Using flux for {coaddfile}.") par['coadd1d']['flux_value'] = True @@ -484,18 +484,18 @@ def find_spec2d_from_spec1d(spec1d_files): return spec2d_files -def write_warnings(par, excluded_obj_msgs, failed_source_msgs, spec1d_failure_msgs, start_time, total_time): +def write_warnings(par, excluded_obj_log, failed_source_log, spec1d_failure_log, start_time, total_time): """ Write gathered warning messages to a `collate_warnings.txt` file. Args: - excluded_obj_msgs (:obj:`list` of :obj:`str`): + excluded_obj_log (:obj:`list` of :obj:`str`): Messages about which objects were excluded from collating and why. - failed_source_msgs (:obj:`list` of :obj:`str`): + failed_source_log (:obj:`list` of :obj:`str`): Messages about which objects failed coadding and why. - spec1d_failure_msgs (:obj:`list` of :obj:`str`): + spec1d_failure_log (:obj:`list` of :obj:`str`): Messages about failures with spec1d files and why. 
""" @@ -506,17 +506,17 @@ def write_warnings(par, excluded_obj_msgs, failed_source_msgs, spec1d_failure_ms print(f"\nStarted {start_time.isoformat(sep=' ')}", file=f) print(f"Duration: {total_time}", file=f) - if len(spec1d_failure_msgs) > 0: + if len(spec1d_failure_log) > 0: print("\nspec1d_* failures\n", file=f) - for msg in spec1d_failure_msgs: + for msg in spec1d_failure_log: print(msg, file=f) print("\nExcluded Objects:\n", file=f) - for msg in excluded_obj_msgs: + for msg in excluded_obj_log: print(msg, file=f) print("\nFailed to Coadd:\n", file=f) - for msg in failed_source_msgs: + for msg in failed_source_log: print(msg, file=f) def build_parameters(args): @@ -730,7 +730,7 @@ def get_parser(cls, width=None): def main(args): # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('collate_1d', args.verbosity) +# log.set_logfile_and_verbosity('collate_1d', args.verbosity) start_time = datetime.now() (par, spectrograph, spec1d_files) = build_parameters(args) @@ -772,7 +772,7 @@ def main(args): exclude_map = dict() # Flux the spec1ds based on a archived sensfunc - spec1d_failure_msgs = [] + spec1d_failure_log = [] copied_spec1d = False if par['collate1d']['flux'] and not args.dry_run: if par['collate1d']['spec1d_outdir'] is not None: @@ -780,7 +780,7 @@ def main(args): # if requested spec1d_files = copy_spec1d_to_outdir(spec1d_files, par['collate1d']['spec1d_outdir']) copied_spec1d = True - spec1d_files = flux(par, spectrograph, spec1d_files, spec1d_failure_msgs) + spec1d_files = flux(par, spectrograph, spec1d_files, spec1d_failure_log) # Perform reference frame correction @@ -790,35 +790,35 @@ def main(args): # if requested and fluxing hasn't already done so spec1d_files = copy_spec1d_to_outdir(spec1d_files, par['collate1d']['spec1d_outdir']) - refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs) + refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_log) # Read in the spec1d files - specobjs_to_coadd, spec1d_files = read_spec1d_files(par, spec1d_files, spec1d_failure_msgs) + specobjs_to_coadd, spec1d_files = read_spec1d_files(par, spec1d_files, spec1d_failure_log) # Build source objects from spec1d file, this list is not collated source_objects = SourceObject.build_source_objects(specobjs_to_coadd, spec1d_files, par['collate1d']['match_using']) # Filter out unwanted SpecObj objects based on parameters - (objects_to_coadd, excluded_obj_msgs) = exclude_source_objects(source_objects, exclude_map, par) + (objects_to_coadd, excluded_obj_log) = exclude_source_objects(source_objects, exclude_map, par) # Collate the spectra source_list = collate_spectra_by_source(objects_to_coadd, tolerance) # Coadd the spectra successful_source_list = [] - failed_source_msgs = [] + failed_source_log = [] for source in source_list: coaddfile = os.path.join(par['collate1d']['outdir'], build_coadd_file_name(source)) - msgs.info(f'Creating {coaddfile} from the following sources:') + log.info(f'Creating {coaddfile} from the following sources:') for i in range(len(source.spec_obj_list)): - msgs.info(f' {source.spec1d_file_list[i]}: {source.spec_obj_list[i].NAME} ' + log.info(f' {source.spec1d_file_list[i]}: {source.spec_obj_list[i].NAME} ' f'({source.spec_obj_list[i].MASKDEF_OBJNAME})') # Exclude sources with a single object to coadd if len(source.spec_obj_list) == 1: - excluded_obj_msgs.append(f"Excluding {source.spec_obj_list[0].NAME} in {source.spec1d_file_list[0]} because there's no other SpecObj to coadd with.") + 
excluded_obj_log.append(f"Excluding {source.spec_obj_list[0].NAME} in {source.spec1d_file_list[0]} because there's no other SpecObj to coadd with.") continue if not args.dry_run: @@ -827,10 +827,10 @@ def main(args): successful_source_list.append(source) except Exception: formatted_exception = traceback.format_exc() - msgs.warning(formatted_exception) - msgs.warning(f"Failed to coadd {coaddfile}, skipping") - failed_source_msgs.append(f"Failed to coadd {coaddfile}:") - failed_source_msgs.append(formatted_exception) + log.warning(formatted_exception) + log.warning(f"Failed to coadd {coaddfile}, skipping") + failed_source_log.append(f"Failed to coadd {coaddfile}:") + failed_source_log.append(formatted_exception) # Create collate_report.dat archive = create_report_archive(par) @@ -839,9 +839,9 @@ def main(args): total_time = datetime.now() - start_time - write_warnings(par, excluded_obj_msgs, failed_source_msgs, - spec1d_failure_msgs, start_time, total_time) + write_warnings(par, excluded_obj_log, failed_source_log, + spec1d_failure_log, start_time, total_time) - msgs.info(f'Total duration: {total_time}') + log.info(f'Total duration: {total_time}') return 0 diff --git a/pypeit/scripts/compile_wvarxiv.py b/pypeit/scripts/compile_wvarxiv.py index 138250f01e..e6d9d7ae64 100644 --- a/pypeit/scripts/compile_wvarxiv.py +++ b/pypeit/scripts/compile_wvarxiv.py @@ -6,7 +6,7 @@ .. include:: ../include/links.rst """ import time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import par from pypeit import inputfiles @@ -82,9 +82,9 @@ def main(args): else: reid_table = join(old_table, reid_table) reid_table.write(out_path, format='fits', overwrite=args.append) - msgs.info(f'Wrote the compiled wvarxiv file to {out_path}.') + log.info(f'Wrote the compiled wvarxiv file to {out_path}.') # If the file does not exist, just write it out else: reid_table.write(out_path, format='fits') - msgs.info(f'Wrote the compiled wvarxiv file to {out_path}.') + log.info(f'Wrote the compiled wvarxiv file to {out_path}.') diff --git a/pypeit/scripts/extract_datacube.py b/pypeit/scripts/extract_datacube.py index 59e8d19276..65bb0e6036 100644 --- a/pypeit/scripts/extract_datacube.py +++ b/pypeit/scripts/extract_datacube.py @@ -36,7 +36,7 @@ def get_parser(cls, width=None): def main(args): import time - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import par from pypeit import inputfiles @@ -45,7 +45,7 @@ def main(args): from pypeit.coadd3d import DataCube # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('extract_datacube', args.verbosity) +# log.set_logfile_and_verbosity('extract_datacube', args.verbosity) # Check that a file has been provided if args.file is None: @@ -77,4 +77,4 @@ def main(args): extcube.extract_spec(parset['reduce'], outname=outname, boxcar_radius=boxcar_radius, overwrite=args.overwrite) # Report the extraction time - msgs.info(utils.get_time_string(time.time()-tstart)) + log.info(utils.get_time_string(time.time()-tstart)) diff --git a/pypeit/scripts/flux_calib.py b/pypeit/scripts/flux_calib.py index ec0a68393c..6af4ffc6e8 100644 --- a/pypeit/scripts/flux_calib.py +++ b/pypeit/scripts/flux_calib.py @@ -8,7 +8,7 @@ from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import inputfiles from pypeit.spectrographs.util import load_spectrograph @@ -79,7 +79,7 @@ def main(args): chk_version = not args.try_old # Set 
the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('flux_calib', args.verbosity) +# log.set_logfile_and_verbosity('flux_calib', args.verbosity) # Load the file fluxFile = inputfiles.FluxFile.from_file(args.flux_file) @@ -118,6 +118,6 @@ def main(args): # Instantiate fluxcalibrate.flux_calibrate(fluxFile.filenames, sensfiles, par=par['fluxcalib'], chk_version=chk_version) - msgs.info('Flux calibration complete') + log.info('Flux calibration complete') return 0 diff --git a/pypeit/scripts/flux_setup.py b/pypeit/scripts/flux_setup.py index f1a25e8b40..e65a1b1b8d 100644 --- a/pypeit/scripts/flux_setup.py +++ b/pypeit/scripts/flux_setup.py @@ -12,7 +12,7 @@ from astropy.table import Table -from pypeit import msgs +from pypeit import log from pypeit import io from pypeit.scripts import scriptbase from pypeit import inputfiles @@ -97,12 +97,12 @@ def main(args): sensfiles.append(ifile) unique_paths.add(str(ifile.parent)) else: - msgs.info('{:} is not a standard PypeIt output, skipping.'.format(ifile)) + log.info('{:} is not a standard PypeIt output, skipping.'.format(ifile)) if len(spec2dfiles) > len(spec1dfiles): - msgs.warning('The following exposures do not have 1D extractions:') + log.warning('The following exposures do not have 1D extractions:') for ii in range(len(spec2dfiles)): if (spec2dfiles[ii].parent / spec2dfiles[ii].name.replace("spec2d", "spec1d")).exists(): - msgs.info('\t {:}'.format(spec2dfiles[ii])) + log.info('\t {:}'.format(spec2dfiles[ii])) if len(spec1dfiles) > 0: with io.fits_open(spec1dfiles[0]) as hdul: diff --git a/pypeit/scripts/identify.py b/pypeit/scripts/identify.py index 9a5899ac33..d9f4ef6790 100644 --- a/pypeit/scripts/identify.py +++ b/pypeit/scripts/identify.py @@ -60,7 +60,7 @@ def main(args): import numpy as np - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit.spectrographs.util import load_spectrograph from pypeit.core.gui.identify import Identify @@ -74,7 +74,7 @@ def main(args): chk_version = not args.try_old # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('identify', args.verbosity) +# log.set_logfile_and_verbosity('identify', args.verbosity) # Load the Arc file msarc = ArcImage.from_file(args.arc_file, chk_version=chk_version) @@ -97,7 +97,7 @@ def main(args): # Reset the mask slits.mask = slits.mask_init - msgs.info('Loading in Solution if desired and exists') + log.info('Loading in Solution if desired and exists') # Check if a solution exists solnname = WaveCalib.construct_file_name(msarc.calib_key, calib_dir=msarc.calib_dir) wv_calib = WaveCalib.from_file(solnname, chk_version=chk_version) \ @@ -116,7 +116,7 @@ def main(args): # print to screen the slit widths if maskdef_designtab is available if slits.maskdef_designtab is not None: - msgs.info("Slit widths (arcsec): {}".format(np.round(slits.maskdef_designtab['SLITWID'].data, 2))) + log.info("Slit widths (arcsec): {}".format(np.round(slits.maskdef_designtab['SLITWID'].data, 2))) # Generate a map of the instrumental spectral FWHM # TODO nsample should be a parameter diff --git a/pypeit/scripts/install_extinctfile.py b/pypeit/scripts/install_extinctfile.py index 068860d075..df14eaa3e1 100644 --- a/pypeit/scripts/install_extinctfile.py +++ b/pypeit/scripts/install_extinctfile.py @@ -24,7 +24,7 @@ def get_parser(cls, width=None): @staticmethod def main(args): import numpy as np - from pypeit import msgs + from pypeit import log # Grab all the files files = 
np.concatenate([sorted(scriptbase.ScriptBase.expandpath(f)) for f in args.files]) @@ -34,8 +34,8 @@ def main(args): # Loop through the files passed for f in files: if not f.is_file(): - msgs.warning(f'{f} is not a file.') + log.warning(f'{f} is not a file.') continue # Copy the user-created file to the cache - msgs.info(f'Installing {f}') + log.info(f'Installing {f}') cache.write_file_to_cache(str(f), f.name, 'extinction') diff --git a/pypeit/scripts/install_linelist.py b/pypeit/scripts/install_linelist.py index c074166bcb..08b034b041 100644 --- a/pypeit/scripts/install_linelist.py +++ b/pypeit/scripts/install_linelist.py @@ -23,7 +23,7 @@ def get_parser(cls, width=None): @staticmethod def main(args): import numpy as np - from pypeit import msgs + from pypeit import log # Grab all the files files = np.concatenate([sorted(scriptbase.ScriptBase.expandpath(f)) for f in args.files]) @@ -33,8 +33,8 @@ def main(args): # Loop through the files passed for f in files: if not f.is_file(): - msgs.warning(f'{f} is not a file.') + log.warning(f'{f} is not a file.') continue # Copy the user-created file to the cache - msgs.info(f'Installing {f}') + log.info(f'Installing {f}') cache.write_file_to_cache(str(f), f.name, 'arc_lines/lists') diff --git a/pypeit/scripts/install_wvarxiv.py b/pypeit/scripts/install_wvarxiv.py index c57d3c6d18..c1a683ca61 100644 --- a/pypeit/scripts/install_wvarxiv.py +++ b/pypeit/scripts/install_wvarxiv.py @@ -23,7 +23,7 @@ def get_parser(cls, width=None): @staticmethod def main(args): import numpy as np - from pypeit import msgs + from pypeit import log # Grab all the files files = np.concatenate([sorted(scriptbase.ScriptBase.expandpath(f)) for f in args.files]) @@ -33,7 +33,7 @@ def main(args): # Loop through the files passed for f in files: if not f.is_file(): - msgs.warning(f'{f} is not a file.') + log.warning(f'{f} is not a file.') continue # Copy the user-created file to the cache cache.write_file_to_cache(str(f), f.name, 'arc_lines/reid_arxiv') diff --git a/pypeit/scripts/multislit_flexure.py b/pypeit/scripts/multislit_flexure.py index 1691361687..4213d70f2d 100644 --- a/pypeit/scripts/multislit_flexure.py +++ b/pypeit/scripts/multislit_flexure.py @@ -10,7 +10,7 @@ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import inputfiles from pypeit.spectrographs.util import load_spectrograph from pypeit.par import pypeitpar @@ -68,30 +68,30 @@ def main(args): # Instantiate mdFlex = flexure.MultiSlitFlexure(s1dfile=filename) # Initalize - msgs.info("Setup") + log.info("Setup") mdFlex.init(spectrograph, par['flexure']) # INITIAL SKY LINE STUFF - msgs.info("Measuring sky lines") + log.info("Measuring sky lines") mdFlex.measure_sky_lines() # FIT SURFACES - msgs.info("Fitting the surface") + log.info("Fitting the surface") mdFlex.fit_mask_surfaces() # Apply - msgs.info("Applying flexure correction") + log.info("Applying flexure correction") mdFlex.update_fit() # REFIT FOR QA PLOTS - msgs.info("Generate QA") + log.info("Generate QA") mask = header['TARGET'].strip() fnames = header['FILENAME'].split('.') root = mask+'_'+fnames[2] mdFlex.qa_plots('./', root) # Write - msgs.info("Write to disk") + log.info("Write to disk") mdFlex.to_file(args.outroot+root+'.fits', overwrite=args.clobber) # Apply?? 
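Every hunk above follows the same mechanical substitution: the old `msgs`
singleton becomes the package-level `log` object, and `msgs.info`,
`msgs.warning`, etc. map one-to-one onto the standard logging method names.
A minimal usage sketch, assuming `pypeit/__init__.py` binds `log` to the
object returned by `logger.get_logger()` and that this object behaves like a
standard `logging.Logger` (the binding is inferred from the imports in these
hunks, not shown in this patch):

    # Hedged sketch; `log` is assumed to expose logging.Logger methods.
    from pypeit import log

    log.info('Loaded spectrograph %s', 'keck_deimos')   # was msgs.info(...)
    log.warning('No science frames found!')             # was msgs.warning(...)

If that assumption holds, anything a standard logging handler supports (log
files, verbosity filtering, formatting) can be configured once in
pypeit/logger.py rather than per script.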
diff --git a/pypeit/scripts/parse_slits.py b/pypeit/scripts/parse_slits.py index b818d50c54..7285cce8c3 100644 --- a/pypeit/scripts/parse_slits.py +++ b/pypeit/scripts/parse_slits.py @@ -10,7 +10,7 @@ from pypeit import slittrace from pypeit import spec2dobj -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from astropy.table import Table diff --git a/pypeit/scripts/print_bpm.py b/pypeit/scripts/print_bpm.py index 829c27f736..dd61e78627 100644 --- a/pypeit/scripts/print_bpm.py +++ b/pypeit/scripts/print_bpm.py @@ -10,7 +10,7 @@ from astropy.io import fits from pypeit import __version__ -from pypeit import msgs, spec2dobj +from pypeit import log, spec2dobj from pypeit.images.detector_container import DetectorContainer from pypeit.images.imagebitmask import ImageBitMask from pypeit import PypeItDataModelError @@ -50,13 +50,13 @@ def main(args): binvals = [int(x) for x in bin(args.bit)[2:]][::-1] if args.file is None: - msgs.info("Using the default PypeIt bad pixel mask.") + log.info("Using the default PypeIt bad pixel mask.") # Generate an Image BitMask object bpm = ImageBitMask() descr = bpm.descr else: # Read the spec2d file - msgs.info(f"Using the bad pixel mask from the following spec2d file:\n{args.file}.") + log.info(f"Using the bad pixel mask from the following spec2d file:\n{args.file}.") spec2d_file = args.file # Parse the detector name @@ -77,14 +77,14 @@ def main(args): file_pypeit_version = fits.getval(args.file, 'VERSPYP', 0) except KeyError: file_pypeit_version = '*unknown*' - msgs.warning(f'Your installed version of PypeIt ({__version__}) cannot be used to parse ' + log.warning(f'Your installed version of PypeIt ({__version__}) cannot be used to parse ' f'{args.file}, which was reduced using version {file_pypeit_version}. You ' 'are strongly encouraged to re-reduce your data using this (or, better yet, ' 'the most recent) version of PypeIt. 
Script will try to parse only the ' 'relevant bits from the spec2d file and continue (possibly with more ' 'limited functionality).') # Generate an Image BitMask object - msgs.info("Using the default PypeIt bad pixel mask.") + log.info("Using the default PypeIt bad pixel mask.") bpm = ImageBitMask() descr = bpm.descr else: @@ -101,8 +101,8 @@ def main(args): outstr += f"* {bitkeys[i].ljust(bitlen)} : {descr[i]}\n" # Print the message to the user - msgs.info(outstr) + log.info(outstr) # Finally, print out a message to point users to the online documentation - msgs.info("Please see the following website for more information:\n" + log.info("Please see the following website for more information:\n" "https://pypeit.readthedocs.io/en/release/out_masks.html") diff --git a/pypeit/scripts/ql.py b/pypeit/scripts/ql.py index 9b85035077..e5eda00976 100644 --- a/pypeit/scripts/ql.py +++ b/pypeit/scripts/ql.py @@ -49,7 +49,7 @@ from astropy.table import Table from pypeit import PypeItError -from pypeit import msgs +from pypeit import log from pypeit import pypeitsetup from pypeit import metadata from pypeit import io @@ -164,7 +164,7 @@ def quicklook_regroup(fitstbl): # Group the unique dither positions dith, inv = np.unique(fitstbl['dithoff'].data[is_type], return_inverse=True) if len(dith) == 1: - msgs.warning('All exposures have the same offset!') + log.warning('All exposures have the same offset!') fitstbl['comb_id'][is_type] = comb_strt else: # This creates comb+bkg pairs that match the absolute value of the offset @@ -309,7 +309,7 @@ def generate_sci_pypeitfile(redux_path:str, if std_spec1d is not None: # Found an existing reduction, so remove the standard frames. # NOTE: Should not need to regroup! - msgs.warning(f'Found existing standard star reduction: {std_spec1d}. This will be used ' + log.warning(f'Found existing standard star reduction: {std_spec1d}. This will be used ' 'and the standards will not be re-reduced! To force them to be ' 're-reduced, use the --clear_science option.') ps_sci.remove_table_rows(is_std) @@ -512,7 +512,7 @@ def match_to_calibs(ps:pypeitsetup.PypeItSetup, calib_dir:str, calibrated_setups matched_configs[setup] = None continue elif len(matched_configs[setup]['setup']) > 1: - msgs.warning('Existing calibrations have degenerate configurations! We recommend you ' + log.warning('Existing calibrations have degenerate configurations! We recommend you ' 'clean your calibrations parent directory. For now, using the first match.') matched_configs[setup]['setup'] = matched_configs[setup]['setup'][0] matched_configs[setup]['calib_dir'] = matched_configs[setup]['calib_dir'][0] @@ -848,7 +848,7 @@ def main(args): # TODO: This is now the only place bkg_redux is used... bkg_redux = 'bkg_id' in ps_sci.fitstbl.keys() and any(ps_sci.fitstbl['bkg_id'] != -1) if bkg_redux: - msgs.warning('Dither pattern automatically detected for these observations. Image ' + log.warning('Dither pattern automatically detected for these observations. Image ' 'combination and background subtraction sequences automatically set; ' 'confirm the behavior is what you want by checking the auto-generated ' 'pypeit file.') @@ -868,7 +868,7 @@ def main(args): # in generate_sci_pypeitfile, but it's useful to keep the warning # here. if any(ps_sci.fitstbl['calib'] != ps_sci.fitstbl['calib'][0]): - msgs.warning('Automated configuration assigned multiple calibration groups to your ' + log.warning('Automated configuration assigned multiple calibration groups to your ' 'science frames. Ignoring! 
Assigning all frames to the same group.') ps_sci.fitstbl['calib'] = ps_sci.fitstbl['calib'][0] @@ -883,10 +883,10 @@ def main(args): f'in provided parent directory: {args.parent_calib_dir}') # NOTE: Code above check that there is only one setup in ps_sci setup_calib_dir = setup_calib_dir[ps_sci.fitstbl['setup'][0]]['calib_dir'] - msgs.info(f'Attempting to use archived calibrations found in {setup_calib_dir}.') + log.info(f'Attempting to use archived calibrations found in {setup_calib_dir}.') elif not args.calibs_only: - msgs.warning('No science frames found among the files provided. Will only process ' + log.warning('No science frames found among the files provided. Will only process ' 'calibration frames. If you have provided science frames, you can specify ' 'which ones they are using the --sci_files option.') @@ -895,7 +895,7 @@ def main(args): # Calibrate, if necessary if setup_calib_dir is None: - msgs.info('Building the processed calibration frames.') + log.info('Building the processed calibration frames.') # Set the parent directory parent_calib_dir = args.redux_path if args.parent_calib_dir is None \ else args.parent_calib_dir @@ -944,7 +944,7 @@ def main(args): # relevant directory. calib_files = list(setup_calib_dir.glob('*')) if len(calib_files) > 0 and not args.overwrite_calibs: - msgs.info('Calibration files already exist. Skipping calibration.') + log.info('Calibration files already exist. Skipping calibration.') continue # Run @@ -955,7 +955,7 @@ def main(args): pypeIt.calib_all() if args.calibs_only or not any(sci_idx): - msgs.info('Only calibrations exist or requested calibration processing only. Done.') + log.info('Only calibrations exist or requested calibration processing only. Done.') return # Build the PypeIt file for the science frames and link to the existing @@ -1037,7 +1037,7 @@ def main(args): # screen output)? 
exec_s = np.around(time.perf_counter()-tstart, decimals=1) - msgs.info(f'Quicklook execution time: {datetime.timedelta(seconds=exec_s)}') + log.info(f'Quicklook execution time: {datetime.timedelta(seconds=exec_s)}') def print_offset_report(fitstbl:Table, platescale:float): @@ -1080,5 +1080,5 @@ def print_offset_report(fitstbl:Table, platescale:float): f'{offset_arcsec[iexp] / platescale:6.2f}' ) msg_string += '\n********************************************************' - msgs.info(msg_string) + log.info(msg_string) diff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py index 25c6703ee3..120a2e0e63 100644 --- a/pypeit/scripts/run_pypeit.py +++ b/pypeit/scripts/run_pypeit.py @@ -82,7 +82,7 @@ def main(args): from IPython import embed from pypeit import pypeit - from pypeit import msgs + from pypeit import log from pypeit import PypeItError # Load options from command line @@ -101,10 +101,10 @@ def main(args): pypeIt.calib_all() else: pypeIt.reduce_all() - msgs.info('Data reduction complete') + log.info('Data reduction complete') # QA HTML - msgs.info('Generating QA HTML') + log.info('Generating QA HTML') pypeIt.build_qa() return 0 diff --git a/pypeit/scripts/run_to_calibstep.py b/pypeit/scripts/run_to_calibstep.py index 7f925b190f..b2592af3ab 100644 --- a/pypeit/scripts/run_to_calibstep.py +++ b/pypeit/scripts/run_to_calibstep.py @@ -50,7 +50,7 @@ def main(args): from pathlib import Path from pypeit import pypeit - from pypeit import msgs + from pypeit import log from pypeit import PypeItError # Load options from command line @@ -63,7 +63,7 @@ def main(args): if args.science_frame is None and args.calib_group is None: raise PypeItError('Must provide either a science frame or a calibration group ID') elif args.science_frame is not None and args.calib_group is not None: - msgs.warning("Both science_frame and calib_group ID provided. Will use the science_frame") + log.warning("Both science_frame and calib_group ID provided. 
Will use the science_frame") # Instantiate the main pipeline reduction object pypeIt = pypeit.PypeIt(args.pypeit_file, verbosity=args.verbosity, @@ -94,7 +94,7 @@ def main(args): pypeIt.calib_one([row], det, stop_at_step=args.step) # QA HTML - msgs.info('Generating QA HTML') + log.info('Generating QA HTML') pypeIt.build_qa() return 0 diff --git a/pypeit/scripts/sensfunc.py b/pypeit/scripts/sensfunc.py index c4becdf532..2f864114f9 100644 --- a/pypeit/scripts/sensfunc.py +++ b/pypeit/scripts/sensfunc.py @@ -92,7 +92,7 @@ def main(args): import os - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import inputfiles from pypeit import io @@ -101,7 +101,7 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('sensfunc', args.verbosity) +# log.set_logfile_and_verbosity('sensfunc', args.verbosity) # Check parameter inputs if args.algorithm is not None and args.sens_file is not None: @@ -195,7 +195,7 @@ def main(args): # command line, overwrite the parset values read in from the .sens file # Write the par to disk - msgs.info(f'Writing the parameters to {args.par_outfile}') + log.info(f'Writing the parameters to {args.par_outfile}') par['sensfunc'].to_config(args.par_outfile, section_name='sensfunc', include_descr=False) # TODO JFH I would like to be able to run only diff --git a/pypeit/scripts/setup.py b/pypeit/scripts/setup.py index a448586b33..c688e9167c 100644 --- a/pypeit/scripts/setup.py +++ b/pypeit/scripts/setup.py @@ -79,7 +79,7 @@ def main(args): import time from pathlib import Path - from pypeit import msgs + from pypeit import log from pypeit.pypeitsetup import PypeItSetup from pypeit.calibrations import Calibrations @@ -100,7 +100,7 @@ def main(args): from pypeit.setup_gui.controller import start_gui start_gui(args) # else: -# msgs.set_logfile_and_verbosity("setup", args.verbosity) +# log.set_logfile_and_verbosity("setup", args.verbosity) # Initialize PypeItSetup based on the arguments ps = PypeItSetup.from_file_root(args.root, args.spectrograph, extension=args.extension) @@ -111,7 +111,7 @@ def main(args): user_cfgs = [l.rstrip() for l in user_par_fobj.readlines()] ps.append_user_cfg(user_cfgs) else: - msgs.warning(f"Could not open param_block file {args.param_block_file}. " + log.warning(f"Could not open param_block file {args.param_block_file}. 
" "Not adding any additional user parameters to the .pypeit file.") # Run the setup ps.run(setup_only=True, clean_config=not args.keep_bad_frames) diff --git a/pypeit/scripts/setup_coadd2d.py b/pypeit/scripts/setup_coadd2d.py index c5a947e1c0..ca90573461 100644 --- a/pypeit/scripts/setup_coadd2d.py +++ b/pypeit/scripts/setup_coadd2d.py @@ -92,7 +92,7 @@ def main(args): from astropy.table import Table - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import io from pypeit import utils @@ -125,10 +125,10 @@ def main(args): sci_dirs_exist = [sc.exists() for sc in sci_dirs] if not np.all(sci_dirs_exist): - msgs_string = 'The following science directories do not exist:\n' + log_string = 'The following science directories do not exist:\n' for s in np.array(sci_dirs)[np.logical_not(sci_dirs_exist)]: - msgs_string += f'{s}\n' - raise PypeItError(msgs_string) + log_string += f'{s}\n' + raise PypeItError(log_string) # Find all the spec2d files: spec2d_files = np.concatenate([sorted(sci_dir.glob('spec2d*')) for sci_dir in sci_dirs]).tolist() @@ -160,7 +160,7 @@ def main(args): for obj in objects: object_spec2d_files[obj] = [f for f in spec2d_files if obj.strip() in f.name] if len(object_spec2d_files[obj]) == 0: - msgs.warning(f'No spec2d files found for target={obj}.') + log.warning(f'No spec2d files found for target={obj}.') del object_spec2d_files[obj] # Check spec2d files exist for the selected objects diff --git a/pypeit/scripts/show_1dspec.py b/pypeit/scripts/show_1dspec.py index 7088f61957..18b86ba979 100644 --- a/pypeit/scripts/show_1dspec.py +++ b/pypeit/scripts/show_1dspec.py @@ -46,7 +46,7 @@ def main(args): from linetools.guis.xspecgui import XSpecGui from pypeit import specobjs - from pypeit import msgs + from pypeit import log from pypeit import PypeItError sobjs = specobjs.SpecObjs.from_fitsfile(args.file, chk_version=False) diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index 19f531f48d..cff31be25e 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -14,7 +14,7 @@ from astropy.io import fits from astropy.stats import sigma_clipped_stats -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import slittrace from pypeit import specobjs @@ -74,7 +74,7 @@ def show_trace(sobjs, det, viewer, ch): display.show_trace(viewer, ch, np.swapaxes(trace_list, 1,0), np.array(trc_name_list), maskdef_extr=np.array(maskdef_extr_list), manual_extr=np.array(manual_extr_list)) else: - msgs.warning('spec1d file found, but no objects were extracted for this detector.') + log.warning('spec1d file found, but no objects were extracted for this detector.') class Show2DSpec(scriptbase.ScriptBase): @@ -141,7 +141,7 @@ def main(args): return # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('show_2dspec', args.verbosity) +# log.set_logfile_and_verbosity('show_2dspec', args.verbosity) # Parse the detector name if args.det is None: @@ -194,7 +194,7 @@ def main(args): if check_version: raise PypeItError(message) else: - msgs.warning(message) + log.warning(message) spec2DObj = None if spec2DObj is None: @@ -223,7 +223,7 @@ def main(args): _ext = f'{detname}-SLITS' if _ext not in names: - msgs.warning(f'{args.file} missing extension {_ext}; cannot show slit edges.') + log.warning(f'{args.file} missing extension {_ext}; cannot show slit edges.') else: slit_columns = hdu[_ext].columns.names slit_spat_id = hdu[_ext].data['spat_id'] if 
'spat_id' in slit_columns else None
@@ -242,7 +242,7 @@ def main(args):
 = float(hdu[f'{detname}-SCIIMG'].header['SCI_SPAT_FLEXURE'])
 slit_left += sci_spat_flexure
 slit_right += sci_spat_flexure
- msgs.info(f'Offseting slits by {sci_spat_flexure} pixels.')
+ log.info(f'Offsetting slits by {sci_spat_flexure} pixels.')
 pypeline = hdu[f'{detname}-SCIIMG'].header['PYPELINE'] \
 if 'PYPELINE' in hdu[f'{detname}-SCIIMG'].header else None
 if pypeline in ['MultiSlit', 'SlicerIFU']:
@@ -267,14 +267,14 @@ def main(args):
 img_gpm = spec2DObj.select_flag(invert=True)
 if not np.any(img_gpm):
- msgs.warning('The full science image is masked!')
+ log.warning('The full science image is masked!')
 model_gpm = img_gpm.copy()
 if args.ignore_extract_mask:
 model_gpm |= spec2DObj.select_flag(flag='EXTRACT')
 if spec2DObj.sci_spat_flexure is not None:
- msgs.info(f'Offseting slits by {spec2DObj.sci_spat_flexure}')
+ log.info(f'Offsetting slits by {spec2DObj.sci_spat_flexure}')
 slit_left, slit_right, slit_mask \
 = spec2DObj.slits.select_edges(flexure=spec2DObj.sci_spat_flexure)
 slit_spat_id = spec2DObj.slits.spat_id
@@ -289,7 +289,7 @@ def main(args):
 sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file, chk_version=False)
 else:
 sobjs = None
- msgs.warning(f'Could not find spec1d file: {spec1d_file}\n'
+ log.warning(f'Could not find spec1d file: {spec1d_file}\n'
 'No objects were extracted.')
 # TODO: This may be too restrictive, i.e. ignore BADFLTCALIB??
diff --git a/pypeit/scripts/show_pixflat.py b/pypeit/scripts/show_pixflat.py
index 732e83ad66..37aedc2a4d 100644
--- a/pypeit/scripts/show_pixflat.py
+++ b/pypeit/scripts/show_pixflat.py
@@ -24,7 +24,7 @@ def get_parser(cls, width=None):
 @staticmethod
 def main(args):
 import numpy as np
- from pypeit import msgs
+ from pypeit import log
 from pypeit import PypeItError
 from pypeit import io
 from pypeit.display import display
@@ -49,7 +49,7 @@ def main(args):
 # if some of the provided detectors are not in the file, warn the user
 elif np.any(np.logical_not(in_file)):
 det_not_in_file = np.array(args.det)[np.logical_not(in_file)]
- msgs.warning(f"Detector(s) {det_not_in_file} not found in the file. Available detectors are {file_dets}")
+ log.warning(f"Detector(s) {det_not_in_file} not found in the file. 
Available detectors are {file_dets}") # show the image display.connect_to_ginga(raise_err=True, allow_new=True) diff --git a/pypeit/scripts/skysub_regions.py b/pypeit/scripts/skysub_regions.py index b9de03b6df..b3b2ddc394 100644 --- a/pypeit/scripts/skysub_regions.py +++ b/pypeit/scripts/skysub_regions.py @@ -41,7 +41,7 @@ def main(args): from pypeit import spec2dobj import os import astropy.io.fits as fits - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import io from pypeit.core.gui.skysub_regions import SkySubGUI @@ -79,7 +79,7 @@ def main(args): det = spec2DObj.detector.parse_name(detname) # Setup for PypeIt imports - msgs.init(level=msgs.level) + log.init(level=log.level) # Grab the slit edges slits = spec2DObj.slits diff --git a/pypeit/scripts/tellfit.py b/pypeit/scripts/tellfit.py index c670cbd862..7823952292 100644 --- a/pypeit/scripts/tellfit.py +++ b/pypeit/scripts/tellfit.py @@ -71,7 +71,7 @@ def main(args): from astropy.io import fits - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit import dataPaths from pypeit.par import pypeitpar @@ -80,7 +80,7 @@ def main(args): from pypeit import inputfiles # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('tellfit', args.verbosity) +# log.set_logfile_and_verbosity('tellfit', args.verbosity) # Determine the spectrograph header = fits.getheader(args.spec1dfile) @@ -115,7 +115,7 @@ def main(args): par['telluric']['telgridfile'] = par['sensfunc']['IR']['telgridfile'] else: par['telluric']['telgridfile'] = 'TellPCA_3000_26000_R10000.fits' - msgs.warning(f"No telluric file given. Using PCA method with {par['telluric']['telgridfile']}.") + log.warning(f"No telluric file given. Using PCA method with {par['telluric']['telgridfile']}.") # Checks if par['telluric']['telgridfile'] is None: @@ -127,14 +127,14 @@ def main(args): # Write the par to disk # TODO: Make it optional to write this file? Is the relevant metadata # saved to the main output file? - msgs.info(f'Writing the telluric fitting parameters to {args.par_outfile}') + log.info(f'Writing the telluric fitting parameters to {args.par_outfile}') par['telluric'].to_config(args.par_outfile, section_name='telluric', include_descr=False) # Parse the output filename outfile = (os.path.basename(args.spec1dfile)).replace('.fits','_tellcorr.fits') modelfile = (os.path.basename(args.spec1dfile)).replace('.fits','_tellmodel.fits') - msgs.info(f'Telluric-corrected spectrum will be saved to: {outfile}.') - msgs.info(f'Best-fit telluric model will be saved to: {modelfile}.') + log.info(f'Telluric-corrected spectrum will be saved to: {outfile}.') + log.info(f'Best-fit telluric model will be saved to: {modelfile}.') # Run the telluric fitting procedure. 
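# (For orientation, a hypothetical sketch of the dispatch below: each value
# of par['telluric']['objmodel'] selects a different fitting entry point,
# along the lines of
#     if objmodel == 'qso':    telluric.qso_telluric(...)
#     elif objmodel == 'star': telluric.star_telluric(...)
#     elif objmodel == 'poly': telluric.poly_telluric(...)
# The function names are assumptions based on pypeit.core.telluric and are
# not confirmed by this hunk.)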
if par['telluric']['objmodel']=='qso': diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index f7c6985711..d5238276d0 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -65,7 +65,7 @@ def main(args): import time from pathlib import Path import numpy as np - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit.spectrographs.util import load_spectrograph from pypeit import edgetrace @@ -75,10 +75,10 @@ def main(args): from IPython import embed # Set the verbosity, and create a logfile if verbosity == 2 -# msgs.set_logfile_and_verbosity('trace_edges', args.verbosity) +# log.set_logfile_and_verbosity('trace_edges', args.verbosity) if args.show: - msgs.warning('"show" option is deprecated. Setting debug = 1.') + log.warning('"show" option is deprecated. Setting debug = 1.') args.debug = 1 if args.pypeit_file is not None: @@ -209,10 +209,10 @@ def main(args): edges = edgetrace.EdgeTraceSet(traceImage, spec, trace_par, auto=True, debug=args.debug, qa_path=qa_path) if not edges.success: - msgs.warning(f'Edge tracing for detector {det} failed. Continuing...') + log.warning(f'Edge tracing for detector {det} failed. Continuing...') continue - msgs.info(f'Tracing for detector {det} finished in { time.perf_counter()-t:.1f} s.') + log.info(f'Tracing for detector {det} finished in { time.perf_counter()-t:.1f} s.') # Write the two calibration frames edges.to_file() edges.get_slits().to_file() diff --git a/pypeit/scripts/view_fits.py b/pypeit/scripts/view_fits.py index f829391a8e..681298393b 100644 --- a/pypeit/scripts/view_fits.py +++ b/pypeit/scripts/view_fits.py @@ -48,7 +48,7 @@ def get_parser(cls, width=None): @staticmethod def main(args): - from pypeit import msgs + from pypeit import log from pypeit import PypeItError from pypeit.display import display from pypeit.spectrographs import util @@ -62,7 +62,7 @@ def main(args): return # TODO: Update verbosity - msgs.init(level=msgs.level) + log.init(level=log.level) if args.proc and args.exten is not None: raise PypeItError('You cannot specify --proc and --exten, since --exten shows the raw image') @@ -131,7 +131,7 @@ def main(args): if args.showmask: if not args.proc: - msgs.info("You need to use --proc with --showmask to show the mask. Ignoring your argument") + log.info("You need to use --proc with --showmask to show the mask. 
Ignoring your argument") else: viewer, ch_mask = display.show_image(Img.bpm, chname="BPM") diff --git a/pypeit/sensfilearchive.py b/pypeit/sensfilearchive.py index f340f01cc5..76a76e7f6e 100644 --- a/pypeit/sensfilearchive.py +++ b/pypeit/sensfilearchive.py @@ -8,7 +8,7 @@ from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import dataPaths @@ -89,6 +89,6 @@ def get_archived_sensfile(self, fitsfile, symlink_in_pkgdir=False): to_pkg = 'symlink' if symlink_in_pkgdir else None archived_file = dataPaths.sensfunc.get_file_path(f"keck_deimos_{grating}_sensfunc.fits", to_pkg=to_pkg) - msgs.info(f"Found archived sensfile '{archived_file}'") + log.info(f"Found archived sensfile '{archived_file}'") return archived_file diff --git a/pypeit/sensfunc.py b/pypeit/sensfunc.py index c9bc6d0994..825d93dfc3 100644 --- a/pypeit/sensfunc.py +++ b/pypeit/sensfunc.py @@ -15,7 +15,7 @@ from astropy.io import fits from astropy import table -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import specobjs from pypeit import specobj @@ -289,7 +289,7 @@ def __init__(self, spec1dfile, sensfile, par, par_fluxcalib=None, debug=False, raise PypeItError(f'No wavelength overlap between the archival and observed standard star spectrum. ' 'This is not the right standard star for your observations.') elif np.sum(overlap)/self.nspec_in < 0.8: - msgs.warning(f'Only {np.sum(overlap)/self.nspec_in:.1%} of the observed wavelength range is covered by the ' + log.warning(f'Only {np.sum(overlap)/self.nspec_in:.1%} of the observed wavelength range is covered by the ' f'archival standard star. This may not be the right standard star for your observations. ') def unpack_std(self): @@ -341,7 +341,7 @@ def unpack_std(self): raise PypeItError('"use_flat" set to True, but standard star 1D spectrum from OneSpec class ' 'does not contain the flat spectrum. The blaze function cannot be estimated.') if spec.ext_mode != self.par['extr']: - msgs.warning(f'Standard star 1D spectrum from OneSpec class was obtained using the {spec.ext_mode} ' + log.warning(f'Standard star 1D spectrum from OneSpec class was obtained using the {spec.ext_mode} ' f'extraction, while the requested extraction is {self.par["extr"]}. ' f'The available {spec.ext_mode} extraction will be used instead.') self.extr = spec.ext_mode @@ -595,7 +595,7 @@ def splice(self): zero-point array """ - msgs.info(f"Merging sensfunc for {self.norderdet} detectors {self.par['multi_spec_det']}") + log.info(f"Merging sensfunc for {self.norderdet} detectors {self.par['multi_spec_det']}") wave_splice_min = self.wave[self.wave > 1.0].min() wave_splice_max = self.wave[self.wave > 1.0].max() wave_splice_1d, _, _ = wvutils.get_wave_grid(waves=self.wave, wave_method='linear', @@ -624,7 +624,7 @@ def splice(self): # Interpolate over gaps zeros = zeropoint_splice_1d == 0. 
if np.any(zeros): - msgs.info("Interpolating over gaps (and extrapolating with fill_value=1, if need be)") + log.info("Interpolating over gaps (and extrapolating with fill_value=1, if need be)") interp_func = scipy.interpolate.interp1d(wave_splice_1d[np.invert(zeros)], zeropoint_splice_1d[np.invert(zeros)], kind='nearest', fill_value=0., @@ -893,7 +893,7 @@ def sensfunc_weights(cls, sensfile, waves, ech_order_vec=None, debug=False, extr if waves.ndim == 2: nspec, norder = waves.shape if ech_order_vec is not None and ech_order_vec.size != norder: - msgs.warning('The number of orders in the wave grid does not match the ' + log.warning('The number of orders in the wave grid does not match the ' 'number of orders in the unpacked sobjs. Echelle order vector not used.') ech_order_vec = None nexp = 1 @@ -913,7 +913,7 @@ def sensfunc_weights(cls, sensfile, waves, ech_order_vec=None, debug=False, extr if norder != sens.zeropoint.shape[1] and ech_order_vec is None: raise PypeItError('The number of orders in {:} does not agree with your data. Wrong sensfile?'.format(sensfile)) elif norder != sens.zeropoint.shape[1] and ech_order_vec is not None: - msgs.warning('The number of orders in {:} does not match the number of orders in the data. ' + log.warning('The number of orders in {:} does not match the number of orders in the data. ' 'Using only the matching orders.'.format(sensfile)) # array of order to loop through diff --git a/pypeit/setup_gui/controller.py b/pypeit/setup_gui/controller.py index df7fa60679..991d326095 100644 --- a/pypeit/setup_gui/controller.py +++ b/pypeit/setup_gui/controller.py @@ -24,7 +24,7 @@ from pypeit.setup_gui.text_viewer import TextViewerWindow from pypeit.setup_gui.dialog_helpers import prompt_to_save, display_error, FileDialog, FileType from pypeit.setup_gui.model import PypeItSetupGUIModel, ModelState, PypeItFileModel -from pypeit import msgs +from pypeit import log from pypeit.display import display from pypeit import io as pypeit_io @@ -64,7 +64,7 @@ def run(self): canceled = False exc_info = (None, None, None) try: - msgs.info("Running operation") + log.info("Running operation") self._operation.run() except OpCanceledError: canceled=True @@ -107,7 +107,7 @@ def _op_complete(self, canceled, exc_info): canceled (bool): Whether or not the operation was canceled. exc_info (tuple): The exception information if the operation failed. None if it succeeded """ - msgs.info("Op complete") + log.info("Op complete") with lock_qt_mutex(self._mutex): if self._operation is not None: operation = self._operation @@ -129,7 +129,7 @@ def startOperation(self, operation): Args: operation (MetadataOperation): The MetadataOperation to start in the background thread. 
""" - msgs.info("Starting operation") + log.info("Starting operation") self._operation = operation if operation.preRun(): operation.progressMade.connect(self._op_progress, type=Qt.QueuedConnection) @@ -173,7 +173,7 @@ def preRun(self): def _buildingMetadata(self, name, match): """Callback used to find the total number of files being read when building metadata.""" self._max_progress = int(match.group(1)) - msgs.info(f"Found max progress {self._max_progress}") + log.info(f"Found max progress {self._max_progress}") def _addedMetadata(self, name, match): """Callback used to report progress on reading files when building metadata.""" @@ -195,7 +195,7 @@ def postRun(self, canceled, exc_info): if exc_info[0] is not None: traceback_string = "".join(traceback.format_exception(*exc_info)) - msgs.warning(f"Failed to {self.name.lower()}:\n" + traceback_string) + log.warning(f"Failed to {self.name.lower()}:\n" + traceback_string) display_error(self._main_window, f"Failed to {self.name.lower()} {exc_info[0]}: {exc_info[1]}") self._model.reset() elif canceled: @@ -432,7 +432,7 @@ def view_file(self, n=None): display.connect_to_ginga(raise_err=True, allow_new=True) except Exception as e: display_error(self._main_controller.main_window, f"Could not start ginga to view FITS files: {e}") - msgs.warning(f"Failed to connect to ginga:\n" + traceback.format_exc()) + log.warning(f"Failed to connect to ginga:\n" + traceback.format_exc()) # Display each file in its own ginga tab @@ -448,13 +448,13 @@ def view_file(self, n=None): img = self._model.spectrograph.get_rawimage(str(file), n)[1] except Exception as e: display_error(self._main_controller.main_window, f"Failed to read image {file.name}: {e}") - msgs.warning(f"Failed get raw image:\n" + traceback.format_exc()) + log.warning(f"Failed get raw image:\n" + traceback.format_exc()) try: display.show_image(img, chname = f"{file.name} {det_name}") except Exception as e: display_error(self._main_controller.main_window, f"Failed to send image {file.name} to ginga: {e}") - msgs.warning(f"Failed send image to ginga:\n" + traceback.format_exc()) + log.warning(f"Failed send image to ginga:\n" + traceback.format_exc()) def view_header(self): """ Display the header of one or more selected files in the metadata. 
@@ -474,7 +474,7 @@ def view_header(self):
                 hdu.header.totextfile(header_string_buffer)
             except Exception as e:
                 display_error(self._main_controller.main_window, f"Failed to read header from file {file.name} in {file.parent}: {e}")
-                msgs.warning(f"Failed to read header from {file}:\n" + traceback.format_exc())
+                log.warning(f"Failed to read header from {file}:\n" + traceback.format_exc())
                 return
         header_string_buffer.seek(0)
         window = TextViewerWindow(title=f"{file.name} Header", width=80, height=50,start_at_top=True, filename=file.parent / (file.name+".txt"), text_stream=header_string_buffer)
@@ -497,7 +497,7 @@ def copy_metadata_rows(self) -> bool:
             return False
         row_indices = self._view.selectedRows()
-        msgs.info(f"Copying {len(row_indices)} rows to the clipboard.")
+        log.info(f"Copying {len(row_indices)} rows to the clipboard.")
         if len(row_indices) > 0:
             row_model = self._model.createCopyForRows(row_indices)
             self._main_controller.model.clipboard = row_model
@@ -521,11 +521,11 @@ def paste_metadata_rows(self):
         clipboard = self._main_controller.model.clipboard
         if clipboard.rowCount() > 0:
             try:
-                msgs.info(f"Pasting {clipboard.rowCount()} rows")
+                log.info(f"Pasting {clipboard.rowCount()} rows")
                 self._model.pasteFrom(clipboard)
             except Exception as e:
                 traceback_string = "".join(traceback.format_exc())
-                msgs.warning(f"Failed to paste metadata rows:\n" + traceback_string)
+                log.warning(f"Failed to paste metadata rows:\n" + traceback_string)
                 display_error(self._main_controller.main_window,
                               f"Could not paste rows to this PypeIt file: {e}")
 
@@ -534,7 +534,7 @@ def comment_out_metadata_rows(self):
         if self._view is None:
             return
         row_indices = self._view.selectedRows()
-        msgs.info(f"Commenting out {len(row_indices)} rows.")
+        log.info(f"Commenting out {len(row_indices)} rows.")
         if len(row_indices) > 0:
             self._model.commentMetadataRows(row_indices)
 
@@ -543,7 +543,7 @@ def uncomment_metadata_rows(self):
         if self._view is None:
             return
         row_indices = self._view.selectedRows()
-        msgs.info(f"Unommenting out {len(row_indices)} rows.")
+        log.info(f"Uncommenting {len(row_indices)} rows.")
         if len(row_indices) > 0:
             self._model.uncommentMetadataRows(row_indices)
 
@@ -557,7 +557,7 @@ def delete_metadata_rows(self) -> bool:
         if self._view is None:
             return False
         row_indices = self._view.selectedRows()
-        msgs.info(f"Removing {len(row_indices)} rows.")
+        log.info(f"Removing {len(row_indices)} rows.")
         if len(row_indices) > 0:
             self._model.removeMetadataRows(row_indices)
             return True
@@ -596,7 +596,7 @@ def removePaths(self, rows):
 
     def addNewPath(self, new_path):
         """Add a new path to the observation log"""
-        msgs.info(f"Adding new path {new_path}")
+        log.info(f"Adding new path {new_path}")
         self._model.add_raw_data_directory(new_path)
 
 class PypeItFileController(QObject):
@@ -659,10 +659,10 @@ def __init__(self, app : QApplication, verbosity : int, spectrograph : str|None=
             self.model.obslog_model.default_extension = extension
 
         defaultFont = self.app.font()
-        msgs.info(f"Default font pixel size: {defaultFont.pixelSize()}")
-        msgs.info(f"Default font point size: {defaultFont.pointSizeF()}")
+        log.info(f"Default font pixel size: {defaultFont.pixelSize()}")
+        log.info(f"Default font point size: {defaultFont.pointSizeF()}")
         if defaultFont.pointSizeF() < 12.0:
-            msgs.info(f"Setting font to 12.")
+            log.info(f"Setting font to 12.")
             defaultFont.setPointSize(12)
             self.app.setFont(defaultFont)
 
@@ -745,7 +745,7 @@ def save_one(self):
             # Shouldn't really happen, it would mean the save tab button was enabled
             # when it shouldn't be.
We'll handle this case and log it to prevent a crash # just in case though. - msgs.warning(f"Attempt to save a tab that is *not* a PypeItFileView!") + log.warning(f"Attempt to save a tab that is *not* a PypeItFileView!") def _save_file(self, file_model : PypeItFileModel, prompt_for_all : bool=False) -> DialogResponses: @@ -760,7 +760,7 @@ def _save_file(self, file_model : PypeItFileModel, prompt_for_all : bool=False) The DialogResponse from the user, or DialogResponses.ACCEPT if it wasn't neccessary to prompt the user. """ - msgs.info(f"Saving config {file_model.name_stem}") + log.info(f"Saving config {file_model.name_stem}") if file_model.save_location is None: dialog = FileDialog.create_save_location_dialog(self.main_window, file_model.name_stem, prompt_for_all=prompt_for_all) response = dialog.show() @@ -835,7 +835,7 @@ def run_setup(self): self.save_all() elif response == DialogResponses.CANCEL: return - msgs.info("run_setup starting operation") + log.info("run_setup starting operation") self.operation_thread.startOperation(SetupOperation(self.model, self)) def createNewPypeItFile(self): @@ -878,7 +878,7 @@ def open_pypeit_file(self): open_dialog = FileDialog.create_open_file_dialog(self.main_window, "Select PypeIt File", file_type=FileType("PypeIt input files",".pypeit")) result = open_dialog.show() if result != DialogResponses.CANCEL: - msgs.info("open_pypeit_file starting operation") + log.info("open_pypeit_file starting operation") self.operation_thread.startOperation(OpenFileOperation(self.model, open_dialog.selected_path, self)) def start_gui(args): @@ -888,7 +888,7 @@ def start_gui(args): # Setup application/window icon TODO this doesn't work in windows. iconPath = Path(__file__).parent / "images/window_icon.png" if not iconPath.exists(): - msgs.info("Icon path does not exist") + log.info("Icon path does not exist") else: app.setWindowIcon(QIcon(str(iconPath))) diff --git a/pypeit/setup_gui/dialog_helpers.py b/pypeit/setup_gui/dialog_helpers.py index 3cfefb5fe5..c647071b5c 100644 --- a/pypeit/setup_gui/dialog_helpers.py +++ b/pypeit/setup_gui/dialog_helpers.py @@ -13,7 +13,7 @@ from pathlib import Path from dataclasses import dataclass -from pypeit import msgs +from pypeit import log from qtpy.QtWidgets import QFileDialog, QMessageBox, QCheckBox, QDialog, QWidget from qtpy.QtCore import QStringListModel, QSettings @@ -231,7 +231,7 @@ def display_error(parent : QWidget, message: str) -> None: parent: The parent widget of the pop-up dialog message: The message to display. """ - msgs.warning(message) # Make sure the message also goes to the logs + log.warning(message) # Make sure the message also goes to the logs QMessageBox.warning(parent, "PypeIt Setup Error", message, QMessageBox.Ok) def prompt_to_save(parent : QWidget) -> DialogResponses: diff --git a/pypeit/setup_gui/model.py b/pypeit/setup_gui/model.py index c44a5aac79..e2aaf6f387 100644 --- a/pypeit/setup_gui/model.py +++ b/pypeit/setup_gui/model.py @@ -21,7 +21,7 @@ from configobj import ConfigObj from datetime import datetime, timezone -from pypeit import msgs, spectrographs +from pypeit import log, spectrographs from pypeit.spectrographs import available_spectrographs from pypeit.pypeitsetup import PypeItSetup from pypeit.metadata import PypeItMetaData @@ -44,7 +44,7 @@ class ModelState(enum.Enum): class LogBuffer(io.TextIOBase): - """Imitation file object that is passed to the PypeIt msgs logging system. It maintains a buffer + """Imitation file object that is passed to the PypeIt log logging system. 
It maintains a buffer of log messages that the user can view through the GUI. It is also used to monitor progress of background operations, by registering regular expressions to watch the log for. @@ -365,7 +365,7 @@ def setData(self, index, value, role=Qt.EditRole): try: self.metadata[colname][index.row()] = value except ValueError as e: - msgs.warning(f"Failed to set {colname} row {index.row()} to '{value}'. ValueError: {e}") + log.warning(f"Failed to set {colname} row {index.row()} to '{value}'. ValueError: {e}") self.dataChanged.emit(index,index,[Qt.DisplayRole, Qt.EditRole]) return True @@ -508,7 +508,7 @@ def removeMetadataRows(self, rows): # We could try to group these rows into ranges, but # it doesn't seem worth it self.beginRemoveRows(QModelIndex(), row, row) - msgs.info(f"Removing metadata row {row}") + log.info(f"Removing metadata row {row}") self.metadata.remove_rows([row]) self.endRemoveRows() @@ -591,7 +591,7 @@ def createCopyForConfig(self, config_name): Return: PypeItMetadataModel: A deep copy of the meatdata matching the config_name """ - msgs.info(f"Creating new metadata for config {config_name}") + log.info(f"Creating new metadata for config {config_name}") config_rows = [ config_name in setup for setup in self.metadata.table['setup'] ] return self.createCopyForRows(config_rows) @@ -829,7 +829,7 @@ def rowCount(self, parent=QModelIndex()): # Column 1 does not have children return 0 - #msgs.info("rowCount valid") + #log.info("rowCount valid") node = parent.internalPointer() return len(node.children) @@ -1024,7 +1024,7 @@ def _update_state(self): """Signal handler that detects changes to the metadata model and updates our model state to ModelState.CHANGED """ - msgs.info("Updating state") + log.info("Updating state") if self.state == ModelState.NEW: # Only move to "CHANGED" if there are rows in the metadata. if self.metadata_model.rowCount() > 0: @@ -1069,14 +1069,14 @@ def save(self): pf = PypeItFile(self.params_model.getConfigLines(),self.metadata_model.getPathsModel().getPaths(), metadata_table, setup_dict,vet=False,preserve_comments=True) - msgs.info(f"Saving filename: {self.filename}") + log.info(f"Saving filename: {self.filename}") if self.save_location is not None: os.makedirs(self.save_location,exist_ok=True) pf.write(self.filename) except Exception as e: - msgs.warning(f"Failed saving setup {self.name_stem} to {self.save_location}.") - msgs.warning(traceback.format_exc()) + log.warning(f"Failed saving setup {self.name_stem} to {self.save_location}.") + log.warning(traceback.format_exc()) # Raise an exception that will look nice when displayed to the GUI raise RuntimeError(f"Failed saving setup {self.name_stem} to {self.save_location}.\nException: {e}") self.state = ModelState.UNCHANGED @@ -1102,10 +1102,10 @@ def __init__(self): def state(self): """ModelState: The state of the obslog model. Either NEW or UNCHANGED.""" if self.metadata_model.metadata is None: - msgs.info("Obslog state is NEW") + log.info("Obslog state is NEW") return ModelState.NEW else: - msgs.info("Obslog state is UNCHANGED") + log.info("Obslog state is UNCHANGED") return ModelState.UNCHANGED def set_spectrograph(self, new_spec): @@ -1114,7 +1114,7 @@ def set_spectrograph(self, new_spec): Args: new_spec (str): The name of the new spectrograph. 
""" - msgs.info(f"Spectrograph is now {new_spec}") + log.info(f"Spectrograph is now {new_spec}") self._spectrograph = spectrographs.util.load_spectrograph(new_spec) if self.metadata_model.spectrograph is not None and self.metadata_model.spectrograph.name != new_spec: self.metadata_model.reset() @@ -1149,7 +1149,7 @@ def add_raw_data_directory(self, new_directory): Args: new_directory (str): The new directory containing raw data. """ - msgs.info(f"Adding raw directory: {new_directory}, current spec is {self._spectrograph}") + log.info(f"Adding raw directory: {new_directory}, current spec is {self._spectrograph}") if new_directory not in self.paths_model.stringList(): row_number = self.paths_model.rowCount() self.paths_model.insertRows(row_number, 1) @@ -1170,7 +1170,7 @@ def scan_raw_data_directories(self): raw_data_files = [] for directory in self.paths_model.stringList(): - msgs.info(f"Scanning directory: {directory}") + log.info(f"Scanning directory: {directory}") for extension in allowed_extensions: # The command line may set a root, which isn't a directory but a prefix if not os.path.isdir(directory): @@ -1178,7 +1178,7 @@ def scan_raw_data_directories(self): else: glob_pattern = os.path.join(directory, "*" + extension) - msgs.info(f"Searching for raw data files with {glob_pattern}") + log.info(f"Searching for raw data files with {glob_pattern}") raw_data_files += glob.glob(glob_pattern) return raw_data_files @@ -1187,7 +1187,7 @@ def scan_raw_data_directories(self): def reset(self): """Reset the model to an empty state.""" - msgs.info(f"Resetting to empty setup.") + log.info(f"Resetting to empty setup.") self.raw_data_files = [] self.raw_data_dirs = [] self.metadata_model.setMetadata(None) @@ -1238,12 +1238,12 @@ def setup_logging(self, verbosity): # TODO: Need help from Dusty to update this self.log_buffer = LogBuffer(logfile,verbosity) - msgs.init(level=msgs.level, log_file=self.log_buffer) -# msgs.reset(verbosity=verbosity, log=self.log_buffer, log_to_stderr=False) - msgs.info(f"QT Version: {qtpy.QT_VERSION}") - msgs.info(f"PySide version: {qtpy.PYSIDE_VERSION}") - msgs.info(f"PyQt version: {qtpy.PYQT_VERSION}") - msgs.info(f"QtPy API_NAME: {qtpy.API_NAME}") + log.init(level=log.level, log_file=self.log_buffer) +# log.reset(verbosity=verbosity, log=self.log_buffer, log_to_stderr=False) + log.info(f"QT Version: {qtpy.QT_VERSION}") + log.info(f"PySide version: {qtpy.PYSIDE_VERSION}") + log.info(f"PyQt version: {qtpy.PYQT_VERSION}") + log.info(f"QtPy API_NAME: {qtpy.API_NAME}") @property def state(self): @@ -1251,20 +1251,20 @@ def state(self): """ if len(self.pypeit_files) == 0: # No pypeit files, just use the obslog state - msgs.info(f"GUI state is {self.obslog_model.state}") + log.info(f"GUI state is {self.obslog_model.state}") return self.obslog_model.state else: if any([file.state!=ModelState.UNCHANGED for file in self.pypeit_files.values()]): - msgs.info("GUI state is CHANGED") + log.info("GUI state is CHANGED") return ModelState.CHANGED else: - msgs.info("GUI state is UNCHANGED") + log.info("GUI state is UNCHANGED") return ModelState.UNCHANGED def reset(self): """Reset the model to an empty state.""" - msgs.info(f"Resetting to NEW state.") + log.info(f"Resetting to NEW state.") self.closeAllFiles() self.obslog_model.reset() self.stateChanged.emit() @@ -1393,7 +1393,7 @@ def open_pypeit_file(self, pypeit_file): pf_model.stateChanged.connect(self.stateChanged) self.pypeit_files[setup_name] = pf_model - msgs.info("Adding empty file model in open_pypeit_file") + log.info("Adding 
empty file model in open_pypeit_file")
         self.filesAdded.emit([pf_model])
         self.stateChanged.emit()
 
@@ -1404,7 +1404,7 @@ def removeFile(self, name):
 
     def createEmptyPypeItFile(self, new_name):
         # Create a new empty configuration.
-        msgs.info(f"Creating new pypeit file model for {new_name}")
+        log.info(f"Creating new pypeit file model for {new_name}")
 
         # Create an empty copy of the obslog metadata for the new file
         empty_metadata = self.obslog_model.metadata_model.createCopyForRows([])
@@ -1418,7 +1418,7 @@ def createEmptyPypeItFile(self, new_name):
         pf_model.stateChanged.connect(self.stateChanged)
         self.pypeit_files[new_name] = pf_model
 
-        msgs.info("Adding emtpy pypeit file in createEmptyPypeItFile")
+        log.info("Adding empty pypeit file in createEmptyPypeItFile")
         self.filesAdded.emit([pf_model])
         self.stateChanged.emit()
         return pf_model
@@ -1442,9 +1442,9 @@ def createFilesForConfigs(self, configs=None, state=ModelState.CHANGED):
         """
         if configs is None:
             configs = self.obslog_model.metadata_model.metadata.unique_configurations()
-            msgs.info(f"Creating files for all unique configurations: {configs}")
+            log.info(f"Creating files for all unique configurations: {configs}")
         else:
-            msgs.info(f"Creating files for configs {configs}")
+            log.info(f"Creating files for configs {configs}")
 
         # Create a new PypeItFileModel for each unique configuration
         config_names = list(configs.keys())
@@ -1460,8 +1460,8 @@ def createFilesForConfigs(self, configs=None, state=ModelState.CHANGED):
             pf_model.stateChanged.connect(self.stateChanged)
             self.pypeit_files[config_name] = pf_model
 
-        msgs.info(f"Current files: {self.pypeit_files}")
+        log.info(f"Current files: {self.pypeit_files}")
         if len(config_names) > 0:
-            msgs.info("Adding pypeit files in createFilesForConfigs")
+            log.info("Adding pypeit files in createFilesForConfigs")
             self.filesAdded.emit(list(self.pypeit_files.values()))
 
diff --git a/pypeit/setup_gui/text_viewer.py b/pypeit/setup_gui/text_viewer.py
index 143b112490..a5752e063e 100644
--- a/pypeit/setup_gui/text_viewer.py
+++ b/pypeit/setup_gui/text_viewer.py
@@ -13,7 +13,7 @@
 from qtpy.QtGui import QIcon, QFont,QTextCursor,QFontDatabase
 from qtpy.QtCore import Qt, Signal, QSettings, QEvent
 
-from pypeit import msgs
+from pypeit import log
 from pypeit.setup_gui.dialog_helpers import display_error, FileDialog, FileType, PersistentStringListModel, DialogResponses
 from pypeit.setup_gui.model import LogBuffer
 
@@ -120,7 +120,7 @@ def save(self):
                     self._text_stream.seek(0)
                     for message in self._text_stream:
                         f.write(message)
-                msgs.info(f"File saved to {save_dialog.selected_path}.")
+                log.info(f"File saved to {save_dialog.selected_path}.")
                 self._filename = save_dialog.selected_path
         except Exception as e:
             display_error(self, str(e))
diff --git a/pypeit/setup_gui/view.py b/pypeit/setup_gui/view.py
index 2ae4f69b65..8f6b7a86ad 100644
--- a/pypeit/setup_gui/view.py
+++ b/pypeit/setup_gui/view.py
@@ -17,22 +17,22 @@
 from pypeit.setup_gui.model import ModelState, PypeItMetadataModel
 from pypeit.setup_gui.text_viewer import LogWindow, TextViewerWindow
 from pypeit.setup_gui.dialog_helpers import DialogResponses, FileDialog, PersistentStringListModel
-from pypeit import msgs
+from pypeit import log
 
 def debugSizeStuff(widget:QWidget, name="widget"):
     """Helper method for logging sizxing information about a wdiget and its layout."""
-    msgs.info(f"{name} (width/height): {widget.width()}/{widget.height()} geometry x/y/w/h: {widget.geometry().x()}/{widget.geometry().y()}/{widget.geometry().width()}/{widget.geometry().height()} min w/h 
{widget.minimumWidth()}/{widget.minimumHeight()} hint w/h {widget.sizeHint().width()}/{widget.sizeHint().height()} min hint w/h {widget.minimumSizeHint().width()}/{widget.minimumSizeHint().height()} cm tlbr: {widget.contentsMargins().top()}/{widget.contentsMargins().left()}/{widget.contentsMargins().bottom()}/{widget.contentsMargins().right()} frame w/h {widget.frameSize().width()}/{widget.frameSize().height()}") + log.info(f"{name} (width/height): {widget.width()}/{widget.height()} geometry x/y/w/h: {widget.geometry().x()}/{widget.geometry().y()}/{widget.geometry().width()}/{widget.geometry().height()} min w/h {widget.minimumWidth()}/{widget.minimumHeight()} hint w/h {widget.sizeHint().width()}/{widget.sizeHint().height()} min hint w/h {widget.minimumSizeHint().width()}/{widget.minimumSizeHint().height()} cm tlbr: {widget.contentsMargins().top()}/{widget.contentsMargins().left()}/{widget.contentsMargins().bottom()}/{widget.contentsMargins().right()} frame w/h {widget.frameSize().width()}/{widget.frameSize().height()}") layout = widget.layout() if layout is None: - msgs.info(f"{name} layout is None") + log.info(f"{name} layout is None") else: - msgs.info(f"{name} layout size constraint {layout.sizeConstraint()} spacing: {layout.spacing()} cm: tlbr {layout.contentsMargins().top()}/{layout.contentsMargins().left()}/{layout.contentsMargins().bottom()}/{layout.contentsMargins().right()} totalMinSize (w/h): {layout.totalMinimumSize().width()}/{layout.totalMinimumSize().width()} totalMaxSize (w/h): {layout.totalMaximumSize().width()}/{layout.totalMaximumSize().width()} totalHint (w/h): {layout.totalSizeHint().width()}/{layout.totalSizeHint().width()}") + log.info(f"{name} layout size constraint {layout.sizeConstraint()} spacing: {layout.spacing()} cm: tlbr {layout.contentsMargins().top()}/{layout.contentsMargins().left()}/{layout.contentsMargins().bottom()}/{layout.contentsMargins().right()} totalMinSize (w/h): {layout.totalMinimumSize().width()}/{layout.totalMinimumSize().width()} totalMaxSize (w/h): {layout.totalMaximumSize().width()}/{layout.totalMaximumSize().width()} totalHint (w/h): {layout.totalSizeHint().width()}/{layout.totalSizeHint().width()}") fm = widget.fontMetrics() if fm is None: - msgs.info(f"{name} fm is None") + log.info(f"{name} fm is None") else: - msgs.info(f"{name} fm lineSpacing: {fm.lineSpacing()} maxWidth: {fm.maxWidth()}, avg width: {fm.averageCharWidth()}") + log.info(f"{name} fm lineSpacing: {fm.lineSpacing()} maxWidth: {fm.maxWidth()}, avg width: {fm.averageCharWidth()}") def calculateButtonMinSize(button_widget : QPushButton) -> QSize: """Calculates and sets the minimum size of a budget widget @@ -64,7 +64,7 @@ def calculateButtonMinSize(button_widget : QPushButton) -> QSize: # The QT code doubles the frame size but not the margin, so we do the same min_size = QSize(text_size.width() + button_margin + button_default_frame*2 + default_indicator*2, text_size.height() + button_margin + button_default_frame*2 + default_indicator*2) - msgs.info(f"Calculated button {button_widget.text()} minimum size ({min_size.width()}/{min_size.height()}) with text_size ({text_size.width()}/{text_size.height()}) margin size ({button_margin}) frame width ({button_default_frame}) and default indicator width ({default_indicator})") + log.info(f"Calculated button {button_widget.text()} minimum size ({min_size.width()}/{min_size.height()}) with text_size ({text_size.width()}/{text_size.height()}) margin size ({button_margin}) frame width ({button_default_frame}) and default indicator width 
({default_indicator})") return min_size @@ -243,7 +243,7 @@ def __init__(self, parent, allowed_values, index, num_lines): if checkbox.width() > max_checkbox_width: max_checkbox_width = checkbox.width() - msgs.info(f"Max checkbox width: {max_checkbox_width}") + log.info(f"Max checkbox width: {max_checkbox_width}") scroll_area.setWidget(checkbox_container) # Figure out the minimum width @@ -281,7 +281,7 @@ def __init__(self, parent, allowed_values, index, num_lines): button_min_width = max(ok_button_min_size.width(), cancel_button_min_size.width()) ok_cancel_container_min_width = button_min_width*2 + ok_cancel_layout.spacing() + ok_cancel_layout_margins.left() + ok_cancel_layout_margins.right() - msgs.info(f"Okay cancel container min_width: {ok_cancel_container_min_width}") + log.info(f"Okay cancel container min_width: {ok_cancel_container_min_width}") if min_width < ok_cancel_container_min_width: min_width = ok_cancel_container_min_width @@ -303,7 +303,7 @@ def __init__(self, parent, allowed_values, index, num_lines): self.setMinimumSize(min_width, min_height) self._button_group.buttonToggled.connect(self._choiceChecked) - msgs.info(f"min_width/height: {min_width}/{min_height}") + log.info(f"min_width/height: {min_width}/{min_height}") debugSizeStuff(self, "Enum Editor") debugSizeStuff(checkbox_container, "Checkbox Container") debugSizeStuff(ok_cancel_container, "OK/Cancel Container") @@ -415,12 +415,12 @@ def createEditor(self, parent, option, index): column_name = model.getColumnNameFromNum(index) if column_name == "frametype": - msgs.info("Creating enum list editor for frametype") + log.info("Creating enum list editor for frametype") editor= PypeItEnumListEditor(parent=parent, index=index, num_lines=5, allowed_values=model.getAllFrameTypes()) editor.closed.connect(self.editorClosed) return editor - msgs.info(f"Creating default editor for {column_name}") + log.info(f"Creating default editor for {column_name}") return super().createEditor(parent, option, index) def setEditorData(self, editor, index): @@ -431,10 +431,10 @@ def setEditorData(self, editor, index): index (QModelIndex): The index of the item being edited. """ if isinstance(editor, PypeItEnumListEditor): - msgs.info(f"Setting editor data {index.data(Qt.EditRole)}") + log.info(f"Setting editor data {index.data(Qt.EditRole)}") editor.setSelectedValues(index.data(Qt.EditRole)) else: - msgs.info("Setting default editor data") + log.info("Setting default editor data") super().setEditorData(editor, index) def setModelData(self,editor,model,index): @@ -446,10 +446,10 @@ def setModelData(self,editor,model,index): index (QModelIndex): The index of the item being edited. 
""" if isinstance(editor, PypeItEnumListEditor): - msgs.info(f"Setting choice model data: {editor.selectedValues()}") + log.info(f"Setting choice model data: {editor.selectedValues()}") model.setData(index, editor.selectedValues()) else: - msgs.info("Setting default model data") + log.info("Setting default model data") super().setModelData(editor,model,index) def updateEditorGeometry(self, editor, option, index): @@ -468,9 +468,9 @@ def updateEditorGeometry(self, editor, option, index): parent_geometry = editor.parent().geometry() editor_min_size = editor.minimumSize() - msgs.info(f"Given rect: {(option.rect.x(), option.rect.y(), option.rect.width(), option.rect.height())}") - msgs.info(f"parent_geometry: {(parent_geometry.x(), parent_geometry.y(), parent_geometry.width(), parent_geometry.height())}") - msgs.info(f"editor min size: {editor_min_size.width()}, {editor_min_size.height()}") + log.info(f"Given rect: {(option.rect.x(), option.rect.y(), option.rect.width(), option.rect.height())}") + log.info(f"parent_geometry: {(parent_geometry.x(), parent_geometry.y(), parent_geometry.width(), parent_geometry.height())}") + log.info(f"editor min size: {editor_min_size.width()}, {editor_min_size.height()}") editor_x = option.rect.x() editor_y = option.rect.y() @@ -502,7 +502,7 @@ def updateEditorGeometry(self, editor, option, index): geometry = QRect(editor_x, editor_y, editor_width, editor_min_size.height()) - msgs.info(f"Updating editor geometry to {(geometry.x(), geometry.y(), geometry.width(), geometry.height())}") + log.info(f"Updating editor geometry to {(geometry.x(), geometry.y(), geometry.width(), geometry.height())}") editor.setGeometry(geometry) else: super().updateEditorGeometry(editor, option, index) @@ -554,8 +554,8 @@ def __init__(self, parent, model, controller): min_height = (self.contentsMargins().top() + self.contentsMargins().bottom() + self.horizontalScrollBar().sizeHint().height() + 11*row_height) - msgs.info(f"current min_height/height/hint h: {self.minimumHeight()}/{self.height()}/{self.sizeHint().height()}, scrollbar hint h {self.horizontalScrollBar().sizeHint().height()}, currentmargin top/bottom: {self.contentsMargins().top()}/{self.contentsMargins().bottom()} hdr min_height/height/hint h: {self.horizontalHeader().minimumHeight()}/{self.horizontalHeader().height()}/{self.horizontalHeader().sizeHint().height()}") - msgs.info(f"rowHeight: {row_height} current min_height {self.minimumHeight()} new min_height {min_height}") + log.info(f"current min_height/height/hint h: {self.minimumHeight()}/{self.height()}/{self.sizeHint().height()}, scrollbar hint h {self.horizontalScrollBar().sizeHint().height()}, currentmargin top/bottom: {self.contentsMargins().top()}/{self.contentsMargins().bottom()} hdr min_height/height/hint h: {self.horizontalHeader().minimumHeight()}/{self.horizontalHeader().height()}/{self.horizontalHeader().sizeHint().height()}") + log.info(f"rowHeight: {row_height} current min_height {self.minimumHeight()} new min_height {min_height}") if min_height > self.minimumHeight(): self.setMinimumHeight(min_height) @@ -636,9 +636,9 @@ def _handleModelReset(self): Fix column and row sizes after the model is reset. 
""" colCount = self.model().columnCount() - msgs.info(f"# Cols: {colCount}") + log.info(f"# Cols: {colCount}") colSizeHints = [self.sizeHintForColumn(i) for i in range(colCount)] - msgs.info(f"Col size hints: {colSizeHints}") + log.info(f"Col size hints: {colSizeHints}") self.resizeColumnsToContents() self.resizeRowsToContents() @@ -773,7 +773,7 @@ def __init__(self, spec_name, config, lines_to_display, parent=None): # Figure out the correct height for this panel, so that only the spectrograph and self.number_of_lines # config keys are visible - msgs.info(f"font height: {fm.height()} vertical spacing {self._form_widget_layout.verticalSpacing()}") + log.info(f"font height: {fm.height()} vertical spacing {self._form_widget_layout.verticalSpacing()}") self.setMaximumHeight(self.computeHeight(max(self.lines_to_display, len(self._config_labels)))) @@ -790,7 +790,7 @@ def computeHeight(self, lines_to_display:int) ->int: if verticalSpacing == -1: verticalSpacing = fm.leading() self._form_widget_layout.setVerticalSpacing(fm.leading()) - msgs.info(f"Set vertical spacing to {verticalSpacing}") + log.info(f"Set vertical spacing to {verticalSpacing}") min_fw_height = (verticalSpacing)*(lines_to_display-1) + fm.height()*lines_to_display # The height of this panel is that height plus the margins + the group box title @@ -799,19 +799,19 @@ def computeHeight(self, lines_to_display:int) ->int: form_widget_margins = self._form_widget.contentsMargins() layout_margins = self.layout().contentsMargins() - msgs.info(f"verticalSpacing: {self._form_widget_layout.verticalSpacing()}") - msgs.info(f"fontMetrics height/leading: {fm.height()}/{fm.leading()}") - msgs.info(f"group_box_margins (t/b) ({group_box_margins.top()}/{group_box_margins.bottom()})") - msgs.info(f"scroll_area_margins (t/b) ({scroll_area_margins.top()}/{scroll_area_margins.bottom()})") - msgs.info(f"layout_margins (t/b) ({layout_margins.top()}/{layout_margins.bottom()})") - msgs.info(f"form_widget_margins (t/b) ({form_widget_margins.top()}/{form_widget_margins.bottom()})") + log.info(f"verticalSpacing: {self._form_widget_layout.verticalSpacing()}") + log.info(f"fontMetrics height/leading: {fm.height()}/{fm.leading()}") + log.info(f"group_box_margins (t/b) ({group_box_margins.top()}/{group_box_margins.bottom()})") + log.info(f"scroll_area_margins (t/b) ({scroll_area_margins.top()}/{scroll_area_margins.bottom()})") + log.info(f"layout_margins (t/b) ({layout_margins.top()}/{layout_margins.bottom()})") + log.info(f"form_widget_margins (t/b) ({form_widget_margins.top()}/{form_widget_margins.bottom()})") computedHeight = (min_fw_height + # fm.height() + # Group Box Title group_box_margins.top() + group_box_margins.bottom() + scroll_area_margins.top() + scroll_area_margins.bottom() + layout_margins.top() + layout_margins.bottom() + form_widget_margins.top() + form_widget_margins.bottom()) - msgs.info(f"computedHeight: {computedHeight}") + log.info(f"computedHeight: {computedHeight}") return computedHeight def setNewValues(self, config_dict: dict) -> None: @@ -862,7 +862,7 @@ def _getMinWidth(self) -> int: if len(self._config_labels) > self.lines_to_display: if self._scroll_area.verticalScrollBar(): min_width += self._scroll_area.verticalScrollBar().sizeHint().width() - msgs.info(f"new minWidth: {min_width} max key width: {max_key_width} max_value width {max_value_width} horizontal spacing {self._form_widget_layout.horizontalSpacing()} margins left: {margins.left()} margins right: {margins.right()}") + log.info(f"new minWidth: {min_width} max key width: 
{max_key_width} max_value width {max_value_width} horizontal spacing {self._form_widget_layout.horizontalSpacing()} margins left: {margins.left()} margins right: {margins.right()}") return min_width class TabManagerBaseTab(QWidget): @@ -1044,7 +1044,7 @@ def __init__(self, model, controller): self.model.stateChanged.connect(self.update_from_model) debugSizeStuff(self.config_panel,"Config Panel") - msgs.info(f"config panel flat: {self.config_panel.isFlat()}") + log.info(f"config panel flat: {self.config_panel.isFlat()}") def update_from_model(self): """ @@ -1222,7 +1222,7 @@ def __init__(self, model, controller, parent=None): def _deletePaths(self, parent): """Signal handler that removes raw data paths from the obslog""" - msgs.info(f"Delete selection") + log.info(f"Delete selection") selection = self._paths_viewer.selectedIndexes() rows = [index.row() for index in selection] self._controller.removePaths(rows) @@ -1236,7 +1236,7 @@ def setModel(self,model): self.model=model if model.spec_name is not None: self.spectrograph.setCurrentIndex(self.spectrograph.findText(model.spec_name)) - msgs.info(f"Set current text to {model.spec_name}, current index {self.spectrograph.currentIndex()}") + log.info(f"Set current text to {model.spec_name}, current index {self.spectrograph.currentIndex()}") self.update_raw_data_paths_state() self.obslog_table.setModel(model.metadata_model) self._controller.setModel(model) @@ -1370,7 +1370,7 @@ def addNewTab(self, tab): """ index = self.count()-1 index=self.insertTab(index, tab, tab.name) - msgs.info(f"Added {tab.name} at index {index}") + log.info(f"Added {tab.name} at index {index}") self._tabNames.insert(index,tab.name) self.updateTabText(tab.name,tab.state) if tab.closeable: @@ -1388,7 +1388,7 @@ def closeTab(self, tab_name): try: index = self._tabNames.index(tab_name) except ValueError : - msgs.warning(f"Failed to find tab named {tab_name} in list.") + log.warning(f"Failed to find tab named {tab_name} in list.") return tab = self.widget(index) if tab.closeable: @@ -1408,7 +1408,7 @@ def updateTabText(self, tab_name, tab_state): try: index = self._tabNames.index(tab_name) except ValueError : - msgs.warning(f"Failed to find tab named {tab_name} in list.") + log.warning(f"Failed to find tab named {tab_name} in list.") return tab = self.widget(index) @@ -1507,7 +1507,7 @@ def create_progress_dialog(self, op_caption, max_progress_value, cancel_func): cancel_func (:class:`collections.abc.Callable`): A callable to deal with cancel being pressed in the progress dialog. """ - msgs.info(f"Starting operation {op_caption} max progress: {max_progress_value}") + log.info(f"Starting operation {op_caption} max progress: {max_progress_value}") self.current_op_progress_dialog = QProgressDialog(self.tr(op_caption), self.tr("Cancel"), 0, max_progress_value, parent=self) self.current_op_progress_dialog.setMinimumWidth(380) self.current_op_progress_dialog.setWindowTitle(op_caption) @@ -1523,9 +1523,9 @@ def show_operation_progress(self, increase, message=None): increase (int): How much to increase the current progress by. message (str, Optional): A message indicating what step has been performed. 
""" - msgs.info(f"dialog is none {self.current_op_progress_dialog is None}") + log.info(f"dialog is none {self.current_op_progress_dialog is None}") if self.current_op_progress_dialog is not None: - msgs.info(f"increase {increase} message{message} current value {self.current_op_progress_dialog.value()}") + log.info(f"increase {increase} message{message} current value {self.current_op_progress_dialog.value()}") self.current_op_progress_dialog.setValue(self.current_op_progress_dialog.value() + increase) if message is not None: self.current_op_progress_dialog.setLabelText(message) @@ -1534,7 +1534,7 @@ def operation_complete(self): """ Stop displaying progress for an operation because it has completed.. """ - msgs.info(f"Ending operation, dialog is none {self.current_op_progress_dialog is None}") + log.info(f"Ending operation, dialog is none {self.current_op_progress_dialog is None}") if self.current_op_progress_dialog is not None: self.current_op_progress_dialog.done(QDialog.Accepted) self.current_op_progress_dialog = None @@ -1553,7 +1553,7 @@ def update_setup_button(self): data directories have been selected.""" # Setup can only be run if the spectrograph is set and there's at least one # raw data directory - msgs.info(f"Checking setup button status spec: {self.model.obslog_model.spec_name} dirs {self.model.obslog_model.raw_data_directories}") + log.info(f"Checking setup button status spec: {self.model.obslog_model.spec_name} dirs {self.model.obslog_model.raw_data_directories}") if (self.model.obslog_model.spec_name is not None and len(self.model.obslog_model.raw_data_directories) > 0): self.setupButton.setEnabled(True) @@ -1591,9 +1591,9 @@ def _helpButton(self): result = QDesktopServices.openUrl(QUrl("https://pypeit.readthedocs.io/en/latest/")) if result: - msgs.info("Opened PypeIT docs.") + log.info("Opened PypeIT docs.") else: - msgs.warning("Failed to open PypeIt docs at 'https://pypeit.readthedocs.io/en/latest/'") + log.warning("Failed to open PypeIt docs at 'https://pypeit.readthedocs.io/en/latest/'") def _create_button_box(self): """Create the box with action buttons. @@ -1685,7 +1685,7 @@ def create_file_tabs(self, pypeit_file_models): Args: pypeit_file_models (list of :class:`pypeit.setup_gui.model.PypeItFileModel`): Models for the tabs to add. """ - msgs.info(f"create_file_tabs for {len(pypeit_file_models)} unique configs") + log.info(f"create_file_tabs for {len(pypeit_file_models)} unique configs") try: self.tab_widget.setUpdatesEnabled(False) # To prevent flickering when updating for model in pypeit_file_models: @@ -1701,7 +1701,7 @@ def delete_tabs(self, file_list): Args: tab_list (list of str): List of the configuration names removed. 
""" - msgs.info(f"View Deleting tabs {file_list}") + log.info(f"View Deleting tabs {file_list}") if len(file_list) == 0: return diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py index bbec5ba012..70d4424df4 100644 --- a/pypeit/slittrace.py +++ b/pypeit/slittrace.py @@ -18,7 +18,7 @@ from astropy.io import fits from pypeit import PypeItBitMaskError -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import datamodel from pypeit import calibframe @@ -533,7 +533,7 @@ def get_radec_image(self, wcs, alignSplines, tilts, slit_compute=None, slice_off # Prepare the print out substring = '' if slice_offset is None else f' with slice_offset={slice_offset:.3f}' - msgs.info("Generating an RA/DEC image"+substring) + log.info("Generating an RA/DEC image"+substring) # Check the input if slice_offset is None: slice_offset = 0.0 @@ -773,7 +773,7 @@ def spatial_coordinate_image(self, slitidx=None, full=False, slitid_img=None, if np.any(indx[:,_slitidx]): bad_slits = np.where(np.any(indx, axis=0))[0] # TODO: Shouldn't this fault? - msgs.warning('Slits {0} have negative (or 0) slit width!'.format(bad_slits)) + log.warning('Slits {0} have negative (or 0) slit width!'.format(bad_slits)) # Output image coo_img = np.zeros((self.nspec,self.nspat), dtype=float) @@ -861,7 +861,7 @@ def mask_add_missing_obj(self, sobjs, spat_flexure, fwhm, boxcar_rad): been found and traced """ - msgs.info('Add undetected objects at the expected location from slitmask design.') + log.info('Add undetected objects at the expected location from slitmask design.') if fwhm is None: raise PypeItError('A FWHM for the optimal extraction must be provided. See `find_fwhm` in ' @@ -909,7 +909,7 @@ def mask_add_missing_obj(self, sobjs, spat_flexure, fwhm, boxcar_rad): # If we keep what follows, probably should add some tolerance to be off the edge # Otherwise things break in skysub if (SPAT_PIXPOS > right_tweak[specmid, islit]) or (SPAT_PIXPOS < left_tweak[specmid, islit]): - msgs.warning("Targeted object is off the detector") + log.warning("Targeted object is off the detector") continue # Generate a new specobj @@ -1023,13 +1023,13 @@ def assign_maskinfo(self, sobjs, plate_scale, spat_flexure, TOLER=1.): on_det = (sobjs.DET == self.detname) & (sobjs.OBJID > 0) # use only positive detections cut_sobjs = sobjs[on_det] if cut_sobjs.nobj == 0: - msgs.warning('NO detected objects.') + log.warning('NO detected objects.') return sobjs else: - msgs.warning('NO detected objects.') + log.warning('NO detected objects.') return sobjs - msgs.info('Assign slitmask design info to detected objects. ' + log.info('Assign slitmask design info to detected objects. ' 'Matching tolerance includes user-provided tolerance, slit tracing uncertainties and object size.') # get slits edges init @@ -1076,8 +1076,8 @@ def assign_maskinfo(self, sobjs, plate_scale, spat_flexure, TOLER=1.): # Within TOLER? 
# separation in pixels separ = measured[idx] - (expected[idx] + self.maskdef_offset) - msgs.info('MASKDEF_ID:{}'.format(maskid)) - msgs.info('Difference between expected and detected object ' + log.info('MASKDEF_ID:{}'.format(maskid)) + log.info('Difference between expected and detected object ' 'positions: {} arcsec'.format(np.round(separ*plate_scale, 2))) # we include in the tolerance the rms of the slit edges matching and the size of # the detected object with the highest peak flux @@ -1262,13 +1262,13 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig # If slitmask offset provided by the user, just save it and return if slitmask_off is not None: self.maskdef_offset = slitmask_off - msgs.info('User-provided slitmask offset: {} pixels ({} arcsec)'.format(round(self.maskdef_offset, 2), + log.info('User-provided slitmask offset: {} pixels ({} arcsec)'.format(round(self.maskdef_offset, 2), round(self.maskdef_offset*platescale, 2))) return # If using the dither offeset recorde in the header, just save it and return if dither_off is not None: self.maskdef_offset = -dither_off/platescale - msgs.info('Slitmask offset from the dither pattern: {} pixels ({} arcsec)'. + log.info('Slitmask offset from the dither pattern: {} pixels ({} arcsec)'. format(round(self.maskdef_offset, 2), round(self.maskdef_offset*platescale, 2))) return @@ -1277,12 +1277,12 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig on_det = (sobjs.DET == self.detname) & (sobjs.OBJID > 0) # use only positive detections cut_sobjs = sobjs[on_det] if cut_sobjs.nobj == 0: - msgs.warning('NO detected objects. Slitmask offset cannot be estimated in ' + log.warning('NO detected objects. Slitmask offset cannot be estimated in ' f'{self.detname}.') self.maskdef_offset = 0.0 return else: - msgs.warning('NO detected objects. Slitmask offset cannot be estimated in ' + log.warning('NO detected objects. Slitmask offset cannot be estimated in ' f'{self.detname}.') self.maskdef_offset = 0.0 return @@ -1341,12 +1341,12 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig if align_offs.size > 0: mean, median_off, std = sigma_clipped_stats(align_offs, sigma=2.) self.maskdef_offset = median_off - msgs.info(f'Slitmask offset estimated using ALIGN BOXES in {self.detname}: ' + log.info(f'Slitmask offset estimated using ALIGN BOXES in {self.detname}: ' f'{round(self.maskdef_offset, 2)} pixels (' f'{round(self.maskdef_offset*platescale, 2)} arcsec).') else: self.maskdef_offset = 0.0 - msgs.info('NO objects detected in ALIGN BOXES. Slitmask offset ' + log.info('NO objects detected in ALIGN BOXES. Slitmask offset ' f'cannot be estimated in {self.detname}.') return @@ -1357,7 +1357,7 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig sidx = np.where(cut_sobjs.MASKDEF_ID == bright_maskdefid)[0] if sidx.size == 0: self.maskdef_offset = 0.0 - msgs.info(f'Object in slit {bright_maskdefid} not detected. Slitmask offset ' + log.info(f'Object in slit {bright_maskdefid} not detected. 
Slitmask offset ' f'cannot be estimated in {self.detname}.') else: # Parse the peak fluxes @@ -1367,7 +1367,7 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig bright_measured = measured[imx_sidx] bright_expected = expected[imx_sidx] self.maskdef_offset = bright_measured - bright_expected - msgs.info('Slitmask offset computed using bright object in slit ' + log.info('Slitmask offset computed using bright object in slit ' f'{bright_maskdefid} ({self.detname}): ' f'{round(self.maskdef_offset, 2)} pixels (' f'{round(self.maskdef_offset*platescale, 2)} arcsec)') @@ -1385,15 +1385,15 @@ def get_maskdef_offset(self, sobjs, platescale, spat_flexure, slitmask_off, brig off = highsnr_measured - highsnr_expected mean, median_off, std = sigma_clipped_stats(off, sigma=2.) self.maskdef_offset = median_off - msgs.info(f'Slitmask offset estimated in {self.detname}: ' + log.info(f'Slitmask offset estimated in {self.detname}: ' f'{round(self.maskdef_offset, 2)} pixels (' f'{round(self.maskdef_offset*platescale, 2)} arcsec)') else: - msgs.warning(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. ' + log.warning(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. ' f'Slitmask offset cannot be estimated in {self.detname}.') self.maskdef_offset = 0.0 else: - msgs.warning(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. ' + log.warning(f'Less than 3 objects detected above {snr_thrshd} sigma threshold. ' f'Slitmask offset cannot be estimated in {self.detname}.') self.maskdef_offset = 0.0 @@ -1424,10 +1424,10 @@ def get_maskdef_extract_fwhm(self, sobjs, platescale, fwhm_parset, find_fwhm): :obj:`float`: FWHM in pixels to be used in the optimal extraction """ - msgs.info('Determining the FWHM to be used for the optimal extraction of `maskdef_extract` objects') + log.info('Determining the FWHM to be used for the optimal extraction of `maskdef_extract` objects') fwhm = None if fwhm_parset is not None: - msgs.info(f'Using user-provided FWHM = {fwhm_parset}"') + log.info(f'Using user-provided FWHM = {fwhm_parset}"') fwhm = fwhm_parset/platescale elif sobjs.nobj > 0: # Use average FWHM of detected objects, but remove the objects in the alignment boxes @@ -1444,10 +1444,10 @@ def get_maskdef_extract_fwhm(self, sobjs, platescale, fwhm_parset, find_fwhm): if all_fwhm.size > 0: # compute median _, fwhm, _ = sigma_clipped_stats(all_fwhm, sigma=2.) - msgs.info('Using median FWHM = {:.3f}" from detected objects.'.format(fwhm*platescale)) + log.info('Using median FWHM = {:.3f}" from detected objects.'.format(fwhm*platescale)) if fwhm is None: fwhm = find_fwhm - msgs.warning('The median FWHM cannot be determined because no objects were detected. ' + log.warning('The median FWHM cannot be determined because no objects were detected. ' 'Using `find_fwhm` = {:.3f}". if the user wants to provide a value ' 'set parameter `missing_objs_fwhm` in `SlitMaskPar`'.format(fwhm*platescale)) @@ -1617,7 +1617,7 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors): calib_slits = np.array(calib_slits) if list_detectors is None: - msgs.warning('No average slitmask offset computed') + log.warning('No average slitmask offset computed') return calib_slits # unpack list_detectors @@ -1636,10 +1636,10 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors): if slitmask_offsets.size == 0: # If all detectors have maskdef_offset=0 give a warning - msgs.warning('No slitmask offset could be measured. Assumed to be zero. 
')
-            msgs.warning('RA, DEC, OBJNAME assignment and forced extraction of undetected objects MAY BE WRONG! '
+            log.warning('No slitmask offset could be measured. Assumed to be zero. ')
+            log.warning('RA, DEC, OBJNAME assignment and forced extraction of undetected objects MAY BE WRONG! '
                          'Especially for dithered observations!')
-            msgs.warning('To provide a value set `slitmask_offset` in `SlitMaskPar`')
+            log.warning('To provide a value set `slitmask_offset` in `SlitMaskPar`')
 
         return calib_slits
 
@@ -1653,7 +1653,7 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors):
         for cs in calib_slits:
             # assign median to each det
             cs.maskdef_offset = median_off
-        msgs.info('Average Slitmask offset: {:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))
+        log.info('Average Slitmask offset: {:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))
 
         return calib_slits
 
@@ -1664,7 +1664,7 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors):
             if cs.detname in spectrograph_dets[0]:
                 # assign median to each blue det
                 cs.maskdef_offset = median_off
-        msgs.info('Average Slitmask offset for the blue detectors: '
+        log.info('Average Slitmask offset for the blue detectors: '
                   '{:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))
 
     # which dets from calib_slits are red?
@@ -1678,7 +1678,7 @@ def average_maskdef_offset(calib_slits, platescale, list_detectors):
         for cs in calib_slits:
             if cs.detname in spectrograph_dets[1]:
                 cs.maskdef_offset = median_off
-        msgs.info('Average Slitmask offset for the red detectors: '
+        log.info('Average Slitmask offset for the red detectors: '
                   '{:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))
 
     return calib_slits
 
@@ -1711,7 +1711,7 @@ def assign_addobjs_alldets(sobjs, calib_slits, spat_flexure, platescale, slitmas
     # grab corresponding detectors
     calib_dets = np.array([ss.detname for ss in calib_slits])
     for i in range(calib_dets.size):
-        msgs.info('DET: {}'.format(calib_dets[i]))
+        log.info('DET: {}'.format(calib_dets[i]))
         # Assign RA,DEC, OBJNAME to detected objects and add undetected objects
         if calib_slits[i].maskdef_designtab is not None:
             # Assign slitmask design information to detected objects
diff --git a/pypeit/spec2dobj.py b/pypeit/spec2dobj.py
index 113b967c7d..7e68a3f540 100644
--- a/pypeit/spec2dobj.py
+++ b/pypeit/spec2dobj.py
@@ -18,7 +18,7 @@
 from astropy.stats import mad_std
 from astropy import table
 
-from pypeit import msgs
+from pypeit import log
 from pypeit import PypeItError
 from pypeit import io
 from pypeit import datamodel
@@ -525,7 +525,7 @@ def __setitem__(self, item, value):
         if not isinstance(value, Spec2DObj):
             raise KeyError('Any item not assigned to the meta dictionary must be a Spec2DObj.')
         if value.detname is not None and value.detname != item:
-            msgs.warning(f'Mismatch between keyword used to define the Spec2DObj item ({item}) '
+            log.warning(f'Mismatch between keyword used to define the Spec2DObj item ({item}) '
                          f'and the name of the detector/mosaic ({value.detname}).')
         self.__dict__[item] = value
 
@@ -638,7 +638,7 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None,
         if _outfile.exists():
            # Clobber?
            if not overwrite:
-                msgs.warning(f'File {_outfile} exits. Use -o to overwrite.')
+                log.warning(f'File {_outfile} exists.
Use -o to overwrite.') return # Load up the original _allspecobj = AllSpec2DObj.from_fits(_outfile) @@ -691,10 +691,10 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None, try: prihdu.header[self.hdr_prefix+key.upper()] = self['meta'][key] except: - msgs.warning(f'Cannot add meta entry {key} to primary header!') + log.warning(f'Cannot add meta entry {key} to primary header!') continue if key.lower() != key: - msgs.warning('Keywords in the meta dictionary are always read back in as lower case. ' + log.warning('Keywords in the meta dictionary are always read back in as lower case. ' f'Subsequent reads of {_outfile} will have converted {key} to ' f'{key.lower()}!') @@ -718,7 +718,7 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None, # Finish hdulist = fits.HDUList(hdus) hdulist.writeto(_outfile, overwrite=overwrite) - msgs.info(f'Wrote: {_outfile}') + log.info(f'Wrote: {_outfile}') def __repr__(self): # Generate sets string diff --git a/pypeit/specobj.py b/pypeit/specobj.py index ae7213bc4f..59e83d1101 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -15,7 +15,7 @@ from linetools.spectra import xspectrum1d -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit.core import flexure from pypeit.core import flux_calib @@ -325,7 +325,7 @@ def to_hdu(self, **kwargs): set to True. """ if 'force_to_bintbl' in kwargs and not kwargs['force_to_bintbl']: - msgs.warning(f'Writing a {self.__class__.__name__} always requires force_to_bintbl=True') + log.warning(f'Writing a {self.__class__.__name__} always requires force_to_bintbl=True') del kwargs['force_to_bintbl'] return super().to_hdu(force_to_bintbl=True, **kwargs) @@ -525,7 +525,7 @@ def apply_spectral_flexure(self, shift, sky_spec): # Apply for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is not None: - msgs.info( + log.info( f"Applying flexure correction to {attr:s} extraction for object:\n{self.NAME}" ) self[attr+'_WAVE'] = flexure.flexure_interp(shift, self[attr+'_WAVE']).copy() @@ -589,7 +589,7 @@ def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_ for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is None: continue - msgs.info(f"Fluxing {attr} extraction for:\n{self}") + log.info(f"Fluxing {attr} extraction for:\n{self}") wave = self[attr+'_WAVE'] # Interpolate the sensitivity function onto the wavelength grid of the data @@ -607,7 +607,7 @@ def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_ flam_ivar = self[attr+'_COUNTS_IVAR']/sens_factor**2 # Mask bad pixels - msgs.info(" Masking bad pixels") + log.info(" Masking bad pixels") msk = np.zeros_like(sens_factor).astype(bool) msk[sens_factor <= 0.] = True msk[self[attr+'_COUNTS_IVAR'] <= 0.] = True @@ -637,7 +637,7 @@ def apply_helio(self, vel_corr, refframe): # Apply for attr in ['BOX', 'OPT']: if self[attr+'_WAVE'] is not None: - msgs.info( + log.info( f'Applying {refframe} correction to {attr} extraction for object:\n{self.NAME}' ) self[attr+'_WAVE'] *= vel_corr @@ -717,8 +717,8 @@ def ready_for_extraction(self): passed = True for key in required: if self[key] is None: - msgs.warning("Item {} is missing from SpecObj. Failing vette".format(key)) - msgs.warning('{}'.format(self)) + log.warning("Item {} is missing from SpecObj. 
Failing vette".format(key)) + log.warning('{}'.format(self)) passed = False # return passed diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 81cc7846f0..7f80037ef4 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -18,7 +18,7 @@ from astropy.table import Table from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import specobj from pypeit import io @@ -250,7 +250,7 @@ def unpack_object(self, ret_flam=False, log10blaze=False, min_blaze_value=1e-3, raise PypeItError(msg) else: msg += f"\n-- The missing data will be removed --" - msgs.warning(msg) + log.warning(msg) # Remove missing data r_indx = np.where(none_flux)[0] self.remove_sobj(r_indx) @@ -407,7 +407,7 @@ def append_neg(self, sobjs_neg): """ if sobjs_neg.nobj == 0: - msgs.warning("No negative objects found...") + log.warning("No negative objects found...") return # Assign the sign and the objids sobjs_neg.sign = -1.0 @@ -684,7 +684,7 @@ def apply_flux_calib(self, par, spectrograph, sens, tell=False): extinct_file=par['extinct_file'], airmass=float(self.header['AIRMASS'])) elif indx.size == 0: - msgs.info('Unable to flux calibrate order = {:} as it is not in your sensitivity function. ' + log.info('Unable to flux calibrate order = {:} as it is not in your sensitivity function. ' 'Something is probably wrong with your sensitivity function.'.format(sci_obj.ECH_ORDER)) else: raise PypeItError('This should not happen') @@ -800,7 +800,7 @@ def write_to_fits(self, subheader, outfile, overwrite=True, update_det=None, If True, run in debug mode. """ if os.path.isfile(outfile) and not overwrite: - msgs.warning(f'{outfile} exits. Set overwrite=True to overwrite it.') + log.warning(f'{outfile} exits. Set overwrite=True to overwrite it.') return # If the file exists and update_det (and slit_spat_num) is provided, use the existing header @@ -914,7 +914,7 @@ def write_to_fits(self, subheader, outfile, overwrite=True, update_det=None, #embed() #exit() hdulist.writeto(outfile, overwrite=overwrite) - msgs.info(f'Wrote 1D spectra to {outfile}') + log.info(f'Wrote 1D spectra to {outfile}') def write_info(self, outfile, pypeline): """ diff --git a/pypeit/spectrographs/aat_uhrf.py b/pypeit/spectrographs/aat_uhrf.py index d5138dd3a9..1823aa1748 100644 --- a/pypeit/spectrographs/aat_uhrf.py +++ b/pypeit/spectrographs/aat_uhrf.py @@ -11,7 +11,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -228,7 +228,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_specific_par(self, scifile, inp_par=None): @@ -251,7 +251,7 @@ def config_specific_par(self, scifile, inp_par=None): par = super().config_specific_par(scifile, inp_par=inp_par) if par['calibrations']['wavelengths']['reid_arxiv'] is None: - msgs.warning( + log.warning( "Wavelength setup not supported!\n\n" "Please perform your own wavelength calibration, and provide the path+filename " "using the reid_arxiv parameter." 
diff --git a/pypeit/spectrographs/apf_levy.py b/pypeit/spectrographs/apf_levy.py index 4c2ed79ad6..a38d968c2e 100644 --- a/pypeit/spectrographs/apf_levy.py +++ b/pypeit/spectrographs/apf_levy.py @@ -10,7 +10,7 @@ from astropy.time import Time from IPython import embed -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ -329,7 +329,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['pinhole']: return good_exp & (fitstbl['idname'] == 'NarrowFlat') & (fitstbl['decker'] == 'Pinhole') - msgs.debug(f'Cannot determine if frames are of type {ftype}.') + log.debug(f'Cannot determine if frames are of type {ftype}.') return np.zeros(len(fitstbl), dtype=bool) def is_science(self, fitstbl): diff --git a/pypeit/spectrographs/bok_bc.py b/pypeit/spectrographs/bok_bc.py index f89bf3618e..6b2cced24e 100644 --- a/pypeit/spectrographs/bok_bc.py +++ b/pypeit/spectrographs/bok_bc.py @@ -7,7 +7,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ -306,7 +306,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) if det == 1: - msgs.info("Using hard-coded BPM for Bok B&C") + log.info("Using hard-coded BPM for Bok B&C") bpm_img[:, -1] = 1 @@ -387,7 +387,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] != 'off') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/gemini_flamingos.py b/pypeit/spectrographs/gemini_flamingos.py index c7a960b2dd..cde806fc82 100644 --- a/pypeit/spectrographs/gemini_flamingos.py +++ b/pypeit/spectrographs/gemini_flamingos.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -207,7 +207,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'OBJECT') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'OBJECT') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -338,6 +338,6 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Arc') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/gemini_gmos.py b/pypeit/spectrographs/gemini_gmos.py index 5427323730..5a70ea7870 100644 --- a/pypeit/spectrographs/gemini_gmos.py +++ b/pypeit/spectrographs/gemini_gmos.py @@ -13,7 +13,7 @@ from astropy.wcs import wcs from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit.spectrographs import spectrograph from pypeit import telescopes @@ -183,7 +183,7 @@ def compound_meta(self, headarr, meta_key): if obsepoch is not None: return 
time.Time(obsepoch, format='jyear').mjd else: - msgs.warning('OBSEPOCH header keyword not found. Using today as the date.') + log.warning('OBSEPOCH header keyword not found. Using today as the date.') return time.Time.now().mjd def config_independent_frames(self): @@ -284,7 +284,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'bias': return good_exp & (fitstbl['target'] == 'Bias')#& (fitstbl['idname'] == 'BIAS') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod @@ -412,7 +412,7 @@ def get_rawimage(self, raw_file, det): pixel. Pixels unassociated with any amplifier are set to 0. """ # Read - msgs.info(f'Attempting to read GMOS file: {raw_file}') + log.info(f'Attempting to read GMOS file: {raw_file}') # NOTE: io.fits_open checks that the file exists hdu = io.fits_open(raw_file) head0 = hdu[0].header @@ -925,7 +925,7 @@ def get_mosaic_par(self, mosaic, hdu=None, msc_ord=0): if obs_date >= t_upgrade: self.detid = 'BI11-41-4k-2,BI13-19-4k-3,BI12-34-4k-1' - msgs.info(f'Using the detector parameters for GMOS-S Hamamatsu after the upgrade on ' + log.info(f'Using the detector parameters for GMOS-S Hamamatsu after the upgrade on ' f'{t_upgrade.iso.split(" ")[0]}') else: self.detid = 'BI5-36-4k-2,BI11-33-4k-1,BI12-34-4k-1' @@ -996,13 +996,13 @@ def bpm(self, filename, det, shape=None, msbias=None): # Add the detector-specific, hard-coded bad columns if 1 in _det: - msgs.info("Using hard-coded BPM for det=1 on GMOSs") + log.info("Using hard-coded BPM for det=1 on GMOSs") i = _det.index(1) # Apply the mask badc = 616//bin_spec _bpm_img[i,badc,:] = 1 if 2 in _det: - msgs.info("Using hard-coded BPM for det=2 on GMOSs") + log.info("Using hard-coded BPM for det=2 on GMOSs") i = _det.index(2) # Apply the mask # Up high @@ -1017,7 +1017,7 @@ def bpm(self, filename, det, shape=None, msbias=None): badr = (768*2)//bin_spec _bpm_img[i,badr:,:] = 1 if 3 in _det: - msgs.info("Using hard-coded BPM for det=3 on GMOSs") + log.info("Using hard-coded BPM for det=3 on GMOSs") i = _det.index(3) # Apply the mask badr = (281*2)//bin_spec # Transposed diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index c51bc03e91..f47561e03a 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -8,7 +8,7 @@ from astropy.coordinates import SkyCoord, EarthLocation from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch, parse @@ -145,34 +145,34 @@ def compound_meta(self, headarr, meta_key): try: return Time(headarr[0]['DATE-OBS'] + "T" + headarr[0]['TIME-OBS']) except KeyError: - msgs.warning("Time of observation is not in header") + log.warning("Time of observation is not in header") return 0.0 elif meta_key == 'pressure': try: return headarr[0]['PRESSUR2']/100.0 # Must be in astropy.units.mbar except KeyError: - msgs.warning("Pressure is not in header - The default pressure (611 mbar) will be assumed") + log.warning("Pressure is not in header - The default pressure (611 mbar) will be assumed") return 611.0 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C except KeyError: - msgs.warning("Temperature is not in header - The default temperature (1.5 deg C) will be assumed") + log.warning("Temperature is not in header - 
The default temperature (1.5 deg C) will be assumed") return 1.5 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'humidity': try: # Humidity expressed as a percentage, not a fraction return headarr[0]['HUMIDITY'] except KeyError: - msgs.warning("Humidity is not in header - The default relative humidity (20 %) will be assumed") + log.warning("Humidity is not in header - The default relative humidity (20 %) will be assumed") return 20.0 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'parangle': try: # Humidity expressed as a percentage, not a fraction - msgs.warning("Parallactic angle is not available for GNIRS - DAR correction may be incorrect") + log.warning("Parallactic angle is not available for GNIRS - DAR correction may be incorrect") return headarr[0]['PARANGLE'] # Must be expressed in radians except KeyError: - msgs.warning("Parallactic angle is not in header - The default parallactic angle (0 degrees) will be assumed") + log.warning("Parallactic angle is not in header - The default parallactic angle (0 degrees) will be assumed") return 0.0 else: raise PypeItError("Not ready for this compound meta") @@ -261,7 +261,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): elif '10/mmLBSX' in fitstbl['dispname'][0]: return good_exp & (fitstbl['idname'] == 'ARC') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod @@ -429,7 +429,7 @@ def bpm(self, filename, det, shape=None, msbias=None): to 1 and an unmasked value set to 0. All values are set to 0. """ - msgs.info("Custom bad pixel mask for GNIRS") + log.info("Custom bad pixel mask for GNIRS") # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) @@ -717,17 +717,17 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Returns: `astropy.wcs.WCS`_: The world-coordinate system. """ - msgs.info("Calculating the WCS") + log.info("Calculating the WCS") # Get the x and y binning factors, and the typical slit length binspec, binspat = parse.parse_binning(self.get_meta_value([hdr], 'binning')) # Get the pixel and slice scales pxscl = platescale * binspat / 3600.0 # Need to convert arcsec to degrees - msgs.debug("NEED TO WORK OUT SLICER SCALE AND PIXEL SCALE") + log.debug("NEED TO WORK OUT SLICER SCALE AND PIXEL SCALE") slscl = self.get_meta_value([hdr], 'slitwid') if spatial_scale is not None: if pxscl > spatial_scale / 3600.0: - msgs.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0)) + log.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0)) # Update the pixel scale pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees @@ -742,7 +742,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) # Get rotator position - msgs.warning("CURRENTLY A HACK --- NEED TO FIGURE OUT RPOS and RREF FOR HRIFU FROM HEADER INFO") + log.warning("CURRENTLY A HACK --- NEED TO FIGURE OUT RPOS and RREF FOR HRIFU FROM HEADER INFO") if 'ROTPOSN' in hdr: rpos = hdr['ROTPOSN'] else: @@ -776,7 +776,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): crpix2 = slitlength / 2. crpix3 = 1. 
# Get the offset - msgs.warning("HACK FOR HRIFU --- Need to obtain offset from header?") + log.warning("HACK FOR HRIFU --- Need to obtain offset from header?") off1 = 0. off2 = 0. off1 /= binspec @@ -785,7 +785,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): crpix2 += off2 # Create a new WCS object. - msgs.info("Generating GNIRS IFU WCS") + log.info("Generating GNIRS IFU WCS") w = wcs.WCS(naxis=3) w.wcs.equinox = hdr['EQUINOX'] w.wcs.name = 'GNIRS IFU' diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 6fa4dec6c6..8f0ddd4f0c 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse @@ -188,26 +188,26 @@ def compound_meta(self, headarr, meta_key): try: return headarr[0]['PRESSURE'] # Must be in astropy.units.mbar except KeyError: - msgs.warning("Pressure is not in header") - msgs.info("The default pressure will be assumed: 611 mbar") + log.warning("Pressure is not in header") + log.info("The default pressure will be assumed: 611 mbar") return 611.0 elif meta_key == 'temperature': try: return headarr[0]['TAMBIENT'] # Must be in astropy.units.deg_C except KeyError: - msgs.warning("Temperature is not in header") - msgs.info("The default temperature will be assumed: 1.5 deg C") + log.warning("Temperature is not in header") + log.info("The default temperature will be assumed: 1.5 deg C") return 1.5 elif meta_key == 'humidity': try: return headarr[0]['HUMIDITY'] except KeyError: - msgs.warning("Humidity is not in header") - msgs.info("The default relative humidity will be assumed: 20 %") + log.warning("Humidity is not in header") + log.info("The default relative humidity will be assumed: 20 %") return 20.0 elif meta_key == 'parangle': try: - msgs.debug("Parallactic angle is not available for MAAT - DAR correction may be incorrect") + log.debug("Parallactic angle is not available for MAAT - DAR correction may be incorrect") return headarr[0]['PARANG'] # Must be expressed in radians except KeyError: raise PypeItError("Parallactic angle is not in header") @@ -217,7 +217,7 @@ def compound_meta(self, headarr, meta_key): return headarr[0]['GAIN'] elif meta_key == 'slitwid': if self.name == "gtc_maat": - msgs.warning("HACK FOR MAAT SIMS --- NEED TO GET SLICER SCALE FROM HEADER, IDEALLY") + log.warning("HACK FOR MAAT SIMS --- NEED TO GET SLICER SCALE FROM HEADER, IDEALLY") return 0.305 / 3600.0 elif self.name == "gtc_osiris_plus": return headarr[0]['SLITW']/3600.0 # Convert slit width from arcseconds to degrees @@ -291,7 +291,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'bias': return good_exp & (np.char.lower(fitstbl['target']) == 'bias') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_independent_frames(self): @@ -399,7 +399,7 @@ def config_specific_par(self, scifile, inp_par=None): par['sensfunc']['algorithm'] = 'IR' par['sensfunc']['IR']['telgridfile'] = "TellPCA_3000_26000_R10000.fits" else: - msgs.warning('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...') + log.warning('gtc_osiris.py: template arc missing for this grism! 
Trying holy-grail...') par['calibrations']['wavelengths']['method'] = 'holy-grail' # Return @@ -440,7 +440,7 @@ def bpm(self, filename, det, shape=None, msbias=None): head0 = fits.getheader(filename, ext=0) binning = self.get_meta_value([head0], 'binning') - msgs.warning("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning)) + log.warning("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning)) # Construct a list of the bad columns bc = [] # TODO :: Add BPM @@ -637,7 +637,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Returns: `astropy.wcs.WCS`_: The world-coordinate system. """ - msgs.info("Calculating the WCS") + log.info("Calculating the WCS") # Get the x and y binning factors, and the typical slit length binspec, binspat = parse.parse_binning(self.get_meta_value([hdr], 'binning')) @@ -646,7 +646,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): slscl = self.get_meta_value([hdr], 'slitwid') if spatial_scale is not None: if pxscl > spatial_scale / 3600.0: - msgs.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0)) + log.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0)) # Update the pixel scale pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees @@ -661,7 +661,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): coord = SkyCoord(raval, decval, unit=(units.deg, units.deg)) # Get rotator position - msgs.warning("HACK FOR MAAT SIMS --- NEED TO FIGURE OUT RPOS and RREF FOR MAAT FROM HEADER INFO") + log.warning("HACK FOR MAAT SIMS --- NEED TO FIGURE OUT RPOS and RREF FOR MAAT FROM HEADER INFO") if 'ROTPOSN' in hdr: rpos = hdr['ROTPOSN'] else: @@ -695,7 +695,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): crpix2 = slitlength / 2. crpix3 = 1. # Get the offset - msgs.warning("HACK FOR MAAT SIMS --- Need to obtain offset from header?") + log.warning("HACK FOR MAAT SIMS --- Need to obtain offset from header?") off1 = 0. off2 = 0. off1 /= binspec @@ -704,7 +704,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): crpix2 += off2 # Create a new WCS object. - msgs.info("Generating MAAT WCS") + log.info("Generating MAAT WCS") w = wcs.WCS(naxis=3) w.wcs.equinox = hdr['EQUINOX'] w.wcs.name = 'MAAT' @@ -1001,7 +1001,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'bias': return good_exp & (fitstbl['target'] == 'BIAS') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_independent_frames(self): @@ -1092,7 +1092,7 @@ def config_specific_par(self, scifile, inp_par=None): par['sensfunc']['algorithm'] = 'IR' par['sensfunc']['IR']['telgridfile'] = "TellPCA_3000_26000_R10000.fits" else: - msgs.warning('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...') + log.warning('gtc_osiris.py: template arc missing for this grism! 
Trying holy-grail...') par['calibrations']['wavelengths']['method'] = 'holy-grail' # Return @@ -1141,14 +1141,14 @@ def bpm(self, filename, det, shape=None, msbias=None): elif det == 2: if binning == '1 1': # The BPM is based on 2x2 binning data, so the 2x2 numbers are just multiplied by two - msgs.warning("BPM is likely over-estimated for 1x1 binning") + log.warning("BPM is likely over-estimated for 1x1 binning") bc = [[220, 222, 3892, 4100], [952, 954, 2304, 4100]] elif binning == '2 2': bc = [[110, 111, 1946, 2050], [476, 477, 1154, 2050]] else: - msgs.warning("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning)) + log.warning("Bad pixel mask is not available for det={0:d} binning={1:s}".format(det, binning)) bc = [] # Apply these bad columns to the mask diff --git a/pypeit/spectrographs/jwst_nircam.py b/pypeit/spectrographs/jwst_nircam.py index 61bc5ad2c6..6f5903e92d 100644 --- a/pypeit/spectrographs/jwst_nircam.py +++ b/pypeit/spectrographs/jwst_nircam.py @@ -8,7 +8,7 @@ from astropy.io import fits from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch diff --git a/pypeit/spectrographs/jwst_nirspec.py b/pypeit/spectrographs/jwst_nirspec.py index 887ad39196..bfee1b5de6 100644 --- a/pypeit/spectrographs/jwst_nirspec.py +++ b/pypeit/spectrographs/jwst_nirspec.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -237,7 +237,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype == 'science': return np.ones(len(fitstbl), dtype=bool) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -292,7 +292,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading JWST/NIRSpec file: {fil}') + log.info(f'Reading JWST/NIRSpec file: {fil}') hdu = io.fits_open(fil) head0 = hdu[0].header diff --git a/pypeit/spectrographs/keck_deimos.py b/pypeit/spectrographs/keck_deimos.py index 443f3ce225..8ea734cf0b 100644 --- a/pypeit/spectrographs/keck_deimos.py +++ b/pypeit/spectrographs/keck_deimos.py @@ -22,7 +22,7 @@ import linetools -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ -234,7 +234,7 @@ def get_detector_par(self, det, hdu=None): measured_amptype = tab_measure['col4'] measured_gain = tab_measure['col5'] # [e-/DN] measured_ronoise = tab_measure['col7'] # [e-] - msgs.info(f"We are using DEIMOS gain/RN values for AMPMODE = {amp} " + log.info(f"We are using DEIMOS gain/RN values for AMPMODE = {amp} " f"based on WMKO estimates on {measure_dates[close_idx]}.") # find values for this amp and each detector this_amp = measured_amptype == 'A' if amp == 'SINGLE:A' else measured_amptype == 'B' @@ -351,7 +351,7 @@ def config_specific_par(self, scifile, inp_par=None): self.get_meta_value(headarr, 'amp') == 'SINGLE:A'): # give an info message if AMPMODE = SINGLE:A if self.get_meta_value(headarr, 'amp') == 'SINGLE:A': - msgs.info('Data taken with AMPMODE = SINGLE:A. Only detectors 3,7 will be reduced. To change this,' + log.info('Data taken with AMPMODE = SINGLE:A. Only detectors 3,7 will be reduced. 
To change this,' ' modify the detnum parameter in the pypeit file.') par['rdx']['detnum'] = [(3, 7)] @@ -509,7 +509,7 @@ def compound_meta(self, headarr, meta_key): elif headarr[0]['GRATEPOS'] == 4: return headarr[0]['G4TLTWAV'] else: - msgs.warning('This is probably a problem. Non-standard DEIMOS GRATEPOS={0}.'.format(headarr[0]['GRATEPOS'])) + log.warning('This is probably a problem. Non-standard DEIMOS GRATEPOS={0}.'.format(headarr[0]['GRATEPOS'])) elif meta_key == 'mjd': if headarr[0].get('MJD-OBS', None) is not None: return headarr[0]['MJD-OBS'] @@ -679,7 +679,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Line') & (fitstbl['hatch'] == 'closed') \ & (fitstbl['lampstat01'] != 'Off') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): @@ -728,7 +728,7 @@ def get_rawimage(self, raw_file, det): pixel. Pixels unassociated with any amplifier are set to 0. """ # Read - msgs.info(f'Attempting to read DEIMOS file: {raw_file}') + log.info(f'Attempting to read DEIMOS file: {raw_file}') # NOTE: io.fits_open checks that the file exists hdu = io.fits_open(raw_file) @@ -1452,41 +1452,41 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, # `slitindex` because it does not always match `SlitName` from the DEIMOS design file. if not debug: num = 0 - msgs.info('Expected slits on current detector') - msgs.info('*' * 18) - msgs.info('{0:^6s} {1:^12s}'.format('N.', 'dSlitId')) - msgs.info('{0:^6s} {1:^12s}'.format('-' * 5, '-' * 9)) + log.info('Expected slits on current detector') + log.info('*' * 18) + log.info('{0:^6s} {1:^12s}'.format('N.', 'dSlitId')) + log.info('{0:^6s} {1:^12s}'.format('-' * 5, '-' * 9)) for i in range(sortindx.shape[0]): if omodel_bspat[sortindx][i] != -1 or omodel_tspat[sortindx][i] != -1: - msgs.info('{0:^6d} {1:^12d}'.format(num, self.slitmask.slitid[sortindx][i])) + log.info('{0:^6d} {1:^12d}'.format(num, self.slitmask.slitid[sortindx][i])) num += 1 - msgs.info('*' * 18) + log.info('*' * 18) # If instead we run this method in debug mode, we print more info useful for comparison, for example, with # the IDL-based pipeline. 
if debug: num = 0 - msgs.info('Expected slits on current detector') - msgs.info('*' * 92) - msgs.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^14s} {5:^16s} {6:^16s}'.format('N.', + log.info('Expected slits on current detector') + log.info('*' * 92) + log.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^14s} {5:^16s} {6:^16s}'.format('N.', 'dSlitId', 'slitLen(mm)', 'slitWid(mm)', 'spat_cen(mm)', 'omodel_bottom(pix)', 'omodel_top(pix)')) - msgs.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^14s} {5:^16s} {6:^14s}'.format('-' * 4, '-' * 9, '-' * 11, + log.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^14s} {5:^16s} {6:^14s}'.format('-' * 4, '-' * 9, '-' * 11, '-' * 11, '-' * 13, '-' * 18, '-' * 15)) for i in range(sortindx.size): if omodel_bspat[sortindx][i] != -1 or omodel_tspat[sortindx][i] != -1: - msgs.info('{0:^5d}{1:^14d} {2:^9.3f} {3:^12.3f} {4:^14.3f} {5:^16.2f} {6:^14.2f}' + log.info('{0:^5d}{1:^14d} {2:^9.3f} {3:^12.3f} {4:^14.3f} {5:^16.2f} {6:^14.2f}' .format(num, self.slitmask.slitid[sortindx][i], self.slitmask.length[sortindx][i], self.slitmask.width[sortindx][i], self.slitmask.center[:, 0][sortindx][i], omodel_bspat[sortindx][i], omodel_tspat[sortindx][i])) num += 1 - msgs.info('*' * 92) + log.info('*' * 92) return omodel_bspat, omodel_tspat, sortindx, self.slitmask @@ -1754,7 +1754,7 @@ def __init__(self): # raise PypeItError('Found {0} files matching {1}'.format(len(fil), inp + '*')) # # Read # try: -# msgs.info("Reading DEIMOS file: {:s}".format(fil[0])) +# log.info("Reading DEIMOS file: {:s}".format(fil[0])) # except AttributeError: # print("Reading DEIMOS file: {:s}".format(fil[0])) # # Open diff --git a/pypeit/spectrographs/keck_esi.py b/pypeit/spectrographs/keck_esi.py index a9da8515f6..1cad34cb26 100644 --- a/pypeit/spectrographs/keck_esi.py +++ b/pypeit/spectrographs/keck_esi.py @@ -12,7 +12,7 @@ from astropy.time import Time import datetime -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ -217,7 +217,7 @@ def compound_meta(self, headarr, meta_key): return mjd_time.mjd except Exception as e: # A problem parsing the MJD, we'll try DATE-OBS and UT - msgs.warning("Problem parsing MJD-OBS, trying DATE-OBS and UT instead.") + log.warning("Problem parsing MJD-OBS, trying DATE-OBS and UT instead.") pass return Time('{}T{}'.format(headarr[0]['DATE-OBS'], headarr[0]['UT'])).mjd elif meta_key == 'dispname': @@ -282,7 +282,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Object') if ftype == 'standard': return good_exp & (fitstbl['idname'] == 'Object') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): @@ -317,7 +317,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Get the binning - msgs.info("Custom bad pixel mask for ESI") + log.info("Custom bad pixel mask for ESI") hdu = io.fits_open(filename) binspatial, binspec = parse.parse_binning(hdu[0].header['BINNING']) hdu.close() diff --git a/pypeit/spectrographs/keck_hires.py b/pypeit/spectrographs/keck_hires.py index 91bd246c73..067d42a197 100644 --- a/pypeit/spectrographs/keck_hires.py +++ b/pypeit/spectrographs/keck_hires.py @@ -14,7 +14,7 @@ from astropy import time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ 
-427,7 +427,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # Arc and tilt frames are typed together return good_exp & (fitstbl['idname'] == 'Line') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def vet_assigned_ftypes(self, type_bits, fitstbl): @@ -509,11 +509,11 @@ def parse_raw_files(self, fitstbl, det=1, ftype=None): if ftype == 'slitless_pixflat': # Check for the required info if len(fitstbl) == 0: - msgs.warning('Fitstbl provided is emtpy. No parsing done.') + log.warning('Fitstbl provided is empty. No parsing done.') # return empty array return np.array([], dtype=int) elif det is None: - msgs.warning('Detector number must be provided to parse slitless_pixflat frames. No parsing done.') + log.warning('Detector number must be provided to parse slitless_pixflat frames. No parsing done.') # return index array of length of fitstbl return np.arange(len(fitstbl)) @@ -534,7 +534,7 @@ def parse_raw_files(self, fitstbl, det=1, ftype=None): # red detector return np.where(np.int32(fitstbl['xdangle'].value) == -5)[0] else: - msgs.warning('The provided list of slitless_pixflat frames does not have exactly 3 unique XDANGLE values. ' + log.warning('The provided list of slitless_pixflat frames does not have exactly 3 unique XDANGLE values. ' 'Pypeit cannot determine which slitless_pixflat frame corresponds to the requested detector. ' 'All frames will be used.') return np.arange(len(fitstbl)) @@ -600,7 +600,7 @@ def get_rawimage(self, raw_file, det, spectrim=20): binning = self.get_meta_value(self.get_headarr(hdu), 'binning') # # TODO: JFH I think this works fine # if binning != '3,1': -# msgs.warning("This binning for HIRES might not work. But it might..") +# log.warning("This binning for HIRES might not work. But it might..") # We are flipping this because HIRES stores the binning opposite of the (binspec, binspat) pypeit convention. 
binspatial, binspec = parse.parse_binning(head0['BINNING']) diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index f1637fbe1d..6d701a506d 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -15,7 +15,7 @@ from astropy.coordinates import EarthLocation from scipy.optimize import curve_fit -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -150,8 +150,8 @@ def config_specific_par(self, scifile, inp_par=None): elif self.get_meta_value(headarr, 'dispname') == 'RH3': par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_kcrm_RH3.fits' else: - msgs.warning("Full template solution is unavailable") - msgs.info("Adopting holy-grail algorithm - Check the wavelength solution!") + log.warning("Full template solution is unavailable") + log.info("Adopting holy-grail algorithm - Check the wavelength solution!") par['calibrations']['wavelengths']['method'] = 'holy-grail' # FWHM # binning = parse.parse_binning(self.get_meta_value(headarr, 'binning')) @@ -224,23 +224,23 @@ def compound_meta(self, headarr, meta_key): try: return headarr[0]['WXPRESS'] # Must be in astropy.units.mbar except KeyError: - msgs.warning("Pressure is not in header") - msgs.info("The default pressure will be assumed: 611 mbar") + log.warning("Pressure is not in header") + log.info("The default pressure will be assumed: 611 mbar") return 611.0 elif meta_key == 'temperature': try: return headarr[0]['WXOUTTMP'] # Must be in astropy.units.deg_C except KeyError: - msgs.warning("Temperature is not in header") - msgs.info("The default temperature will be assumed: 1.5 deg C") + log.warning("Temperature is not in header") + log.info("The default temperature will be assumed: 1.5 deg C") return 1.5 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'humidity': try: # Humidity expressed as a percentage, not a fraction return headarr[0]['WXOUTHUM'] except KeyError: - msgs.warning("Humidity is not in header") - msgs.info("The default relative humidity will be assumed: 20 %") + log.warning("Humidity is not in header") + log.info("The default relative humidity will be assumed: 20 %") return 20.0 # van Kooten & Izett, arXiv:2208.11794 elif meta_key == 'parangle': try: @@ -403,7 +403,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_align = good_exp & (fitstbl['idname'] == 'CONTBARS') \ & (fitstbl['calpos'] == 'Mirror') & self.lamps(fitstbl, 'cont') if np.any(is_align & np.logical_not(self.lamps(fitstbl, 'cont_noarc'))): - msgs.warning('Alignment frames have both the continuum and arc lamps on (although ' + log.warning('Alignment frames have both the continuum and arc lamps on (although ' 'arc-lamp shutter might be closed)!') return is_align if ftype == 'arc': @@ -423,7 +423,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # Don't type pinhole frames return np.zeros(len(fitstbl), dtype=bool) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -560,7 +560,7 @@ def calc_pattern_freq(self, frame, rawdatasec_img, oscansec_img, hdu): patt_freqs : :obj:`list` List of pattern frequencies. 
""" - msgs.info("Calculating pattern noise frequency") + log.info("Calculating pattern noise frequency") # Make a copy of te original frame raw_img = frame.copy() @@ -592,7 +592,7 @@ def calc_pattern_freq(self, frame, rawdatasec_img, oscansec_img, hdu): # Calculate the pattern frequency freq = procimg.pattern_frequency(frame) patt_freqs.append(freq) - msgs.info("Pattern frequency of amplifier {0:d}/{1:d} = {2:f}".format(amp, num_amps, freq)) + log.info("Pattern frequency of amplifier {0:d}/{1:d} = {2:f}".format(amp, num_amps, freq)) # Return the list of pattern frequencies return patt_freqs @@ -624,7 +624,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Returns: `astropy.wcs.WCS`_: The world-coordinate system. """ - msgs.info(f"Generating {self.camera} WCS") + log.info(f"Generating {self.camera} WCS") # Get the x and y binning factors, and the typical slit length binspec, binspat = parse.parse_binning(self.get_meta_value([hdr], 'binning')) @@ -633,7 +633,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): slscl = self.get_meta_value([hdr], 'slitwid') if spatial_scale is not None: if pxscl > spatial_scale / 3600.0: - msgs.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0)) + log.warning("Spatial scale requested ({0:f}'') is less than the pixel scale ({1:f}'')".format(spatial_scale, pxscl*3600.0)) # Update the pixel scale pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees @@ -675,7 +675,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): # off1 = 0.05 # off2 = 5.6 # else: - # msgs.warning("Unknown IFU number: {0:d}".format(ifunum)) + # log.warning("Unknown IFU number: {0:d}".format(ifunum)) off1 = 0. off2 = 0. off1 /= binspec @@ -810,7 +810,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Check if the bad columns haven't been set if bc is None: - msgs.warning("KCRM bad pixel mask is not available for ampmode={0:s} binning={1:s}".format(ampmode, binning)) + log.warning("KCRM bad pixel mask is not available for ampmode={0:s} binning={1:s}".format(ampmode, binning)) bc = [] # Apply these bad columns to the mask @@ -1030,7 +1030,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading KCWI file: {fil}') + log.info(f'Reading KCWI file: {fil}') hdu = io.fits_open(fil) detpar = self.get_detector_par(det if det is not None else 1, hdu=hdu) head0 = hdu[0].header @@ -1138,7 +1138,7 @@ def scattered_light_archive(self, binning, dispname): -0.004790394657721825, 0.0032481886185675036, # Polynomial terms (coefficients of "spat" and "spat*spec") 0.07823077510724392, -0.0644638013233617, 0.01819438897935518]) # Polynomial terms (coefficients of spec**index) else: - msgs.warning(f"Initial scattered light model parameters have not been setup for grating {dispname}") + log.warning(f"Initial scattered light model parameters have not been setup for grating {dispname}") x0 = np.array([54.843502304988725 / specbin, 71.36603219575882 / spatbin, # Gaussian kernel widths 166.5990017834228 / specbin, 164.45188033168876 / spatbin, # Lorentzian kernel widths -5.759623374637964 / specbin, 5.01392929142184 / spatbin, # pixel offsets @@ -1186,7 +1186,7 @@ def fit_2d_det_response(self, det_resp, gpmask): Returns: `numpy.ndarray`_: A model fit to the flatfield structure. 
""" - msgs.info("Performing a 2D fit to the detector response") + log.info("Performing a 2D fit to the detector response") # Define a 2D sine function, which is a good description of KCWI data def sinfunc2d(x, amp, scl, quad, phase, wavelength, angle): @@ -1416,7 +1416,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading KCWI file: {fil}') + log.info(f'Reading KCWI file: {fil}') hdu = io.fits_open(fil) detpar = self.get_detector_par(det if det is not None else 1, hdu=hdu) head0 = hdu[0].header diff --git a/pypeit/spectrographs/keck_lris.py b/pypeit/spectrographs/keck_lris.py index 8096115bad..4ba4664e34 100644 --- a/pypeit/spectrographs/keck_lris.py +++ b/pypeit/spectrographs/keck_lris.py @@ -16,7 +16,7 @@ import linetools.utils -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -210,13 +210,13 @@ def compound_meta(self, headarr, meta_key): # LRIS sometime misses RA and/or Dec in the header. When this happens, set them to 0 if meta_key == 'ra': if headarr[0].get('RA') is None: - msgs.warning('Keyword RA not found in header. Setting to 0') + log.warning('Keyword RA not found in header. Setting to 0') return '00:00:00.00' else: return headarr[0]['RA'] elif meta_key == 'dec': if headarr[0].get('DEC') is None: - msgs.warning('Keyword DEC not found in header. Setting to 0') + log.warning('Keyword DEC not found in header. Setting to 0') return '+00:00:00.0' else: return headarr[0]['DEC'] @@ -403,7 +403,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arcs') & (fitstbl['hatch'] == 'closed') & no_img - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def vet_assigned_ftypes(self, type_bits, fitstbl): @@ -551,7 +551,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading LRIS file: {fil}') + log.info(f'Reading LRIS file: {fil}') hdu = io.fits_open(fil) head0 = hdu[0].header @@ -1083,7 +1083,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Only defined for det=1 if det == 1: - msgs.info("Using hard-coded BPM for det=1 on LRISb") + log.info("Using hard-coded BPM for det=1 on LRISb") bpm_img[:,:3] = 1 return bpm_img @@ -1285,7 +1285,7 @@ def get_detector_par(self, det, hdu=None): if date < t2020_1: pass elif date < t2020_2: # This is for the June 30 2020 run - msgs.warning("We are using LRISr gain/RN values based on WMKO estimates.") + log.warning("We are using LRISr gain/RN values based on WMKO estimates.") detector_dict1['gain'] = np.atleast_1d([37.6]) detector_dict2['gain'] = np.atleast_1d([1.26]) detector_dict1['ronoise'] = np.atleast_1d([99.]) @@ -1294,7 +1294,7 @@ def get_detector_par(self, det, hdu=None): # Note: We are unlikely to trip this. Other things probably failed first raise PypeItError("This is the new detector. 
Use keck_lris_red_mark4") else: # This is the 2020 July 29 run - msgs.warning("We are using LRISr gain/RN values based on WMKO estimates.") + log.warning("We are using LRISr gain/RN values based on WMKO estimates.") detector_dict1['gain'] = np.atleast_1d([1.45]) detector_dict2['gain'] = np.atleast_1d([1.25]) detector_dict1['ronoise'] = np.atleast_1d([4.47]) @@ -1555,7 +1555,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Only defined for det=2 if det == 2: - msgs.info("Using hard-coded BPM for det=2 on LRISr") + log.info("Using hard-coded BPM for det=2 on LRISr") # Get the binning hdu = io.fits_open(filename) @@ -1640,7 +1640,7 @@ def get_detector_par(self, det, hdu=None): # Deal with the intermediate headers if date < t_gdhead: amp_mode = hdu[0].header['AMPMODE'] - msgs.info("AMPMODE = {:s}".format(amp_mode)) + log.info("AMPMODE = {:s}".format(amp_mode)) # Load up translation dict ampmode_translate_file = dataPaths.spectrographs.get_file_path( 'keck_lris_red_mark4/dict_for_ampmode.json') @@ -2017,7 +2017,7 @@ def lris_read_amp(inp, ext): raise PypeItError("Something wrong in LRIS datasec or precol") xshape = 1024 // xbin * (4//n_ext) # Allow for single amp if (xshape+precol+postpix) != temp.shape[0]: - msgs.warning("Unexpected size for LRIS detector. We expect you did some windowing...") + log.warning("Unexpected size for LRIS detector. We expect you did some windowing...") xshape = temp.shape[0] - precol - postpix data = temp[precol:precol+xshape,:] postdata = temp[nxt-postpix:nxt, :] diff --git a/pypeit/spectrographs/keck_mosfire.py b/pypeit/spectrographs/keck_mosfire.py index ab5c8b3c67..8eef2544bd 100644 --- a/pypeit/spectrographs/keck_mosfire.py +++ b/pypeit/spectrographs/keck_mosfire.py @@ -8,7 +8,7 @@ import numpy as np from astropy.io import fits from astropy.stats import sigma_clipped_stats -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch, meta @@ -347,7 +347,7 @@ def compound_meta(self, headarr, meta_key): elif PWSTATA7 == 1 or PWSTATA8 == 1: return 'arclamp' else: - msgs.warning('Header keyword FLATSPEC, PWSTATA7, or PWSTATA8 may not exist') + log.warning('Header keyword FLATSPEC, PWSTATA7, or PWSTATA8 may not exist') return 'unknown' if meta_key == 'lampstat01': if headarr[0].get('PWSTATA7') == 1 or headarr[0].get('PWSTATA8') == 1: @@ -717,7 +717,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_arc = fitstbl['idname'] == 'arclamp' is_obj = (fitstbl['lampstat01'] == 'off') & (fitstbl['idname'] == 'object') & ('long2pos_specphot' not in fitstbl['decker']) return good_exp & (is_arc | is_obj) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) # TODO: Is this supposed to be deprecated in favor of get_comb_group? @@ -1052,33 +1052,33 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, # This print a QA table with info on the slits sorted from left to right. 
if not debug: num = 0 - msgs.info('Expected slits') - msgs.info('*' * 18) - msgs.info('{0:^6s} {1:^12s}'.format('N.', 'Slit_Number')) - msgs.info('{0:^6s} {1:^12s}'.format('-' * 5, '-' * 13)) + log.info('Expected slits') + log.info('*' * 18) + log.info('{0:^6s} {1:^12s}'.format('N.', 'Slit_Number')) + log.info('{0:^6s} {1:^12s}'.format('-' * 5, '-' * 13)) for i in range(sortindx.shape[0]): - msgs.info('{0:^6d} {1:^12d}'.format(num, self.slitmask.slitid[sortindx][i])) + log.info('{0:^6d} {1:^12d}'.format(num, self.slitmask.slitid[sortindx][i])) num += 1 - msgs.info('*' * 18) + log.info('*' * 18) # If instead we run this method in debug mode, we print more info if debug: num = 0 - msgs.info('Expected slits') - msgs.info('*' * 92) - msgs.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^16s} {5:^16s}'.format('N.', 'Slit_Number', + log.info('Expected slits') + log.info('*' * 92) + log.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^16s} {5:^16s}'.format('N.', 'Slit_Number', 'slitLen(arcsec)', 'slitWid(arcsec)', 'top_edges(pix)', 'bot_edges(pix)')) - msgs.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^16s} {5:^14s}'.format('-' * 4, '-' * 13, '-' * 11, + log.info('{0:^5s} {1:^10s} {2:^12s} {3:^12s} {4:^16s} {5:^14s}'.format('-' * 4, '-' * 13, '-' * 11, '-' * 11, '-' * 18, '-' * 15)) for i in range(sortindx.size): - msgs.info('{0:^5d}{1:^14d} {2:^9.3f} {3:^12.3f} {4:^16.2f} {5:^14.2f}'.format(num, + log.info('{0:^5d}{1:^14d} {2:^9.3f} {3:^12.3f} {4:^16.2f} {5:^14.2f}'.format(num, self.slitmask.slitid[sortindx][i], self.slitmask.onsky[:,2][sortindx][i], self.slitmask.onsky[:,3][sortindx][i], top_edges[sortindx][i], bot_edges[sortindx][i])) num += 1 - msgs.info('*' * 92) + log.info('*' * 92) return top_edges, bot_edges, sortindx, self.slitmask diff --git a/pypeit/spectrographs/keck_nires.py b/pypeit/spectrographs/keck_nires.py index f116f1d86a..d4c2456ebd 100644 --- a/pypeit/spectrographs/keck_nires.py +++ b/pypeit/spectrographs/keck_nires.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -502,7 +502,7 @@ def bpm(self, filename, det, shape=None, msbias=None): to 1 and an unmasked value set to 0. All values are set to 0. 
""" - msgs.info("Custom bad pixel mask for NIRES") + log.info("Custom bad pixel mask for NIRES") # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) diff --git a/pypeit/spectrographs/keck_nirspec.py b/pypeit/spectrographs/keck_nirspec.py index 402c86b892..9921b12036 100644 --- a/pypeit/spectrographs/keck_nirspec.py +++ b/pypeit/spectrographs/keck_nirspec.py @@ -6,7 +6,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import io from pypeit.images import detector_container @@ -302,7 +302,7 @@ def config_specific_par(self, scifile, inp_par=None): # wavelength calibration supported_filters = ['NIRSPEC-1', 'NIRSPEC-3', 'NIRSPEC-5', 'NIRSPEC-7', 'Kband-new', 'KL'] if (self.filter1 not in supported_filters) and (self.filter2 not in supported_filters): - msgs.warning(f'Filter {self.filter1} or {self.filter2} may not be supported!!') + log.warning(f'Filter {self.filter1} or {self.filter2} may not be supported!!') if self.filter1 == 'Kband-new' or self.filter2 == 'NIRSPEC-7': par['calibrations']['wavelengths']['n_final'] = 3 @@ -424,8 +424,8 @@ def get_echelle_angle_files(self): band = self.filter2 lamps_list = np.copy(self.lamps_list) - #msgs.info(lamps_list, 'Xe' in lamps_list[0]) - #msgs.info('filter1 = ', filter1) + #log.info(lamps_list, 'Xe' in lamps_list[0]) + #log.info('filter1 = ', filter1) if 'Xe' in lamps_list[0]: if band == 'NIRSPEC-1': angle_fits_file = 'keck_nirspec_y_angle_fits.fits' @@ -446,7 +446,7 @@ def get_echelle_angle_files(self): angle_fits_file = 'keck_nirspec_l_angle_fits.fits' composite_arc_file = 'keck_nirspec_l_composite_arc.fits' elif 'OH' in lamps_list[0]: - msgs.info('Using OH Lines') + log.info('Using OH Lines') if band == 'NIRSPEC-1': angle_fits_file = 'keck_nirspec_y_OH_angle_fits.fits' composite_arc_file = 'keck_nirspec_y_composite_OH.fits' @@ -499,7 +499,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == 'Out') good_exp[is_obj] = fitstbl['exptime'].data[is_obj] > 60.0 return good_exp & (is_arc | is_obj) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -718,7 +718,7 @@ def config_specific_par(self, scifile, inp_par=None): # wavelength calibration supported_filters = ['NIRSPEC-1', 'NIRSPEC-3', 'NIRSPEC-5', 'NIRSPEC-7', 'KL'] if (self.filter1 not in supported_filters) and (self.filter2 not in supported_filters): - msgs.warning(f'Filter {self.filter1} or {self.filter2} may not be supported!!') + log.warning(f'Filter {self.filter1} or {self.filter2} may not be supported!!') if self.filter2 == 'NIRSPEC-7': par['calibrations']['wavelengths']['n_final'] = 3 @@ -937,7 +937,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == '0') good_exp[is_obj] = fitstbl['exptime'].data[is_obj] > 60.0 return good_exp & (is_arc | is_obj) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -1367,7 +1367,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): is_obj = self.lamps(fitstbl, 'off') & (hatch == 0) \ & (fitstbl['idname'] == 'object') return good_exp & (is_arc | is_obj) - 
msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -1439,7 +1439,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) # Edges of the detector are junk - msgs.info("Custom bad pixel mask for NIRSPEC") + log.info("Custom bad pixel mask for NIRSPEC") bpm_img[:, :20] = 1. bpm_img[:, 1000:] = 1. diff --git a/pypeit/spectrographs/lbt_luci.py b/pypeit/spectrographs/lbt_luci.py index 2c57330cdc..b6fa15c33a 100644 --- a/pypeit/spectrographs/lbt_luci.py +++ b/pypeit/spectrographs/lbt_luci.py @@ -8,7 +8,7 @@ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -246,7 +246,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return (good_exp & ((fitstbl['idname'] == 'object') | (fitstbl['idname'] == 'arc'))) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) # Detector information from official LBT LUCI website diff --git a/pypeit/spectrographs/lbt_mods.py b/pypeit/spectrographs/lbt_mods.py index 67b27dc0e3..f7271afb4d 100644 --- a/pypeit/spectrographs/lbt_mods.py +++ b/pypeit/spectrographs/lbt_mods.py @@ -6,7 +6,7 @@ import numpy as np from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -183,7 +183,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'COMP') & (fitstbl['dispname'] != 'Flat') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): @@ -220,7 +220,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading LBT/MODS file: {fil}') + log.info(f'Reading LBT/MODS file: {fil}') hdu = io.fits_open(fil) head = hdu[0].header @@ -460,7 +460,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for MODS1R") + log.info("Using hard-coded BPM for MODS1R") # TODO: Fix this # Get the binning @@ -624,7 +624,7 @@ def bpm(self, filename, det, shape=None, msbias=None): """ # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for MODS1B") + log.info("Using hard-coded BPM for MODS1B") # Get the binning hdu = io.fits_open(filename) @@ -784,7 +784,7 @@ def bpm(self, filename, det, shape=None, msbias=None): """ # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for MODS2R") + log.info("Using hard-coded BPM for MODS2R") # Get the binning hdu = io.fits_open(filename) @@ -949,7 +949,7 @@ def bpm(self, filename, det, shape=None, msbias=None): """ # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, 
det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for MODS2B") + log.info("Using hard-coded BPM for MODS2B") # Get the binning hdu = io.fits_open(filename) diff --git a/pypeit/spectrographs/ldt_deveny.py b/pypeit/spectrographs/ldt_deveny.py index ff4a613a5c..aa34f11928 100644 --- a/pypeit/spectrographs/ldt_deveny.py +++ b/pypeit/spectrographs/ldt_deveny.py @@ -23,7 +23,7 @@ from astropy.table import Table from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -182,7 +182,7 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: if (grating_kwd := headarr[0]['GRATING']) not in gratings: raise PypeItError(f"Grating value {grating_kwd} not recognized.") if grating_kwd == "UNKNOWN": - msgs.warning(f"Grating not selected in the LOUI; \n" + log.warning(f"Grating not selected in the LOUI; \n" "Fix the header keyword GRATING before proceeding.") return f"{gratings[grating_kwd]} ({grating_kwd})" @@ -202,7 +202,7 @@ def compound_meta(self, headarr:list, meta_key:str) -> object: # Extract lines/mm, catch 'UNKNOWN' grating if (grating_kwd := headarr[0]["GRATING"]) == "UNKNOWN": lpmm = np.inf - msgs.warning(f"Grating angle not selected in the LOUI; \n" + log.warning(f"Grating angle not selected in the LOUI; \n" "Fix the header keyword GRANGLE before proceeding.") else: lpmm = float(grating_kwd.split("/")[0]) @@ -414,7 +414,7 @@ def check_frame_type(self, ftype:str, fitstbl:Table, exprng=None): if ftype in ['pinhole', 'align', 'sky', 'lampoffflats', 'scattlight']: # DeVeny doesn't have any of these types of frames return np.zeros(len(fitstbl), dtype=bool) - msgs.debug(f"Cannot determine if frames are of type {ftype}") + log.debug(f"Cannot determine if frames are of type {ftype}") return np.zeros(len(fitstbl), dtype=bool) def pypeit_file_keys(self): diff --git a/pypeit/spectrographs/magellan_fire.py b/pypeit/spectrographs/magellan_fire.py index fc39ac8e4b..0e19c231cf 100644 --- a/pypeit/spectrographs/magellan_fire.py +++ b/pypeit/spectrographs/magellan_fire.py @@ -11,7 +11,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -241,7 +241,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Science') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @property @@ -460,6 +460,6 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'Science') if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'Arc') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/magellan_mage.py b/pypeit/spectrographs/magellan_mage.py index 2e91a1951a..0694b3bc28 100644 --- a/pypeit/spectrographs/magellan_mage.py +++ b/pypeit/spectrographs/magellan_mage.py @@ -10,7 +10,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ -274,7 +274,7 @@ def bpm(self, filename, 
det, shape=None, msbias=None): bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) # Get the binning - msgs.info("Custom bad pixel mask for MAGE") + log.info("Custom bad pixel mask for MAGE") hdu = io.fits_open(filename) binspatial, binspec = parse.parse_binning(hdu[0].header['BINNING']) hdu.close() diff --git a/pypeit/spectrographs/mdm_modspec.py b/pypeit/spectrographs/mdm_modspec.py index a20233c476..5563ba63db 100644 --- a/pypeit/spectrographs/mdm_modspec.py +++ b/pypeit/spectrographs/mdm_modspec.py @@ -9,7 +9,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -286,7 +286,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['illumflat', 'trace']: # Twilight Flats return good_exp & (fitstbl['idname'] == 'Flat') & (fitstbl['mirror'] == 'OUT') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) \ No newline at end of file diff --git a/pypeit/spectrographs/mdm_osmos.py b/pypeit/spectrographs/mdm_osmos.py index a38ccb0d25..1ab89325df 100644 --- a/pypeit/spectrographs/mdm_osmos.py +++ b/pypeit/spectrographs/mdm_osmos.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -232,7 +232,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & np.array([ilamp in ['Ar','Xe'] for ilamp in fitstbl['lampstat01']]) & (fitstbl['idname'] == 'COMP') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -325,7 +325,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & (fitstbl['idname'] == 'COMP') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @classmethod diff --git a/pypeit/spectrographs/mmt_binospec.py b/pypeit/spectrographs/mmt_binospec.py index b734bd4ec1..333b3eb2ec 100644 --- a/pypeit/spectrographs/mmt_binospec.py +++ b/pypeit/spectrographs/mmt_binospec.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -282,7 +282,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) if det == 1: - msgs.info("Using hard-coded BPM for det=1 on BINOSPEC") + log.info("Using hard-coded BPM for det=1 on BINOSPEC") # TODO: Fix this # Get the binning @@ -296,7 +296,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpm_img[2111 // xbin, 2056 // ybin:4112 // ybin] = 1 elif det == 2: - msgs.info("Using hard-coded BPM for det=2 on BINOSPEC") + log.info("Using hard-coded BPM for det=2 on BINOSPEC") # Get the binning hdu = io.fits_open(filename) @@ -347,7 +347,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['pixelflat', 'trace', 'illumflat']: return good_exp & (fitstbl['lampstat01'] == 'off') & 
(fitstbl['lampstat02'] == 'deployed') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): @@ -384,7 +384,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading BINOSPEC file: {fil}') + log.info(f'Reading BINOSPEC file: {fil}') hdu = io.fits_open(fil) head1 = hdu[1].header diff --git a/pypeit/spectrographs/mmt_bluechannel.py b/pypeit/spectrographs/mmt_bluechannel.py index ed58006291..8d4337b79f 100644 --- a/pypeit/spectrographs/mmt_bluechannel.py +++ b/pypeit/spectrographs/mmt_bluechannel.py @@ -7,7 +7,7 @@ from astropy.io import fits from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -349,7 +349,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) if det == 1: - msgs.info("Using hard-coded BPM for Blue Channel") + log.info("Using hard-coded BPM for Blue Channel") bpm_img[-1, :] = 1 @@ -460,7 +460,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): # i think the bright lamp, BC, is the only one ever used for this. imagetyp should always be set to flat. return good_exp & (fitstbl['lampstat01'] == 'off') & (fitstbl['target'] == 'skyflat') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): @@ -497,7 +497,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read FITS image - msgs.info(f'Reading MMT Blue Channel file: {fil}') + log.info(f'Reading MMT Blue Channel file: {fil}') hdu = fits.open(fil) hdr = hdu[0].header diff --git a/pypeit/spectrographs/mmt_mmirs.py b/pypeit/spectrographs/mmt_mmirs.py index 515d6e1262..c712df45b4 100644 --- a/pypeit/spectrographs/mmt_mmirs.py +++ b/pypeit/spectrographs/mmt_mmirs.py @@ -10,7 +10,7 @@ from astropy.io import fits from astropy.stats import sigma_clipped_stats -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import utils @@ -272,7 +272,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'object') if ftype == 'dark': return good_exp & (fitstbl['idname'] == 'dark') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): @@ -305,7 +305,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for det=1 on MMIRS") + log.info("Using hard-coded BPM for det=1 on MMIRS") # Get the binning hdu = io.fits_open(filename) @@ -352,7 +352,7 @@ def get_rawimage(self, raw_file, det): fil = utils.find_single_file(f'{raw_file}*', required=True) # Read - msgs.info(f'Reading MMIRS file: {fil}') + log.info(f'Reading MMIRS file: {fil}') hdu = io.fits_open(fil) head1 = fits.getheader(fil,1) diff --git a/pypeit/spectrographs/not_alfosc.py 
b/pypeit/spectrographs/not_alfosc.py index 4eff1b8725..ae5e669f13 100644 --- a/pypeit/spectrographs/not_alfosc.py +++ b/pypeit/spectrographs/not_alfosc.py @@ -9,7 +9,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -267,7 +267,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc','tilt']: return good_exp & (fitstbl['idname'] == 'WAVE,LAMP') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def config_specific_par(self, scifile, inp_par=None): @@ -317,7 +317,7 @@ def config_specific_par(self, scifile, inp_par=None): elif self.get_meta_value(scifile, 'dispname') == 'Grism_#20': par['calibrations']['wavelengths']['reid_arxiv'] = 'not_alfosc_grism20.fits' else: - msgs.warning('not_alfosc.py: YOU NEED TO ADD IN THE WAVELENGTH SOLUTION FOR THIS GRISM') + log.warning('not_alfosc.py: YOU NEED TO ADD IN THE WAVELENGTH SOLUTION FOR THIS GRISM') # Return return par diff --git a/pypeit/spectrographs/ntt_efosc2.py b/pypeit/spectrographs/ntt_efosc2.py index 8bd1103632..4236a1c701 100644 --- a/pypeit/spectrographs/ntt_efosc2.py +++ b/pypeit/spectrographs/ntt_efosc2.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse @@ -352,7 +352,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & ((fitstbl['target'] == 'WAVE')) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): @@ -386,7 +386,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for NTT EFOSC2") + log.info("Using hard-coded BPM for NTT EFOSC2") binning = self.get_meta_value(filename, 'binning') binspatial = int(binning[0]) binspec = int(binning[2]) diff --git a/pypeit/spectrographs/opticalmodel.py b/pypeit/spectrographs/opticalmodel.py index 13a2c832c1..50241760e1 100644 --- a/pypeit/spectrographs/opticalmodel.py +++ b/pypeit/spectrographs/opticalmodel.py @@ -9,7 +9,7 @@ import numpy import scipy -from pypeit import msgs +from pypeit import log from pypeit import PypeItError # ---------------------------------------------------------------------- @@ -143,7 +143,7 @@ def reflect(self, r, nslits, wave=None, order=1): if wave is None and self.central_wave is None: raise PypeItError('Must define a wavelength for the calculation.') if wave is None: - msgs.info('Using central wavelength for calculation.') + log.info('Using central wavelength for calculation.') _wave = numpy.array([self.central_wave]) if wave is None else numpy.atleast_1d(wave) if _wave.ndim > 1: raise NotImplementedError('Input wavelength must be one number or a vector.') diff --git a/pypeit/spectrographs/p200_dbsp.py b/pypeit/spectrographs/p200_dbsp.py index 5b87a7e879..7f21fd5dfd 100644 --- a/pypeit/spectrographs/p200_dbsp.py +++ b/pypeit/spectrographs/p200_dbsp.py @@ -12,7 +12,7 @@ from astropy import units as u from 
astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import io from pypeit import telescopes @@ -81,7 +81,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): try: return Angle(headarr[0]['ANGLE'].lower()).deg except Exception as e: - msgs.warning("Could not read dispangle from header:\n" + str(headarr[0]['ANGLE'])) + log.warning("Could not read dispangle from header:\n" + str(headarr[0]['ANGLE'])) raise e else: return None @@ -164,7 +164,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] != '0000000') & (fitstbl['idname'] == 'cal') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def get_rawimage(self, raw_file, det): @@ -381,7 +381,7 @@ def config_specific_par(self, scifile, inp_par=None): # blue wavelength coverage with a 1200 lines/mm grating is about 1550 A diff = np.abs(best_wv - cen_wv_AA) if diff > 775: - msgs.warning("Closest matching archived wavelength solutions" + log.warning("Closest matching archived wavelength solutions" f"differs in central wavelength by {diff:4.0f} A. The" "wavelength solution may be unreliable. If wavelength" "calibration fails, try using the holy grail method by" @@ -391,7 +391,7 @@ def config_specific_par(self, scifile, inp_par=None): "\t\tmethod = holy-grail") par['calibrations']['wavelengths']['reid_arxiv'] = reids[best_wv] except KeyError: - msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") + log.warning("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") else: if grating == '600/4000' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_blue_600_4000_d55.fits' @@ -400,7 +400,7 @@ def config_specific_par(self, scifile, inp_par=None): elif grating == '300/3990' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_blue_300_3990_d55.fits' else: - msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") + log.warning("Your grating " + grating + " doesn't have a template spectrum for the blue arm of DBSP.") return par @@ -611,7 +611,7 @@ def config_specific_par(self, scifile, inp_par=None): # red wavelength coverage with a 1200 lines/mm grating is about 1600 A diff = np.abs(best_wv - cen_wv_AA) if diff > 800: - msgs.warning("Closest matching archived wavelength solutions" + log.warning("Closest matching archived wavelength solutions" f"differs in central wavelength by {diff:4.0f} A. The" "wavelength solution may be unreliable. 
If wavelength" "calibration fails, try using the holy grail method by" @@ -621,14 +621,14 @@ def config_specific_par(self, scifile, inp_par=None): "\t\tmethod = holy-grail") par['calibrations']['wavelengths']['reid_arxiv'] = reids[best_wv] except KeyError: - msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") + log.warning("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") else: if grating == '316/7500' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_red_316_7500_d55.fits' elif grating == '600/10000' and dichroic == 'D55': par['calibrations']['wavelengths']['reid_arxiv'] = 'p200_dbsp_red_600_10000_d55.fits' else: - msgs.warning("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") + log.warning("Your grating " + grating + " doesn't have a template spectrum for the red arm of DBSP.") return par @@ -648,7 +648,7 @@ def bpm(self, filename, det, shape=None, msbias=None): bpix : ndarray 0 = ok; 1 = Mask """ - msgs.info("Custom bad pixel mask for DBSPr") + log.info("Custom bad pixel mask for DBSPr") bpm_img = self.empty_bpm(filename, det, shape=shape) # Fill in bad pixels if a processed bias frame is provided diff --git a/pypeit/spectrographs/p200_ngps.py b/pypeit/spectrographs/p200_ngps.py index d6d193de57..fe6a2ebe90 100644 --- a/pypeit/spectrographs/p200_ngps.py +++ b/pypeit/spectrographs/p200_ngps.py @@ -10,7 +10,7 @@ from astropy.io import fits from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -147,7 +147,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & (fitstbl['idname'] == 'THAR') # Temporary fix, do not use FEAR arcs - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/p200_tspec.py b/pypeit/spectrographs/p200_tspec.py index ee275717ee..2066ae7d54 100644 --- a/pypeit/spectrographs/p200_tspec.py +++ b/pypeit/spectrographs/p200_tspec.py @@ -7,7 +7,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -305,7 +305,7 @@ def bpm(self, filename, det, shape=None, msbias=None): 0. 
""" # Call the base-class method to generate the empty bpm - msgs.info("Custom bad pixel mask for TSPEC") + log.info("Custom bad pixel mask for TSPEC") return super().bpm(filename, det, shape=shape, msbias=None) @property diff --git a/pypeit/spectrographs/shane_kast.py b/pypeit/spectrographs/shane_kast.py index 51d152a0e0..87c5166c6e 100644 --- a/pypeit/spectrographs/shane_kast.py +++ b/pypeit/spectrographs/shane_kast.py @@ -10,7 +10,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -154,7 +154,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arcs')# & (fitstbl['target'] == 'Arcs') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def lamps(self, fitstbl, status): @@ -422,7 +422,7 @@ def get_detector_par(self, det, hdu=None): # Allow for reading only Amp 2! if x1_1 < 3: - msgs.warning("Only Amp 2 data was written. Ignoring Amp 1") + log.warning("Only Amp 2 data was written. Ignoring Amp 1") detector_dict['numamplifiers'] = 1 detector_dict['gain'] = np.atleast_1d(detector_dict['gain'][0]) detector_dict['ronoise'] = np.atleast_1d(detector_dict['ronoise'][0]) diff --git a/pypeit/spectrographs/soar_goodman.py b/pypeit/spectrographs/soar_goodman.py index a581e13be7..2ca97d4df7 100644 --- a/pypeit/spectrographs/soar_goodman.py +++ b/pypeit/spectrographs/soar_goodman.py @@ -7,7 +7,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import standard @@ -205,7 +205,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & self.lamps(fitstbl, 'arc') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -413,7 +413,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for SOAR/Goodman") + log.info("Using hard-coded BPM for SOAR/Goodman") bpm_img[:, 0] = 1 return bpm_img @@ -602,7 +602,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Call the base-class method to generate the empty bpm bpm_img = super().bpm(filename, det, shape=shape, msbias=msbias) - msgs.info("Using hard-coded BPM for SOAR/Goodman") + log.info("Using hard-coded BPM for SOAR/Goodman") bpm_img[:, 0] = 1 return bpm_img diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index 9c4b60415a..999c7fcbbb 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -34,7 +34,7 @@ import numpy as np from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import io from pypeit.core import parse @@ -447,7 +447,7 @@ def subheader_for_spec(self, row_fitstbl, raw_header, extra_header_cards=None, subheader[key] = row_fitstbl[key] except KeyError: # If configuration_key is not in row_fitstbl, warn but move on - msgs.warning(f"Configuration Key: {key} not present in 
your fitstbl/Header") + log.warning(f"Configuration Key: {key} not present in your fitstbl/Header") # Add a few more for key in ['filename']: # For fluxing subheader[key] = row_fitstbl[key] @@ -639,7 +639,7 @@ def bpm(self, filename, det, shape=None, msbias=None): # Not using bias to identify bad pixels, so we're done return bpm_img - msgs.info(f'Generating a BPM using bias for det={_det} for {self.name}') + log.info(f'Generating a BPM using bias for det={_det} for {self.name}') return self.bpm_frombias(msbias, bpm_img) def list_detectors(self, mosaic=False): @@ -1436,7 +1436,7 @@ def get_meta_value(self, inp, meta_key, required=False, if required: raise PypeItError("Need to allow for meta_key={} in your meta data".format(meta_key)) else: - msgs.warning("Requested meta data for meta_key={} does not exist...".format(meta_key)) + log.warning("Requested meta data for meta_key={} does not exist...".format(meta_key)) return None # Is this meta required for this frame type (Spectrograph specific) @@ -1468,7 +1468,7 @@ def get_meta_value(self, inp, meta_key, required=False, value = headarr[self.meta[meta_key]['ext']][self.meta[meta_key]['card']] except (KeyError, TypeError) as e: if ignore_bad_header or not required: - msgs.warning(f"Bad Header key ({meta_key}), but we'll try to continue on..") + log.warning(f"Bad Header key ({meta_key}), but we'll try to continue on..") else: raise e @@ -1483,7 +1483,7 @@ def get_meta_value(self, inp, meta_key, required=False, ra, dec = meta.convert_radec(self.get_meta_value(headarr, 'ra', no_fussing=True), self.get_meta_value(headarr, 'dec', no_fussing=True)) except: - msgs.warning('Encounter invalid value of your coordinates. Give zeros for both RA and DEC') + log.warning('Encounter invalid value of your coordinates. Give zeros for both RA and DEC') ra, dec = 0.0, 0.0 value = ra if meta_key == 'ra' else dec @@ -1528,7 +1528,7 @@ def get_meta_value(self, inp, meta_key, required=False, raise PypeItError('Required meta "{0}" did not load!'.format(meta_key) + 'You may have a corrupt header.') else: - msgs.warning('Required card {0} missing '.format(self.meta[meta_key]['card']) + log.warning('Required card {0} missing '.format(self.meta[meta_key]['card']) + 'from your header. Proceeding with risk...') return None @@ -1564,7 +1564,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Returns: `astropy.wcs.WCS`_: The world-coordinate system. """ - msgs.warning("No WCS setup for spectrograph: {0:s}".format(self.name)) + log.warning("No WCS setup for spectrograph: {0:s}".format(self.name)) return None def get_datacube_bins(self, slitlength, minmax, num_wave): @@ -1589,7 +1589,7 @@ def get_datacube_bins(self, slitlength, minmax, num_wave): when constructing a histogram of the spec2d files. The elements are :math:`(x,y,\lambda)`. """ - msgs.warning("No datacube setup for spectrograph: {0:s}".format(self.name)) + log.warning("No datacube setup for spectrograph: {0:s}".format(self.name)) return None def fit_2d_det_response(self, det_resp, gpmask): @@ -1605,7 +1605,7 @@ def fit_2d_det_response(self, det_resp, gpmask): Returns: `numpy.ndarray`_: A model fit to the detector response. 
""" - msgs.warning("2D detector response is not implemented for spectrograph: {0:s}".format(self.name)) + log.warning("2D detector response is not implemented for spectrograph: {0:s}".format(self.name)) return np.ones_like(det_resp) def validate_metadata(self): @@ -1672,7 +1672,7 @@ def get_headarr(self, inp, strict=True): if strict: raise PypeItError(f'Cannot open {inp}.') else: - msgs.warning(f'Cannot open {inp}. Proceeding, but consider removing this file!') + log.warning(f'Cannot open {inp}. Proceeding, but consider removing this file!') return None elif isinstance(inp, (list, fits.HDUList)): # TODO: If a list, check that the list elements are HDUs? @@ -1735,9 +1735,9 @@ def vet_assigned_ftypes(self, type_bits, fitstbl): indx = fitstbl.type_bitmask.flagged(type_bits, flag='standard') & \ fitstbl.type_bitmask.flagged(type_bits, flag='science') if np.any(indx): - msgs.warning('Some frames are assigned both science and standard types. Choosing the most likely type.') + log.warning('Some frames are assigned both science and standard types. Choosing the most likely type.') if 'ra' not in fitstbl.keys() or 'dec' not in fitstbl.keys(): - msgs.warning('Sky coordinates are not available. Standard stars cannot be identified.') + log.warning('Sky coordinates are not available. Standard stars cannot be identified.') # turn off the standard flag for all frames type_bits[indx] = fitstbl.type_bitmask.turn_off(type_bits[indx], flag='standard') return type_bits @@ -1745,9 +1745,9 @@ def vet_assigned_ftypes(self, type_bits, fitstbl): none_coords = indx & ((fitstbl['ra'] == 'None') | (fitstbl['dec'] == 'None') | np.isnan(fitstbl['ra']) | np.isnan(fitstbl['dec'])) if np.any(none_coords): - msgs.warning('The following frames have None coordinates. ' + log.warning('The following frames have None coordinates. ' 'They could be a twilight flat frame that was missed by the automatic identification') - [msgs.warning(f' {f}') for f in fitstbl['filename'][none_coords]] + [log.warning(f' {f}') for f in fitstbl['filename'][none_coords]] # turn off the standard star flag for these frames type_bits[none_coords] = fitstbl.type_bitmask.turn_off(type_bits[none_coords], flag='standard') @@ -2009,7 +2009,7 @@ def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table, trim_gpm[s:-e] = True # Set the wave, counts, gpm, inverse variance and log10 blaze to zero in the masked pixels - msgs.info('Trimming standard star spectrum by {:d} pixels at the start and {:d} pixels at the end.'.format(s, e)) + log.info('Trimming standard star spectrum by {:d} pixels at the start and {:d} pixels at the end.'.format(s, e)) wave_out = wave_out* trim_gpm counts_out = counts_out * trim_gpm counts_ivar_out = counts_ivar_out * trim_gpm @@ -2045,7 +2045,7 @@ def calc_pattern_freq(self, frame, rawdatasec_img, oscansec_img, hdu): patt_freqs : :obj:`list` List of pattern frequencies. 
""" - msgs.warning(f"Pattern noise removal is not implemented for spectrograph {self.name}") + log.warning(f"Pattern noise removal is not implemented for spectrograph {self.name}") return [] def scattered_light_archive(self, binning, dispname): @@ -2070,7 +2070,7 @@ def scattered_light_archive(self, binning, dispname): # Grab the binning for convenience specbin, spatbin = parse.parse_binning(binning) - msgs.warning(f"Initial scattered light model parameters have not been setup for grating {dispname} of {self.name}") + log.warning(f"Initial scattered light model parameters have not been setup for grating {dispname} of {self.name}") x0 = np.array([200/specbin, 100/spatbin, # Gaussian kernel widths 200/specbin, 100/spatbin, # Lorentzian kernel widths 0.0/specbin, 0.0/spatbin, # pixel offsets diff --git a/pypeit/spectrographs/subaru_focas.py b/pypeit/spectrographs/subaru_focas.py index 571f847242..85bf06204d 100644 --- a/pypeit/spectrographs/subaru_focas.py +++ b/pypeit/spectrographs/subaru_focas.py @@ -4,7 +4,7 @@ .. include:: ../include/links.rst """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse @@ -177,7 +177,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'COMPARISON') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -537,7 +537,7 @@ def get_rawimage(self, raw_file, det): ]) # Read image - msgs.info(f'Attempting to read FOCAS file: {raw_file}, det={det}') + log.info(f'Attempting to read FOCAS file: {raw_file}, det={det}') # NOTE: io.fits_open checks that the file exists hdu_l = io.fits_open(raw_file) diff --git a/pypeit/spectrographs/tng_dolores.py b/pypeit/spectrographs/tng_dolores.py index 4f61ea5d8d..9b6ab8be4e 100644 --- a/pypeit/spectrographs/tng_dolores.py +++ b/pypeit/spectrographs/tng_dolores.py @@ -8,7 +8,7 @@ from astropy.time import Time -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -126,7 +126,7 @@ def config_specific_par(self, scifile, inp_par=None): par['calibrations']['wavelengths']['lamps'] = ['NeI', 'HgI'] else: par['calibrations']['wavelengths']['method'] = 'holy-grail' - msgs.warning('Check wavelength calibration file.') + log.warning('Check wavelength calibration file.') # Return return par @@ -249,7 +249,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['idname'] == 'CALIB') & ( (fitstbl['lampstat01'] == 'Ne+Hg') | (fitstbl['lampstat01'] == 'Helium') ) \ & (fitstbl['dispname'] != 'OPEN') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/util.py b/pypeit/spectrographs/util.py index 59fb53e229..16313de7cd 100644 --- a/pypeit/spectrographs/util.py +++ b/pypeit/spectrographs/util.py @@ -10,7 +10,7 @@ from astropy.io import fits from pypeit import spectrographs -from pypeit import msgs +from pypeit import log from pypeit import PypeItError diff --git a/pypeit/spectrographs/vlt_fors.py b/pypeit/spectrographs/vlt_fors.py index 768b47d374..846f8e6766 100644 --- a/pypeit/spectrographs/vlt_fors.py +++ 
b/pypeit/spectrographs/vlt_fors.py @@ -4,7 +4,7 @@ .. include:: ../include/links.rst """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import parse @@ -204,7 +204,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return good_exp & ((fitstbl['target'] == 'LAMP,WAVE') | (fitstbl['target'] == 'WAVE,LAMP')) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -437,7 +437,7 @@ def parse_dither_pattern(self, file_list, ext=None): ra, dec = meta.convert_radec(self.get_meta_value(hdr, 'ra', no_fussing=True), self.get_meta_value(hdr, 'dec', no_fussing=True)) except: - msgs.warning('Encounter invalid value of your coordinates. Give zeros for both RA and DEC. Check that this does not cause problems with the offsets') + log.warning('Encountered an invalid value for your coordinates. Using zeros for both RA and DEC. Check that this does not cause problems with the offsets') ra, dec = 0.0, 0.0 if ifile == 0: coord_ref = SkyCoord(ra*units.deg, dec*units.deg) diff --git a/pypeit/spectrographs/vlt_sinfoni.py b/pypeit/spectrographs/vlt_sinfoni.py index 8a5717cc87..9c25195f4e 100644 --- a/pypeit/spectrographs/vlt_sinfoni.py +++ b/pypeit/spectrographs/vlt_sinfoni.py @@ -8,7 +8,7 @@ import numpy as np from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import framematch @@ -314,7 +314,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['sky']: return good_exp & (fitstbl['idname'] == 'SINFONI_IFS_SKY') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/spectrographs/vlt_xshooter.py b/pypeit/spectrographs/vlt_xshooter.py index 461292b524..c4f27067d5 100644 --- a/pypeit/spectrographs/vlt_xshooter.py +++ b/pypeit/spectrographs/vlt_xshooter.py @@ -8,7 +8,7 @@ from astropy.coordinates import SkyCoord from astropy import units -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit import io @@ -194,7 +194,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['target'] == 'LAMP,WAVE') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -427,7 +427,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): if ftype in ['arc', 'tilt']: return good_exp & ((fitstbl['target'] == 'LAMP,WAVE') | (fitstbl['target'] == 'SCIENCE')) - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) def bpm(self, filename, det, shape=None, msbias=None): diff --git a/pypeit/spectrographs/wht_isis.py b/pypeit/spectrographs/wht_isis.py index 7292356a34..0ca7f9d4d8 100644 --- a/pypeit/spectrographs/wht_isis.py +++ b/pypeit/spectrographs/wht_isis.py @@ -5,7 +5,7 @@ """ import numpy as np -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import telescopes from pypeit.core import
framematch @@ -263,7 +263,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) @@ -409,7 +409,7 @@ def check_frame_type(self, ftype, fitstbl, exprng=None): return np.zeros(len(fitstbl), dtype=bool) if ftype in ['arc', 'tilt']: return good_exp & (fitstbl['lampstat01'] == 'CuNe+CuAr') & (fitstbl['idname'] == 'arc') - msgs.debug('Cannot determine if frames are of type {0}.'.format(ftype)) + log.debug('Cannot determine if frames are of type {0}.'.format(ftype)) return np.zeros(len(fitstbl), dtype=bool) diff --git a/pypeit/specutils/pypeit_loaders.py b/pypeit/specutils/pypeit_loaders.py index acb8e1caa5..e6015b24fa 100644 --- a/pypeit/specutils/pypeit_loaders.py +++ b/pypeit/specutils/pypeit_loaders.py @@ -33,7 +33,7 @@ from pypeit import __version__ from pypeit import PypeItError -from pypeit import msgs +from pypeit import log from pypeit import specobjs from pypeit import onespec from pypeit import utils @@ -84,7 +84,7 @@ def _enforce_monotonic_wavelengths(wave, flux, ivar, strict=True): 'error in the data reduction!') # Wavelengths are not monotonic, but the user wants to keep going. - msgs.warning('Wavelengths are not monotonically increasing! Strict was set to False, so ' + log.warning('Wavelengths are not monotonically increasing! Strict was set to False, so ' 'measurements after a negative step in wavelength are removed from the constructed ' 'spectrum. BEWARE that this is likely the result of an error in the data ' 'reduction!') @@ -204,9 +204,9 @@ def pypeit_spec1d_loader(filename, extract=None, fluxed=True, strict=True, chk_v _wave, _flux, _ivar, _gpm = sobj.get_box_ext(fluxed=_cal) if _ext == 'BOX' \ else sobj.get_opt_ext(fluxed=_cal) if not np.all(_gpm): - msgs.warning(f'Ignoring {np.sum(np.logical_not(_gpm))} masked pixels.') + log.warning(f'Ignoring {np.sum(np.logical_not(_gpm))} masked pixels.') if not np.any(_gpm): - msgs.warning(f'Spectrum {sobj.NAME} is fully masked and will be ignored!') + log.warning(f'Spectrum {sobj.NAME} is fully masked and will be ignored!') continue _wave, _flux, _ivar = _enforce_monotonic_wavelengths(_wave[_gpm], _flux[_gpm], _ivar[_gpm], strict=strict) diff --git a/pypeit/tests/test_collate_1d.py b/pypeit/tests/test_collate_1d.py index 3919c0e006..a592d119d6 100644 --- a/pypeit/tests/test_collate_1d.py +++ b/pypeit/tests/test_collate_1d.py @@ -422,7 +422,7 @@ def test_exclude_source_objects(monkeypatch): par = pypeitpar.PypeItPar() par['collate1d']['exclude_serendip'] = True par['collate1d']['wv_rms_thresh'] = 0.1 - filtered_list, excluded_msgs = exclude_source_objects(uncollated_list, {'3003': 'Test Exclude`'}, par) + filtered_list, excluded_log = exclude_source_objects(uncollated_list, {'3003': 'Test Exclude`'}, par) assert [so.spec_obj_list[0].NAME for so in filtered_list] == ['SPAT5334_SLIT4934_DET02'] assert [so.spec1d_file_list[0] for so in filtered_list] == ['spec1d_file1'] @@ -430,7 +430,7 @@ def test_exclude_source_objects(monkeypatch): par['coadd1d']['ex_value'] = 'BOX' par['collate1d']['wv_rms_thresh'] = None - filtered_list, excluded_msgs = exclude_source_objects(uncollated_list, dict(), par) + filtered_list, excluded_log = exclude_source_objects(uncollated_list, dict(), par) assert 
[so.spec_obj_list[0].NAME for so in filtered_list] == ['SPAT1233_SLIT1235_DET07', 'SPAT6256_SLIT6245_DET05', 'SPAT6934_SLIT6245_DET05'] assert [so.spec1d_file_list[0] for so in filtered_list] == ['spec1d_file1', 'spec1d_file2', 'spec1d_file2' ] @@ -648,14 +648,14 @@ def mock_geomotion_correct(*args, **kwargs): spectrograph = load_spectrograph('keck_deimos') # Test that should fail due to no RA/DEC nor mjd in header - spec1d_failure_msgs = [] + spec1d_failure_log = [] spec1d_files = ["spec1d_file3"] - refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs) - assert len(spec1d_failure_msgs) == 1 - assert spec1d_failure_msgs[0].startswith('Failed to perform heliocentric correction on spec1d_file3') + refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_log) + assert len(spec1d_failure_log) == 1 + assert spec1d_failure_log[0].startswith('Failed to perform heliocentric correction on spec1d_file3') # Test where onf of the SpecObjs already has a VEL_CORR that should not be overwritten - spec1d_failure_msgs = [] + spec1d_failure_log = [] spec1d_files = ["spec1d_file4"] # Test where one VEL_CORR is already set, and the SpecObj objects have no RA/DEC so the header RA/DEC must be used instead @@ -664,9 +664,9 @@ def mock_from_fitsfile(*args, **kwargs): return sobjs monkeypatch.setattr(specobjs.SpecObjs, "from_fitsfile", mock_from_fitsfile) - refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_msgs) - assert len(spec1d_failure_msgs) == 1 - assert spec1d_failure_msgs[0].startswith('Not performing heliocentric correction for spec1d_file4 object SPAT3234_SLIT3236_DET03 because it has already been corrected') + refframe_correction(par, spectrograph, spec1d_files, spec1d_failure_log) + assert len(spec1d_failure_log) == 1 + assert spec1d_failure_log[0].startswith('Not performing heliocentric correction for spec1d_file4 object SPAT3234_SLIT3236_DET03 because it has already been corrected') assert sobjs[0].VEL_CORR == 2.0 # Original value, should not have been overwritten assert sobjs[1].VEL_CORR == 1.0 # New value, from apply_helio diff --git a/pypeit/tests/test_qa.py b/pypeit/tests/test_qa.py index 7834e41031..4b64c5aea3 100644 --- a/pypeit/tests/test_qa.py +++ b/pypeit/tests/test_qa.py @@ -1,7 +1,7 @@ """ Module to run tests on arqa """ -from pypeit import msgs +from pypeit import log from pypeit.core import qa def test_get_dimen(): diff --git a/pypeit/tests/test_specobj.py b/pypeit/tests/test_specobj.py index 753f01cecc..4dedf4d7e4 100644 --- a/pypeit/tests/test_specobj.py +++ b/pypeit/tests/test_specobj.py @@ -14,7 +14,7 @@ from pypeit import specobj from pypeit.tests.tstutils import data_output_path -from pypeit import msgs +from pypeit import log def test_init(): diff --git a/pypeit/tests/test_utils.py b/pypeit/tests/test_utils.py index fc34d84e6c..994719904a 100644 --- a/pypeit/tests/test_utils.py +++ b/pypeit/tests/test_utils.py @@ -10,7 +10,7 @@ import numpy as np from pypeit import utils -from pypeit import msgs +from pypeit import log from pypeit.tests.tstutils import data_output_path from pypeit import io diff --git a/pypeit/tracepca.py b/pypeit/tracepca.py index 758b8c3b99..ef094794e5 100644 --- a/pypeit/tracepca.py +++ b/pypeit/tracepca.py @@ -13,7 +13,7 @@ from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit import utils from pypeit.io import hdu_iter_by_ext @@ -407,7 +407,7 @@ def pca_trace_object(trace_cen, order=None, trace_bpm=None, min_length=0.6, npca _order = np.clip(order 
- np.arange(cenpca.npca), 1, None).astype(int) if _order.size != cenpca.npca: raise PypeItError('Number of polynomial orders does not match the number of PCA components.') - msgs.info('Order of function fit to each component: {0}'.format(_order)) + log.info('Order of function fit to each component: {0}'.format(_order)) # Apply a 10% relative error to each coefficient. This performs # better than use_mad, since larger coefficients will always be diff --git a/pypeit/utils.py b/pypeit/utils.py index 836a7ec866..1e043bde0d 100644 --- a/pypeit/utils.py +++ b/pypeit/utils.py @@ -29,7 +29,7 @@ from astropy import stats from astropy.io import ascii -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit.move_median import move_median from pypeit import dataPaths @@ -764,7 +764,7 @@ def boxcar_smooth_rows(img, nave, wgt=None, mode='nearest', replace='original'): if wgt is not None and img.shape != wgt.shape: raise ValueError('Input image to smooth and weights must have the same shape.') if nave > img.shape[0]: - msgs.warning('Smoothing box is larger than the image size!') + log.warning('Smoothing box is larger than the image size!') # Construct the kernel for mean calculation _nave = np.fmin(nave, img.shape[0]) @@ -951,7 +951,7 @@ def rebinND(img, shape): rem0, rem1 = img.shape[0] % shape[0], img.shape[1] % shape[1] if rem0 != 0 or rem1 != 0: # In this case, the shapes are not an integer multiple... need to slice - msgs.warning("Input image shape is not an integer multiple of the requested shape. Flux is not conserved.") + log.warning("Input image shape is not an integer multiple of the requested shape. Flux is not conserved.") return rebin_slice(img, shape) # Convert input 2D image into a 4D array to make the rebinning easier sh = shape[0], img.shape[0] // shape[0], shape[1], img.shape[1] // shape[1] @@ -1113,9 +1113,9 @@ def fast_running_median(seq, window_size): # upon return (very bad). Added by JFH. Should we print out an error here? if (window_size > (len(seq) - 1)): - msgs.warning('window_size > len(seq)-1. Truncating window_size to len(seq)-1, but something is probably wrong....') + log.warning('window_size > len(seq)-1. Truncating window_size to len(seq)-1, but something is probably wrong....') if (window_size < 0): - msgs.warning( + log.warning( 'window_size is negative. This does not make sense something is probably wrong. Setting window size to 1') window_size = int(np.fmax(np.fmin(int(window_size), len(seq) - 1), 1)) @@ -1212,7 +1212,7 @@ def clip_ivar(flux, ivar, sn_clip, gpm=None, verbose=False): return ivar if verbose: - msgs.info('Inflating errors to keep S/N ratio below S/N_clip = {:5.3f}'.format(sn_clip)) + log.info('Inflating errors to keep S/N ratio below S/N_clip = {:5.3f}'.format(sn_clip)) _gpm = ivar > 0. 
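# A minimal sketch of the clipping idea in clip_ivar, for orientation only: since S/N = flux*sqrt(ivar),
# keeping S/N <= sn_clip amounts to capping the inverse variance. The name ivar_capped below is
# hypothetical and the actual PypeIt implementation may apply the cap differently:
#     ivar_capped = np.minimum(ivar, (sn_clip / np.abs(flux))**2)   # inflate noise wherever S/N > sn_clip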
if gpm is not None: @@ -1435,7 +1435,7 @@ def replace_bad(frame, bpm): if frame.shape != bpm.shape: raise PypeItError("Input frame and BPM have different shapes") # Replace bad pixels with the nearest (good) neighbour - msgs.info("Replacing bad pixels") + log.info("Replacing bad pixels") ind = scipy.ndimage.distance_transform_edt(bpm, return_distances=False, return_indices=True) return frame[tuple(ind)] @@ -1644,7 +1644,7 @@ def save_pickle(fname, obj): fname += '.pkl' with open(fname, 'wb') as f: pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL) - msgs.info('File saved: {0:s}'.format(fname)) + log.info('File saved: {0:s}'.format(fname)) def load_pickle(fname): @@ -1660,7 +1660,7 @@ def load_pickle(fname): :class:`object` An object suitable for pickle serialization. """ - msgs.info('Loading file: {0:s}'.format(fname)) + log.info('Loading file: {0:s}'.format(fname)) with open(fname, 'rb') as f: return pickle.load(f) @@ -1945,7 +1945,7 @@ def find_single_file(file_pattern, required: bool=False) -> pathlib.Path: """ files = sorted(glob.glob(file_pattern)) if len(files) > 1: - msgs.warning(f'Found multiple files matching {file_pattern}; using {files[0]}') + log.warning(f'Found multiple files matching {file_pattern}; using {files[0]}') if len(files) == 0 and required: raise PypeItError(f'No files matching pattern: {file_pattern}') return None if len(files) == 0 else pathlib.Path(files[0]) diff --git a/pypeit/wavecalib.py b/pypeit/wavecalib.py index f46aefec08..f8c4d243f5 100644 --- a/pypeit/wavecalib.py +++ b/pypeit/wavecalib.py @@ -14,7 +14,7 @@ from astropy.table import Table from astropy.io import fits -from pypeit import msgs +from pypeit import log from pypeit import PypeItError from pypeit.core import arc, qa from pypeit.core import fitting @@ -627,7 +627,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, # print to screen the slit widths if maskdef_designtab is available if self.slits.maskdef_designtab is not None: - msgs.info("Slit widths (arcsec): {}".format(np.round(self.slits.maskdef_designtab['SLITWID'].data, 2))) + log.info("Slit widths (arcsec): {}".format(np.round(self.slits.maskdef_designtab['SLITWID'].data, 2))) # Generate a map of the instrumental spectral FWHM # TODO nsample should be a parameter @@ -659,7 +659,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, raise NotImplementedError('method = identify not yet implemented') final_fit = {} # Manually identify lines - msgs.info("Initializing the wavelength calibration tool") + log.info("Initializing the wavelength calibration tool") #embed(header='line 222 wavecalib.py') for slit_idx in ok_mask_idx: arcfitter = Identify.initialise(arccen, self.lamps, self.slits, slit=slit_idx, par=self.par) @@ -709,7 +709,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, angle_fits_file, composite_arc_file = self.spectrograph.get_echelle_angle_files() # Identify the echelle orders - msgs.info("Finding the echelle orders") + log.info("Finding the echelle orders") order_vec, wave_soln_arxiv, arcspec_arxiv = echelle.identify_ech_orders( arccen, self.meta_dict['echangle'], self.meta_dict['xdangle'], @@ -720,7 +720,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, cc_percent_ceil = self.par['cc_percent_ceil'], debug=False) # Put the order numbers in the slit object self.slits.ech_order = order_vec - msgs.info(f"The observation covers the following orders: {order_vec}") + log.info(f"The observation covers the following orders: {order_vec}") patt_dict, final_fit = autoid.echelle_wvcalib( arccen, order_vec, 
arcspec_arxiv, wave_soln_arxiv, @@ -792,7 +792,7 @@ def build_wv_calib(self, arccen, method, skip_QA=False, if not skip_QA: ok_mask_idx = np.where(np.invert(self.wvc_bpm))[0] for slit_idx in ok_mask_idx: - msgs.info(f"Preparing wavelength calibration QA for slit {slit_idx+1}/{self.slits.nslits}") + log.info(f"Preparing wavelength calibration QA for slit {slit_idx+1}/{self.slits.nslits}") # Obtain the output QA name for the wavelength solution outfile = qa.set_qa_filename( self.wv_calib.calib_key, 'arc_fit_qa', @@ -843,11 +843,11 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets in_det = np.in1d(bad_orders, order_dets[idet]) if not np.any(in_det): continue - msgs.info(f"Attempting to refit bad orders in detector={dets[idet]}") + log.info(f"Attempting to refit bad orders in detector={dets[idet]}") # Are there few enough? max_bad = int(len(order_dets[idet])*bad_orders_maxfrac) if np.sum(in_det) > max_bad: - msgs.warning(f"Too many bad orders in detector={dets[idet]} to attempt a refit.") + log.warning(f"Too many bad orders in detector={dets[idet]} to attempt a refit.") continue # Loop for order in bad_orders[in_det]: @@ -872,7 +872,7 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets fwhm=fwhm) if not patt_dict_slit['acceptable']: - msgs.warning(f"Order {order} is still not acceptable after attempt to reidentify.") + log.warning(f"Order {order} is still not acceptable after attempt to reidentify.") continue # Fit me -- RMS may be too high again @@ -886,11 +886,11 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets sigrej_first=self.par['sigrej_first'], n_final=n_final, sigrej_final=self.par['sigrej_final']) - msgs.info(f"New RMS for redo of order={order}: {final_fit['rms']}") + log.info(f"New RMS for redo of order={order}: {final_fit['rms']}") # Keep? if final_fit['rms'] < frac_rms_thresh*wave_rms_thresh: - msgs.info('Updating wavelength solution.') + log.info('Updating wavelength solution.') # TODO -- This is repeated from build_wv_calib() # Would be nice to consolidate # QA @@ -910,7 +910,7 @@ def redo_echelle_orders(self, bad_orders:np.ndarray, dets:np.ndarray, order_dets self.wvc_bpm[iord] = False fixed = True else: - msgs.warning(f'New RMS is too high (>{frac_rms_thresh}xRMS threshold). ' + log.warning(f'New RMS is too high (>{frac_rms_thresh}xRMS threshold). ' f'Not updating wavelength solution.') # return fixed @@ -945,7 +945,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): if self.spectrograph.pypeline != 'Echelle': raise PypeItError('Cannot execute echelle_2dfit for a non-echelle spectrograph.') - msgs.info('Fitting 2-d wavelength solution for echelle....') + log.info('Fitting 2-d wavelength solution for echelle....') # Obtain a list of good slits ok_mask_idx = np.where(np.logical_not(self.wvc_bpm))[0] @@ -967,7 +967,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): save_order_dets = [] for idet in dets: order_in_dets = [] - msgs.info('Fitting detector {:d}'.format(idet)) + log.info('Fitting detector {:d}'.format(idet)) # Init all_wave = np.array([], dtype=float) all_pixel = np.array([],dtype=float) @@ -1004,7 +1004,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): # Fit if len(all_order) < 2: - msgs.warning(f"Fewer than 2 orders to fit for detector {idet}. Skipping") + log.warning(f"Fewer than 2 orders to fit for detector {idet}. 
Skipping") save_order_dets.append([]) # Add a dummy fit fit2ds.append(fitting.PypeItFit()) @@ -1022,7 +1022,7 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): # QA if not skip_QA: if wv_calib.calib_key is None: - msgs.warning('WaveCalib object provided does not have a defined calibration ' + log.warning('WaveCalib object provided does not have a defined calibration ' 'key. The QA files will not include this key in the file name, ' 'meaning that existing QA files may be overwritten.') calib_key = '' @@ -1144,7 +1144,7 @@ def run(self, skip_QA=False, debug=False, bad_rms = rms > wave_rms_thresh if np.any(bad_rms): self.wvc_bpm[bad_rms] = True - msgs.warning("Masking one or more bad orders (RMS)") + log.warning("Masking one or more bad orders (RMS)") # Fit fit2ds, dets, order_dets = self.echelle_2dfit( self.wv_calib, skip_QA = skip_QA, debug=debug) diff --git a/pypeit/wavemodel.py b/pypeit/wavemodel.py index 4ea3c190e7..3258994d9e 100644 --- a/pypeit/wavemodel.py +++ b/pypeit/wavemodel.py @@ -19,7 +19,7 @@ from astropy.table import Table from astropy import units -from pypeit import msgs +from pypeit import log from pypeit import dataPaths from pypeit.core import arc from pypeit import utils @@ -55,7 +55,7 @@ def blackbody(wavelength, T_BB=250., debug=False): K_BOLTZ = astropy.constants.k_B.cgs.value # erg/K RADIAN_PER_ARCSEC = 1./3600.*np.pi/180. - msgs.info("Creating BB spectrum at T={}K".format(T_BB)) + log.info("Creating BB spectrum at T={}K".format(T_BB)) lam = wavelength / 1e4 # convert wave in cm. blackbody_pol = 2.*PLANCK*np.power(C_LIGHT,2) / np.power(lam,5) blackbody_exp = np.exp(PLANCK*C_LIGHT/(lam*K_BOLTZ*T_BB)) - 1. @@ -65,7 +65,7 @@ def blackbody(wavelength, T_BB=250., debug=False): if debug: utils.pyplot_rcparams() - msgs.info("Plot of the blackbody spectrum.") + log.info("Plot of the blackbody spectrum.") plt.figure() plt.plot(wavelength, blackbody, color='navy', linestyle='-', alpha=0.8, @@ -74,7 +74,7 @@ def blackbody(wavelength, T_BB=250., debug=False): plt.xlabel(r"Wavelength [micron]") plt.ylabel(r"Spectral Radiance") plt.title(r"Planck's law") - msgs.info("Close the Figure to continue.") + log.info("Close the Figure to continue.") plt.show(block=True) plt.close() utils.pyplot_rcparams_default() @@ -119,14 +119,14 @@ def addlines2spec(wavelength, wl_line, fl_line, resolution, # define sigma of the gaussians sigma = wl_line_good / resolution / 2.355 - msgs.info("Creating line spectrum") + log.info("Creating line spectrum") for ii in np.arange(len(wl_line_good)): line_spec += scale_spec*fl_line_good[ii]*\ np.exp(-np.power((wl_line_good[ii]-wavelength),2.)/(2.*np.power(sigma[ii],2.))) if debug: utils.pyplot_rcparams() - msgs.info("Plot of the line spectrum.") + log.info("Plot of the line spectrum.") plt.figure() plt.plot(wavelength, line_spec, color='navy', linestyle='-', alpha=0.8, @@ -134,7 +134,7 @@ def addlines2spec(wavelength, wl_line, fl_line, resolution, plt.legend() plt.xlabel(r'Wavelength') plt.ylabel(r'Flux') - msgs.info("Close the Figure to continue.") + log.info("Close the Figure to continue.") plt.show(block=True) plt.close() utils.pyplot_rcparams_default() @@ -152,7 +152,7 @@ def oh_lines(): amplitude : `numpy.ndarray`_ Amplitude of the OH lines. 
""" - msgs.info("Reading in the Rousselot (2000) OH line list") + log.info("Reading in the Rousselot (2000) OH line list") oh = np.loadtxt(dataPaths.skisim.get_file_path('rousselot2000.dat'), usecols=(0, 1)) return oh[:,0]/10000., oh[:,1] # wave converted to microns @@ -175,7 +175,7 @@ def transparency(wavelength, debug=False): fully transparent and 0. fully opaque """ - msgs.info("Reading in the atmospheric transmission model") + log.info("Reading in the atmospheric transmission model") transparency = np.loadtxt(dataPaths.skisim.get_file_path('atm_transmission_secz1.5_1.6mm.dat')) wave_mod = transparency[:,0] tran_mod = transparency[:,1] @@ -198,7 +198,7 @@ def transparency(wavelength, debug=False): if debug: utils.pyplot_rcparams() - msgs.info("Plot of the sky transmission template") + log.info("Plot of the sky transmission template") plt.figure() plt.plot(wave_mod, tran_mod, color='navy', linestyle='-', alpha=0.8, @@ -210,7 +210,7 @@ def transparency(wavelength, debug=False): plt.xlabel(r'Wavelength [microns]') plt.ylabel(r'Transmission') plt.title(r' IR Transmission Spectra ') - msgs.info("Close the Figure to continue.") + log.info("Close the Figure to continue.") plt.show(block=True) plt.close() utils.pyplot_rcparams_default() @@ -229,7 +229,7 @@ def h2o_lines(): flux : `numpy.ndarray`_ Flux of the H2O atmospheric spectrum. """ - msgs.info("Reading in the water atmsopheric spectrum") + log.info("Reading in the water atmsopheric spectrum") h2o = np.loadtxt(dataPaths.skisim.get_file_path('HITRAN.dat'), usecols=(0, 1)) h2o_wv = 1./ h2o[:,0] * 1e4 # microns h2o_rad = h2o[:,1] * 5e11 # added to match XIDL @@ -248,7 +248,7 @@ def thar_lines(): flux : `numpy.ndarray`_ Flux of the ThAr lamp spectrum. """ - msgs.info("Reading in the ThAr spectrum") + log.info("Reading in the ThAr spectrum") thar = io.load_thar_spec() # create pixel array @@ -320,12 +320,12 @@ def nearIR_modelsky(resolution, waveminmax=(0.8,2.6), dlam=40.0, wv_min = waveminmax[0] wv_max = waveminmax[1] if flgd : - msgs.info("Creating wavelength vector in velocity space.") + log.info("Creating wavelength vector in velocity space.") velpix = dlam # km/s loglam = np.log10(1.0 + velpix/299792.458) wave = np.power(10.,np.arange(np.log10(wv_min), np.log10(wv_max), loglam)) else : - msgs.info("Creating wavelength vector in linear space.") + log.info("Creating wavelength vector in linear space.") wave = np.arange(wv_min, wv_max, dlam) # Calculate transparency @@ -335,14 +335,14 @@ def nearIR_modelsky(resolution, waveminmax=(0.8,2.6), dlam=40.0, logy = - 0.55 - 0.55 * (wave-1.0) y = np.power(10.,logy) - msgs.info("Add in a blackbody for the atmosphere.") + log.info("Add in a blackbody for the atmosphere.") bb, bb_counts = blackbody(wave, T_BB=T_BB, debug=debug) bb_counts = bb_counts - msgs.info("Add in OH lines") + log.info("Add in OH lines") oh_wv, oh_fx = oh_lines() # produces better wavelength solutions with 1.0 threshold - msgs.info("Selecting stronger OH lines") + log.info("Selecting stronger OH lines") filt_oh = oh_fx > 1. oh_wv, oh_fx = oh_wv[filt_oh], oh_fx[filt_oh] # scale_spec was added to match the XIDL code @@ -351,7 +351,7 @@ def nearIR_modelsky(resolution, waveminmax=(0.8,2.6), dlam=40.0, debug=debug) if wv_max > WAVE_WATER : - msgs.info("Add in H2O lines") + log.info("Add in H2O lines") h2o_wv, h2o_rad = h2o_lines() filt_h2o = (h2o_wv>wv_min-0.1) & (h2o_wvnp.max(th_wv)] = 0. 
if thar_outfile is not None: - msgs.info("Saving the ThAr model in: {}".format(thar_outfile)) + log.info("Saving the ThAr model in: {}".format(thar_outfile)) hdu = fits.PrimaryHDU(np.array(thar_spec)) header = hdu.header if flgd : @@ -519,7 +519,7 @@ def optical_modelThAr(resolution, waveminmax=(3000.,10500.), dlam=40.0, if debug: utils.pyplot_rcparams() - msgs.info("Plot of the Murphy et al. template at R={}".format(resolution)) + log.info("Plot of the Murphy et al. template at R={}".format(resolution)) plt.figure() plt.plot(th_wv, th_fx, color='navy', linestyle='-', alpha=0.3, @@ -534,7 +534,7 @@ def optical_modelThAr(resolution, waveminmax=(3000.,10500.), dlam=40.0, plt.xlabel(r'Wavelength [Ang.]') plt.ylabel(r'Emission') plt.title(r'Murphy et al. ThAr spectrum at R={}'.format(resolution)) - msgs.info("Close the Figure to continue.") + log.info("Close the Figure to continue.") plt.show(block=True) plt.close() utils.pyplot_rcparams_default() @@ -578,18 +578,18 @@ def conv2res(wavelength, flux, resolution, central_wl='midpt', wl_cent = float(central_wl) wl_sigma = wl_cent / resolution / 2.355 wl_bin = np.abs((wavelength - np.roll(wavelength,1))[np.where( np.abs(wavelength-wl_cent) == np.min(np.abs(wavelength-wl_cent)) )]) - msgs.info("The binning of the wavelength array at {} is: {}".format(wl_cent, wl_bin[0])) + log.info("The binning of the wavelength array at {} is: {}".format(wl_cent, wl_bin[0])) px_bin = wl_bin[0] px_sigma = wl_sigma / px_bin - msgs.info("Covolving with a Gaussian kernel with sigma = {} pixels".format(px_sigma)) + log.info("Convolving with a Gaussian kernel with sigma = {} pixels".format(px_sigma)) gauss_kernel = Gaussian1DKernel(px_sigma) flux_convolved = convolve(flux, gauss_kernel) if debug: utils.pyplot_rcparams() - msgs.info("Spectrum Convolved at R = {}".format(resolution)) + log.info("Spectrum Convolved at R = {}".format(resolution)) plt.figure() plt.plot(wavelength, flux, color='navy', linestyle='-', alpha=0.8, @@ -601,7 +601,7 @@ def conv2res(wavelength, flux, resolution, central_wl='midpt', plt.xlabel(r'Wavelength') plt.ylabel(r'Flux') plt.title(r'Spectrum Convolved at R = {}'.format(resolution)) - msgs.info("Close the Figure to continue.") + log.info("Close the Figure to continue.") plt.show(block=True) plt.close() utils.pyplot_rcparams_default() @@ -644,7 +644,7 @@ def iraf_datareader(database_dir, id_file): if feat_line is not None: N_lines = int(feat_line.group(1)) - msgs.info("The number of IDs in the IRAF database {} is {}".format(id_file, N_lines)) + log.info("The number of IDs in the IRAF database {} is {}".format(id_file, N_lines)) pixel = np.zeros(N_lines) line_id = np.zeros(N_lines) @@ -693,7 +693,7 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2., If True, convert the wavelengths of the created linelist from air to vacuum """ - msgs.info("Searching for peaks {} sigma above background".format(sigdetec)) + log.info("Searching for peaks {} sigma above background".format(sigdetec)) tampl_true, tampl, tcent, twid, centerr, ww, arcnorm, nsig = arc.detect_lines(spec, sigdetect=sigdetec, fwhm=fwhm, cont_samp=cont_samp, debug=debug) @@ -705,7 +705,7 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2., wave_peak = scipy.interpolate.interp1d(pixvec, wavelength, bounds_error=False, fill_value='extrapolate')(peaks_good) # Convert to vacuum?
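# For reference, air-to-vacuum conversion rescales by the refractive index of air, lambda_vac = n*lambda_air.
# One common approximation is the classic Edlen/IAU relation, with sig2 = (1e4/lambda_air_in_Angstrom)**2;
# this is illustrative only and may not be the exact prescription used by PypeIt's airtovac:
#     n = 1. + 6.4328e-5 + 2.94981e-2/(146. - sig2) + 2.5540e-4/(41. - sig2)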
@@ -693,7 +693,7 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2.,
         If True, convert the wavelengths of the created linelist from air to vacuum
     """
-    msgs.info("Searching for peaks {} sigma above background".format(sigdetec))
+    log.info("Searching for peaks {} sigma above background".format(sigdetec))
     tampl_true, tampl, tcent, twid, centerr, ww, arcnorm, nsig = arc.detect_lines(spec, sigdetect=sigdetec, fwhm=fwhm, cont_samp=cont_samp, debug=debug)
@@ -705,7 +705,7 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2.,
     wave_peak = scipy.interpolate.interp1d(pixvec, wavelength, bounds_error=False, fill_value='extrapolate')(peaks_good)

     # Convert to vacuum?
     if convert_air_to_vac:
-        msgs.info("Converting wavelengths from air to vacuum")
+        log.info("Converting wavelengths from air to vacuum")
         wave_peak = airtovac(wave_peak * units.AA).value

     npeak = len(wave_peak)
@@ -715,12 +715,12 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2.,
     Source = npeak*['wavemodel.py']

     if iraf_frmt:
-        msgs.info("Printing file in IRAF format: {}".format(file_root_name+'_iraf_lines.dat'))
+        log.info("Printing file in IRAF format: {}".format(file_root_name+'_iraf_lines.dat'))
         ion = np.array(ion)
         id_lines_iraf = np.vstack( (np.round(wave_peak,5), ion, np.round(ampl_good,5)) ).T
         np.savetxt(file_root_name+'_iraf_lines.dat', id_lines_iraf, fmt="%15s %6s %15s", delimiter="  ")
     else:
-        msgs.info("Printing file: {}".format(file_root_name+'_lines.dat'))
+        log.info("Printing file: {}".format(file_root_name+'_lines.dat'))
         dat = Table([wave_peak, ion, NIST, Instr, ampl_good, Source],
                     names=('wave', 'ion','NIST','Instr','amplitude','Source'))
         dat.write(file_root_name+'_lines.dat',format='ascii.fixed_width')
@@ -776,7 +776,7 @@ def create_OHlinelist(resolution, waveminmax=(0.8,2.6), dlam=40.0, flgd=True, ni
                                          flgd=flgd, nirsky_outfile=nirsky_outfile, debug=debug)

     if fwhm is None:
-        msgs.warning("No min FWHM for the line detection set. Derived from the resolution at the center of the spectrum")
+        log.warning("No min FWHM for the line detection set. Deriving it from the resolution at the center of the spectrum")
         wl_cent = np.average(wavelength)
         wl_fwhm = wl_cent / resolution
         wl_bin = np.abs((wavelength-np.roll(wavelength,1))[np.where(np.abs(wavelength-wl_cent)==np.min(np.abs(wavelength-wl_cent)))])
@@ -784,15 +784,15 @@ def create_OHlinelist(resolution, waveminmax=(0.8,2.6), dlam=40.0, flgd=True, ni
         # the minimum fwhm of the spectrum
         fwhm = 1.1 * wl_fwhm / wl_bin[0]

     if fwhm < 1.:
-        msgs.warning("Lines are unresolved. Setting FWHM=2.pixels")
+        log.warning("Lines are unresolved. Setting FWHM=2 pixels")
         fwhm = 2.

     if line_name is None:
-        msgs.warning("No line_name as been set. The file will contain XXX as ion")
+        log.warning("No line_name has been set. The file will contain XXX as ion")
         line_name = 'XXX'

     if file_root_name is None:
-        msgs.warning("No file_root_name as been set. The file will called OH_SKY_lines.dat")
+        log.warning("No file_root_name has been set. The file will be called OH_SKY_lines.dat")
         file_root_name = 'OH_SKY'

     create_linelist(wavelength, spec, fwhm=fwhm, sigdetec=sigdetec, line_name=line_name,
@@ -851,7 +851,7 @@ def create_ThArlinelist(resolution, waveminmax=(3000.,10500.), dlam=40.0, flgd=T
     wavelength, spec = optical_modelThAr(resolution, waveminmax=waveminmax, dlam=dlam,
                                          flgd=flgd, thar_outfile=thar_outfile, debug=debug)
     if fwhm is None:
-        msgs.warning("No min FWHM for the line detection set. Derived from the resolution at the center of the spectrum")
+        log.warning("No min FWHM for the line detection set. Deriving it from the resolution at the center of the spectrum")
         wl_cent = np.average(wavelength)
         wl_fwhm = wl_cent / resolution
         wl_bin = np.abs((wavelength-np.roll(wavelength,1))[np.where(np.abs(wavelength-wl_cent)==np.min(np.abs(wavelength-wl_cent)))])
@@ -859,15 +859,15 @@ def create_ThArlinelist(resolution, waveminmax=(3000.,10500.), dlam=40.0, flgd=T
         # the minimum fwhm of the spectrum
         fwhm = 1.1 * wl_fwhm / wl_bin[0]

     if fwhm < 1.:
-        msgs.warning("Lines are unresolved. Setting FWHM=2.*pixels")
+        log.warning("Lines are unresolved. Setting FWHM=2 pixels")
         fwhm = 2.

     if line_name is None:
-        msgs.warning("No line_name as been set. The file will contain XXX as ion")
+        log.warning("No line_name has been set. The file will contain XXX as ion")
         line_name = 'XXX'

     if file_root_name is None:
-        msgs.warning("No file_root_name as been set. The file will called ThAr_lines.dat")
+        log.warning("No file_root_name has been set. The file will be called ThAr_lines.dat")
         file_root_name = 'ThAr'

     create_linelist(wavelength, spec, fwhm=fwhm, sigdetec=sigdetec, line_name=line_name,
diff --git a/pypeit/wavetilts.py b/pypeit/wavetilts.py
index 6c2565d4b1..ad67ce2854 100644
--- a/pypeit/wavetilts.py
+++ b/pypeit/wavetilts.py
@@ -19,7 +19,7 @@
 from astropy import stats, visualization
 from astropy import table

-from pypeit import msgs, datamodel, utils
+from pypeit import log, datamodel, utils
 from pypeit import PypeItError
 from pypeit import calibframe
 from pypeit import slittrace, wavecalib
@@ -142,7 +142,7 @@ def fit2tiltimg(self, slitmask, flexure=None):
             `numpy.ndarray`_: New tilt image
         """
-        msgs.info("Generating a tilts image from the fit parameters")
+        log.info("Generating a tilts image from the fit parameters")

         _flexure = 0. if flexure is None else flexure

@@ -203,9 +203,9 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False,
         _calib_dir = self.calib_dir
         if calib_dir is not None and calib_dir.exists():
             _calib_dir = calib_dir
-            msgs.info(f'Searching for other calibration files in {str(_calib_dir)}')
+            log.info(f'Searching for other calibration files in {str(_calib_dir)}')
         else:
-            msgs.info(f'Searching for other calibration files in the default directory {str(_calib_dir)}')
+            log.info(f'Searching for other calibration files in the default directory {str(_calib_dir)}')

         cal_file = Path(_calib_dir).absolute() / self.tiltimg_filename
         if cal_file.exists():
@@ -227,7 +227,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False,
                 right = arc.resize_slits2arc(tilt_img_dict.image.shape, _slitmask.shape, _right)
         else:
             slits = None
-            msgs.warning(f'Slits file {str(cal_file)} NOT FOUND.')
+            log.warning(f'Slits file {str(cal_file)} NOT FOUND.')

         # get waveimg
         same_size = (slits.nspec, slits.nspat) == tilt_img_dict.image.shape
@@ -238,7 +238,7 @@ def show(self, waveimg=None, wcs_match=True, in_ginga=True, show_traces=False,
                 tilts = self.fit2tiltimg(slitmask, flexure=self.spat_flexure)
                 waveimg = wv_calib.build_waveimg(tilts, slits, spat_flexure=self.spat_flexure)
         else:
-            msgs.warning('Could not load Wave image to show with tilts image.')
+            log.warning('Could not load Wave image to show with tilts image.')

         # Show
         # tilt image
@@ -709,7 +709,7 @@ def run(self, doqa=True, debug=False, show=False):
         # Subtract arc continuum
         _mstilt = self.mstilt.image.copy()
         if self.par['rm_continuum']:
-            msgs.info('Subtracting the continuum')
+            log.info('Subtracting the continuum')
             continuum = self.model_arc_continuum(debug=debug)
             _mstilt -= continuum
             if debug:
@@ -742,21 +742,21 @@ def run(self, doqa=True, debug=False, show=False):
         # Loop on all slits
         for slit_idx, slit_spat in enumerate(self.slits.spat_id):
             if self.tilt_bpm[slit_idx]:
-                msgs.info(f'Skipping bad slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})')
+                log.info(f'Skipping bad slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})')
                 self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')
                 continue
-            msgs.info(f'Computing tilts for slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})')
+            log.info(f'Computing tilts for slit/order {self.slits.slitord_id[slit_idx]} ({slit_idx+1}/{self.slits.nslits})')
             # Get the arc FWHM for this slit
             fwhm = autoid.set_fwhm(self.wavepar, measured_fwhm=self.measured_fwhms[slit_idx], verbose=True)

             # Identify lines for tracing tilts
-            msgs.info('Finding lines for tilt analysis')
+            log.info('Finding lines for tilt analysis')
             self.lines_spec, self.lines_spat \
                     = self.find_lines(self.arccen[:,slit_idx], self.slitcen[:,slit_idx], slit_idx, fwhm, bpm=self.arccen_bpm[:,slit_idx], debug=debug)

             if self.lines_spec is None:
-                msgs.warning('Did not recover any lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + '. This slit/order will not reduced!')
+                log.warning('Did not recover any lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + '. This slit/order will not be reduced!')
                 self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')
                 continue

@@ -766,13 +766,13 @@ def run(self, doqa=True, debug=False, show=False):
             # Performs the initial tracing of the line centroids as a
             # function of spatial position resulting in 1D traces for
             # each line.
-            msgs.info('Trace the tilts')
+            log.info('Trace the tilts')
             self.trace_dict = self.trace_tilts(_mstilt, self.lines_spec, self.lines_spat, thismask, self.slitcen[:, slit_idx], fwhm)

             # IF there are < 2 usable arc lines for tilt tracing, PCA fit does not work and the reduction crushes
             # TODO investigate why some slits have <2 usable arc lines
             if np.sum(self.trace_dict['use_tilt']) < 2:
-                msgs.warning('Less than 2 usable arc lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + '. This slit/order will not reduced!')
+                log.warning('Less than 2 usable arc lines for slit/order = {:d}'.format(self.slits.slitord_id[slit_idx]) + '. This slit/order will not be reduced!')
                 self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')
                 continue

@@ -783,7 +783,7 @@ def run(self, doqa=True, debug=False, show=False):
             use_tilt_spec_cov = (self.trace_dict['tilts_spec'][:, self.trace_dict['use_tilt']].max()
                                  - self.trace_dict['tilts_spec'][:, self.trace_dict['use_tilt']].min()) / self.arccen.shape[0]
             if use_tilt_spec_cov < 0.1:
-                msgs.warning(f'The spectral coverage of the usable arc lines is {use_tilt_spec_cov:.3f} (less than 10%).' + ' This slit/order will not be reduced!')
+                log.warning(f'The spectral coverage of the usable arc lines is {use_tilt_spec_cov:.3f} (less than 10%).' + ' This slit/order will not be reduced!')
                 self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')
                 continue

@@ -802,7 +802,7 @@ def run(self, doqa=True, debug=False, show=False):
             # TODO: Is 95% the right threshold?
             _gpm = self.all_fit_dict[slit_idx]['pypeitFit'].bool_gpm
             if np.sum(np.logical_not(_gpm)) > 0.95 * _gpm.size:
-                msgs.warning(f'Large number of pixels rejected in the fit. This slit/order will not be reduced!')
+                log.warning(f'Large number of pixels rejected in the fit. This slit/order will not be reduced!')
                 self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')
                 continue
             self.coeffs[:self.spec_order[slit_idx]+1,:self.spat_order[slit_idx]+1,slit_idx] = coeff_out
@@ -818,7 +818,7 @@ def run(self, doqa=True, debug=False, show=False):
             # Check that the tilts image has values that span a reasonable range
             # TODO: Is this the right threshold?
             if np.nanmax(self.tilts) - np.nanmin(self.tilts) < 0.8:
-                msgs.warning('Tilts image fit not good. This slit/order will not be reduced!')
+                log.warning('Tilts image fit not good. This slit/order will not be reduced!')
                 self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')
                 continue
             # Save to final image
@@ -949,7 +949,7 @@ def make_tbl_tilt_traces(self):
                 tbl_tilt_traces[tbl_keys[i]] = np.expand_dims(arr, axis=0)

         if len(tbl_tilt_traces) == 0:
-            msgs.warning('No traced and fitted tilts have been found.')
+            log.warning('No traced and fitted tilts have been found.')
             return None

         return tbl_tilt_traces

From b116ecb6b65a77f86747d3569729a8392d304029 Mon Sep 17 00:00:00 2001
From: Kyle Westfall
Date: Thu, 9 Oct 2025 15:25:01 -0700
Subject: [PATCH 11/33] interrupt

---
 pypeit/logger.py         | 29 +++++++++++------
 pypeit/tests/test_log.py | 70 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 9 deletions(-)
 create mode 100644 pypeit/tests/test_log.py

diff --git a/pypeit/logger.py b/pypeit/logger.py
index 93f81b3be7..66cf41e3d8 100644
--- a/pypeit/logger.py
+++ b/pypeit/logger.py
@@ -16,13 +16,16 @@
 from IPython import embed

 import warnings
-def short_warning(message, category, filename, lineno, file=None, line=None):
-    """
-    Return the format for a short warning message.
-    """
-    return f'{category.__name__}: {message}'
-
-warnings.formatwarning = short_warning
+#def short_warning(message, category, filename, lineno, file=None, line=None):
+#    """
+#    Return the format for a short warning message.
+#    """
+#    embed()
+#    exit()
+#    return f'{category.__name__}: {message}'
+#
+#warnings.formatwarning = short_warning

 # NOTE: This is essentially a hack to deal with all the RankWarnings that numpy
 # can throw during polynomial fitting.  Specifically this happens frequently
 # in pypeit.core.fitting.PypeItFit.fit.  We should instead determine why these
 # rank warnings are happening and address the root cause!
@@ -70,11 +73,11 @@ def format(self, record):

         # Add the level in colored text
         msg = color_text(f'[{levelname.upper()}]', level_colors[levelname], bold=True, nchar=10)
-        msg += ' : '
+        msg += ' - '
         if self.base_level == logging.DEBUG:
             # If including debug messages, include file inspection in *all* log
             # messages.
-            msg += color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', inspect_color) + ' : '
+            msg += color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', inspect_color) + ' - '

         # Add the message header
         rec.msg = msg + rec.msg
@@ -149,6 +152,7 @@ def init(self,
              level: int = logging.INFO,
              capture_exceptions: bool = True,
              capture_warnings: bool = True,
+             stream = None,
              log_file: Optional[str | Path] = None,
              log_file_level: Optional[int] = None,
              ):
@@ -174,6 +178,9 @@ def init(self,
         """
         self.warnings_logger = logging.getLogger("py.warnings")

+        embed()
+        exit()
+
         self.setLevel(logging.DEBUG)

         # Clear handlers before recreating.
@@ -194,7 +201,7 @@ def init(self,
             sys.excepthook = self._excepthook

         # Set the stream handler
-        self.sh = logging.StreamHandler()
+        self.sh = logging.StreamHandler(stream=stream)
         formatter = DebugStreamFormatter() if level <= logging.DEBUG else StreamFormatter()
         self.sh.setFormatter(formatter)
         self.sh.setLevel(level)
@@ -260,6 +267,10 @@
         """
         Override the default makeRecord function to rework the message for
         exceptions.
         """
+
+        embed()
+        exit()
+
         # If this is an error message, the execution information is provided,
         # and the error originates from the exception hook, reset the frame
         # information (file, function, and line number) to the calling function.
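[Aside, not part of the patch: the init hunk above swaps sys.excepthook for a logger method, the standard trick for routing uncaught exceptions through logging rather than the default stderr traceback printer. A generic sketch of the mechanism, not PypeIt's actual hook:]

    import logging
    import sys

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger("demo")

    def excepthook(exc_type, exc_value, exc_tb):
        # exc_info hands the traceback to the logging formatter
        log.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))

    sys.excepthook = excepthook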
diff --git a/pypeit/tests/test_log.py b/pypeit/tests/test_log.py
new file mode 100644
index 0000000000..1c05fa51e5
--- /dev/null
+++ b/pypeit/tests/test_log.py
@@ -0,0 +1,70 @@
+
+import io
+import logging
+import re
+import sys
+import warnings
+
+from IPython import embed
+import pytest
+
+from pypeit import log
+from pypeit import PypeItError
+
+def test_basic():
+
+    embed()
+    exit()
+
+    warnings.warn('test')
+
+    ansi_escape = re.compile(r'\x1b[^m]*m')
+
+    # Test the different levels
+    logst = io.StringIO()
+    log.init(level=logging.DEBUG, stream=logst)
+
+    log.debug('test')
+    log.info('test')
+    log.warning('test')
+    log.error('test')
+    log.critical('test')
+
+    msg = logst.getvalue()
+    msg = ansi_escape.sub("", msg).split('\n')
+
+    embed()
+    exit()
+    log.info('test')
+    log.warning('test')
+
+    _stdout = sys.stdout
+    _stderr = sys.stderr
+    cap_out = io.StringIO()
+    cap_err = io.StringIO()
+    sys.stdout = cap_out
+    sys.stderr = cap_err
+
+    log.init(level=logging.DEBUG)
+    log.debug('test')
+    sys.stdout = _stdout
+    sys.stderr = _stdout
+
+    embed()
+    exit()
+
+    log.info('test')
+    log.warning('test')
+    log.error('test')
+    log.critical('test')
+    warnings.warn('test')
+
+    with pytest.raises(ValueError):
+        raise ValueError('test')
+    with pytest.raises(PypeItError):
+        raise PypeItError('test')
+
+    sys.stdout = _stdout
+
+
+test_basic()

From a59f4236e57d44cfdd1504c1e2b9c9f0c19f1a23 Mon Sep 17 00:00:00 2001
From: Kyle Westfall
Date: Fri, 10 Oct 2025 08:57:23 -0700
Subject: [PATCH 12/33] tweaks and testing

---
 pypeit/logger.py         | 166 ++++++++++++++++++++++----------
 pypeit/tests/test_log.py | 168 ++++++++++++++++++++++++++++++---------
 2 files changed, 229 insertions(+), 105 deletions(-)

diff --git a/pypeit/logger.py b/pypeit/logger.py
index 66cf41e3d8..245fd0625a 100644
--- a/pypeit/logger.py
+++ b/pypeit/logger.py
@@ -7,25 +7,25 @@
 import traceback
 import copy
 import inspect
+import io
 import logging
 from pathlib import Path
 import re
 import sys
-from typing import Optional
+from typing import Optional, List

 from IPython import embed

 import warnings
-#def short_warning(message, category, filename, lineno, file=None, line=None):
-#    """
-#    Return the format for a short warning message.
-#    """
-#    embed()
-#    exit()
-#    return f'{category.__name__}: {message}'
-#
-#warnings.formatwarning = short_warning
+# TODO: Can we put this *inside* the logger?
+def short_warning(message, category, filename, lineno, file=None, line=None):
+    """
+    Formatter for warning messages.  Shortens default output to just the warning
+    type and warning message.
+    """
+    return f'{category.__name__}: {message}'
+warnings.formatwarning = short_warning

 # NOTE: This is essentially a hack to deal with all the RankWarnings that numpy
 # can throw during polynomial fitting.  Specifically this happens frequently
 # in pypeit.core.fitting.PypeItFit.fit.  We should instead determine why these
 # rank warnings are happening and address the root cause!
 # 'default' means: "print the first occurrence of matching warnings for each
 # location (module + line number) where the warning is issued"
 # See: https://docs.python.org/3/library/warnings.html#warning-filter
 import numpy as np
 warnings.simplefilter('default', np.exceptions.RankWarning)

-#WARNING_RE = re.compile(r"^.*?\s*?(\w*?Warning): (.*)")
-
+def color_text(
+    text:str,
+    color:List[int],
+    bold:Optional[bool] = False,
+    nchar:Optional[int] = None
+) -> str:
+    """
+    Return an input string with escape characters to colorize text written to
+    consoles.
-def color_text(text, color, bold=False, nchar=None):
+
+    Parameters
+    ----------
+    text
+        Text to colorize
+    color
+        3-element list of integers with the RGB color values
+    bold
+        Flag to make the text bold
+    nchar
+        Force the output text to be right-justified with this number of
+        characters
+
+    Returns
+    -------
+    Reformatted string
+    """
     msg = '\033[1;' if bold else '\033['
     _text = f'{text}' if nchar is None else f'{text:>{nchar}}'
     return f'{msg}38;2;{color[0]};{color[1]};{color[2]}m{_text}\033[0m'

+def clear_text_color(text:str):
+    """
+    Remove escape characters that colorize the text in a string.
+
+    Parameters
+    ----------
+    text
+        String to alter
+
+    Returns
+    -------
+    String with all color escape characters removed
+    """
+    return re.compile(r'\x1b[^m]*m').sub("", text)
+
+
 class StreamFormatter(logging.Formatter):
     """
     Custom `Formatter ` for the stream handler.
     """

     base_level = None
+    """
+    The base logging level for the class.  Used to determine whether or not to
+    include the calling frame in the log message.
+    """

     def format(self, record):

+        # RGB colors for the logging levels
         level_colors = {
             'debug': [116, 173, 209],
             'info': [49, 54, 149],
             'warning': [253, 174, 97],
             'error': [215, 48, 39],
             'critical': [165, 0, 38],
         }
-        inspect_color = level_colors['debug']
+        frame_color = level_colors['debug']

         rec = copy.copy(record)
         levelname = rec.levelname.lower()
@@ -75,22 +117,12 @@
         msg = color_text(f'[{levelname.upper()}]', level_colors[levelname], bold=True, nchar=10)
         msg += ' - '
         if self.base_level == logging.DEBUG:
-            # If including debug messages, include file inspection in *all* log
-            # messages.
-            msg += color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', inspect_color) + ' - '
+            # If including debug messages, include file frame inspection in
+            # *all* log messages.
+            msg += color_text(f'{rec.filename}:{rec.funcName}:{rec.lineno}', frame_color) + ' - '

         # Add the message header
         rec.msg = msg + rec.msg

-# NOTE: This is in the sdsstools looger, but I'm not sure what it does.  I have
-# commented it out for the moment, but we may want to bring it back.
-#        if levelname == "warning" and rec.args and len(rec.args) > 0:
-#            warning_category_groups = WARNING_RE.match(rec.args[0])
-#            if warning_category_groups is not None:
-#                wcategory, wtext = warning_category_groups.groups()
-#                wcategory_colour = color_text(wcategory, level_colors['warning'])
-#                message = f'{color_text(wtext, [256, 256, 256])}' + wcategory_colour
-#                rec.args = tuple([message] + list(args[1:]))
-
         # Return the base formatting
         return logging.Formatter.format(self, rec)

class FileFormatter(logging.Formatter):
     """
     Custom `Formatter ` for the file handler.
     """

     base_fmt = "%(levelname)8s | %(asctime)s | %(filename)s:%(funcName)s:%(lineno)s | %(message)s"
-#    ansi_escape = re.compile(r'\x1b[^m]*m')

     def __init__(self, fmt=base_fmt):
         logging.Formatter.__init__(self, fmt, datefmt='%Y-%m-%d %H:%M:%S')

-#    def format(self, record):
-#        # Copy the record so that any modifications we make do not
-#        # affect how the record is displayed in other handlers.
-#        record_cp = copy.copy(record)
-#
-#        record_cp.msg = self.ansi_escape.sub("", record_cp.msg)
-#
-#        # TODO: Pulled this from sdsstools, but I'm not sure if it's still
-#        # relevant
-#        args = list(record_cp.args)
-#
-#        # The format of a warnings redirected with warnings.captureWarnings
-#        # has the format : : message\n .
-#        # We reorganise that into a cleaner message.  For some reason in this
-#        # case the message is in record.args instead of in record.msg.
-#        if (
-#            record_cp.levelno == logging.WARNING
-#            and record_cp.args
-#            and len(record_cp.args) > 0
-#        ):
-#            match = re.match(r"^(.*?):\s*?(\w*?Warning): (.*)", args[0])
-#            if match:
-#                message = "{1} - {2} [{0}]".format(*match.groups())
-#                record_cp.args = tuple([message] + list(args[1:]))
-#
-#        return logging.Formatter.format(self, record_cp)

 class PypeItLogger(logging.Logger):
     """
@@ -152,7 +157,7 @@
     def init(self,
              level: int = logging.INFO,
              capture_exceptions: bool = True,
              capture_warnings: bool = True,
-             stream = None,
+             stream: Optional[io.TextIOBase] = None,
              log_file: Optional[str | Path] = None,
              log_file_level: Optional[int] = None,
              ):
@@ -168,19 +173,21 @@ def init(self,
             logging system.
         capture_warnings
             Capture warnings and redirect them to the log.
+        stream
+            Stream for logging messages, which defaults to sys.stderr.
         log_file
             Name for a log file.  If None, logging is only recorded to the
             console.  If the file provided already exists, it will be
-            ovewritten!
+            overwritten!
         log_file_level
             The logging level specific to the log file.  If None, adopt the
             console logging level.
         """
+        # NOTE: Because of how get_logger works, this makes warnings_logger an
+        # instance of PypeItLogger.
         self.warnings_logger = logging.getLogger("py.warnings")

-        embed()
-        exit()
-
+        # Set the base level of the logger to DEBUG
         self.setLevel(logging.DEBUG)

         # Clear handlers before recreating.
@@ -200,7 +207,8 @@ def init(self,
             self._excepthook_orig = sys.excepthook
             sys.excepthook = self._excepthook

-        # Set the stream handler
+        # Set the stream handler, its formatting, its level, and then add it to
+        # the set of handlers
         self.sh = logging.StreamHandler(stream=stream)
         formatter = DebugStreamFormatter() if level <= logging.DEBUG else StreamFormatter()
         self.sh.setFormatter(formatter)
         self.sh.setLevel(level)
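[Aside, not part of the patch: the makeRecord hunk that follows recovers the true origin of a captured warning by walking the frame stack past warnings.py; the real code keys on the _showwarnmsg frame, as shown below. A simplified sketch of the same walk:]

    import inspect
    from pathlib import Path

    def warning_origin():
        # Walk outward from the current frame; remember the caller of the
        # last frame that lives in warnings.py.
        frame = inspect.currentframe()
        origin = None
        while frame is not None:
            if Path(frame.f_code.co_filename).name == "warnings.py":
                origin = frame.f_back
            frame = frame.f_back
        return None if origin is None else (origin.f_code.co_filename, origin.f_lineno)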
@@ -268,13 +276,29 @@ def makeRecord(
         """
         Override the default makeRecord function to rework the message for
         exceptions.
         """
-
-        embed()
-        exit()
-
-        # If this is an error message, the execution information is provided,
-        # and the error originates from the exception hook, reset the frame
-        # information (file, function, and line number) to the calling function.
-        if (level == logging.ERROR
+        # If the warning was issued by "warnings", try to recover the calling
+        # frame details
+        if name == 'py.warnings':
+            frame = inspect.currentframe()
+            save_frame = None
+            while frame is not None:
+                # Work backwards through the frame to find the first occurrence
+                # of the call to the warnings.warn function.
+                if (
+                    Path(frame.f_code.co_filename).name == "warnings.py"
+                    and frame.f_code.co_name == '_showwarnmsg'
+                ):
+                    save_frame = frame.f_back
+                frame = frame.f_back
+            if save_frame is not None:
+                pathname = save_frame.f_code.co_filename
+                lineno = save_frame.f_lineno
+                func = save_frame.f_code.co_name
+
+        # Do the same if (1) this is an error message, (2) the execution
+        # information is provided, and (3) the error originates from the
+        # exception hook.
+        elif (level == logging.ERROR
             and exc_info is not None
             and Path(pathname).name == 'logger.py'
             and func is not None
@@ -286,6 +310,8 @@ def makeRecord(
             func = calling_frame.name
             # This keeps the traceback from being printed twice!
             exc_info = None
+
+        # Call the base-class method
         return logging.Logger.makeRecord(
             self, name, level, pathname, lineno, msg, args, exc_info, func=func, extra=extra,
             sinfo=sinfo
         )
@@ -295,6 +321,7 @@ def get_logger(
     level: int = logging.INFO,
     capture_exceptions: bool = True,
     capture_warnings: bool = True,
+    stream: Optional[io.TextIOBase] = None,
     log_file: Optional[str | Path] = None,
     log_file_level: Optional[int] = None,
 ):
@@ -310,6 +337,8 @@ def get_logger(
         logging system.
     capture_warnings
         Capture warnings and redirect them to the log.
+    stream
+        Stream for logging messages, which defaults to sys.stderr.
     log_file
         Name for a log file.  If None, logging is only recorded to the
         console.  If the file provided already exists, it will be
@@ -328,6 +357,7 @@ def get_logger(
             level=level,
             capture_exceptions=capture_exceptions,
             capture_warnings=capture_warnings,
+            stream=stream,
             log_file=log_file,
             log_file_level=log_file_level
         )

diff --git a/pypeit/tests/test_log.py b/pypeit/tests/test_log.py
index 1c05fa51e5..048e70080d 100644
--- a/pypeit/tests/test_log.py
+++ b/pypeit/tests/test_log.py
@@ -1,24 +1,22 @@

 import io
 import logging
-import re
-import sys
-import warnings
+from pathlib import Path

 from IPython import embed
-import pytest

 from pypeit import log
-from pypeit import PypeItError
+from pypeit.logger import clear_text_color

+# TODO: It's difficult to test the PypeItLogger capturing of the warnings and
+# exceptions because pytest overrides them as well.  To test these, we would
+# need to create a subprocess and actually run a script.  See
+#
+# https://stackoverflow.com/questions/46310034/how-to-test-that-a-custom-excepthook-is-installed-correctly
+#
+# Punting for now.

-def test_basic():
+def test_debug():

-    embed()
-    exit()
-
-    warnings.warn('test')
-
-    ansi_escape = re.compile(r'\x1b[^m]*m')

     # Test the different levels
     logst = io.StringIO()
     log.init(level=logging.DEBUG, stream=logst)

     log.debug('test')
     log.info('test')
     log.warning('test')
     log.error('test')
     log.critical('test')

-    msg = logst.getvalue()
-    msg = ansi_escape.sub("", msg).split('\n')
-
-    embed()
-    exit()
+    msg = clear_text_color(logst.getvalue()).split('\n')[:-1]
+    assert len(msg) == 5, f'Incorrect number of stream logs {len(msg)}'
+    assert all(['test_log.py:test_debug' in m for m in msg]), \
+            'Calling function should be in all messages when using DEBUG level'
+
+
+def test_info():
+
+    # Test the different levels
+    logst = io.StringIO()
+    log.init(level=logging.INFO, stream=logst)
+
+    log.debug('test')
     log.info('test')
     log.warning('test')
+    log.error('test')
+    log.critical('test')
+
+    msg = clear_text_color(logst.getvalue()).split('\n')[:-1]
+    assert len(msg) == 4, 'Incorrect number of stream logs (should not print DEBUG message)'
+    assert not any(['test_log.py:test_info' in m for m in msg]), \
+            'Calling function should not be in messages when using INFO level'

-    _stdout = sys.stdout
-    _stderr = sys.stderr
-    cap_out = io.StringIO()
-    cap_err = io.StringIO()
-    sys.stdout = cap_out
-    sys.stderr = cap_err

-    log.init(level=logging.DEBUG)
+
+def test_warning():
+
+    # Test the different levels
+    logst = io.StringIO()
+    log.init(level=logging.WARNING, stream=logst)
+
     log.debug('test')
-    sys.stdout = _stdout
-    sys.stderr = _stdout
+    log.info('test')
+    log.warning('test')
+    log.error('test')
+    log.critical('test')
+
+    msg = clear_text_color(logst.getvalue()).split('\n')[:-1]
+    assert len(msg) == 3, 'Incorrect number of stream logs (should not print DEBUG/INFO messages)'
+    assert not any(['test_log.py:test_warning' in m for m in msg]), \
+            'Calling function should not be in messages when using WARNING level'
+
+
+def test_log_file():

+    lf = Path().absolute() / 'test_log.txt'

+    # Test the different levels
+    logst = io.StringIO()
+    log.init(level=logging.DEBUG, stream=logst, log_file=lf)
+
+    log.debug('test')
     log.info('test')
     log.warning('test')
     log.error('test')
     log.critical('test')

+    assert lf.is_file(), 'Log file not produced'
+    with open(lf, 'r') as f:
+        log_lines = f.readlines()
+
+    msg = clear_text_color(logst.getvalue()).split('\n')[:-1]
+
+    assert len(msg) == len(log_lines), 'Number of lines in file should match stream'
+    assert all(['test_log.py:test_log_file' in l for l in log_lines]), \
+            'Calling function should be included in all file logs'
+
+    lf.unlink()
+
+
+def test_log_file_info():
+
+    lf = Path().absolute() / 'test_log.txt'
+
+    # Start the log
+    logst = io.StringIO()
+    log.init(level=logging.INFO, stream=logst, log_file=lf)
+    log.debug('test')
+    log.info('test')
+    log.warning('test')
+    assert lf.is_file(), 'Log file not produced'
+    with open(lf, 'r') as f:
+        log_lines = f.readlines()
+
+    msg = clear_text_color(logst.getvalue()).split('\n')[:-1]
+
+    assert len(msg) == len(log_lines), 'Number of lines in file should match stream'
+    assert all(['test_log.py:test_log_file_info' in l for l in log_lines]), \
+            'Calling function should be included in all file logs'
+    assert not any(['test_log.py:test_log_file_info' in m for m in msg]), \
+            'Calling function should not be in stream messages when using INFO level'
+
+    lf.unlink()
+
+
+def test_log_file_level_diff():
+
+    lf = Path().absolute() / 'test_log.txt'
+
+    # Start the log
+    logst = io.StringIO()
+    log.init(level=logging.INFO, stream=logst, log_file=lf, log_file_level=logging.DEBUG)
+    log.debug('test')
+    log.info('test')
+    log.warning('test')
+    assert lf.is_file(), 'Log file not produced'
+    with open(lf, 'r') as f:
+        log_lines = f.readlines()
+
+    msg = clear_text_color(logst.getvalue()).split('\n')[:-1]
+
+    assert len(msg) == 2 and len(log_lines) == 3, \
+            'Log file should include all entries, but stream should skip DEBUG message'
+
+    lf.unlink()
+
+
+def test_log_overwrite():
+
+    lf = Path().absolute() / 'test_log.txt'
+
+    # Start the log
+    log.init(level=logging.DEBUG, log_file=lf)
+    log.debug('test')
+    assert lf.is_file(), 'Log file not produced'
+
+    # Reinit, which should restart the file
+    log.init(level=logging.DEBUG, log_file=lf)
+    log.debug('test')
+    assert lf.is_file(), 'Log file not produced'
+    with open(lf, 'r') as f:
+        log_lines = f.readlines()
+    assert len(log_lines) == 1, 'reinitializing the log should overwrite the log file'

-    with pytest.raises(ValueError):
-        raise ValueError('test')
-    with pytest.raises(PypeItError):
-        raise PypeItError('test')
-
-    sys.stdout = _stdout
-
-
-test_basic()
+    lf.unlink()
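[Aside, not part of the patch: the tests above all use the same capture pattern, pointing the stream handler at an in-memory buffer, logging, and then asserting on the stripped text. Reduced to its core; the logger name is illustrative:]

    import io
    import logging

    stream = io.StringIO()
    log = logging.getLogger("demo-test")
    log.addHandler(logging.StreamHandler(stream))

    log.warning("test")
    assert "test" in stream.getvalue()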
From 8d7d7fdff011fdf80efb03062f2d348b88459281 Mon Sep 17 00:00:00 2001
From: Kyle Westfall
Date: Fri, 10 Oct 2025 09:16:36 -0700
Subject: [PATCH 13/33] testing

---
 pypeit/logger.py               | 4 ++++
 pypeit/tests/test_runpypeit.py | 3 ---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/pypeit/logger.py b/pypeit/logger.py
index 245fd0625a..c25c0f1644 100644
--- a/pypeit/logger.py
+++ b/pypeit/logger.py
@@ -361,6 +361,10 @@ def get_logger(
             log_file=log_file,
             log_file_level=log_file_level
         )
+        # TODO: We might want to prohibit propagation of this logger to the root
+        # one, but I'm not really sure if that's necessary or how it works.
+        # Leaving this commented out for now.
+        # log.propagate = False
     finally:
         logging.setLoggerClass(orig_logger)

diff --git a/pypeit/tests/test_runpypeit.py b/pypeit/tests/test_runpypeit.py
index 86ed9acdda..5bf6ea5e1d 100644
--- a/pypeit/tests/test_runpypeit.py
+++ b/pypeit/tests/test_runpypeit.py
@@ -116,6 +116,3 @@ def test_run_pypeit():

     # Clean-up
     shutil.rmtree(outdir)
-
-test_run_pypeit()
-
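[Aside, not part of the patch: on the propagation question left open in the TODO above, propagate=False stops records from also bubbling up to the root logger's handlers, which is the usual cause of duplicated console output. An illustrative one-liner:]

    import logging

    # Keep "pypeit" records from also reaching root-logger handlers
    logging.getLogger("pypeit").propagate = False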
From 8753ac943712221632aba0b3b73bfc99e4573a44 Mon Sep 17 00:00:00 2001
From: Kyle Westfall
Date: Fri, 10 Oct 2025 11:30:12 -0700
Subject: [PATCH 14/33] script updates

---
 pypeit/logger.py                      |  30 ++----
 pypeit/pypeit.py                      |  20 +---
 pypeit/scripts/arxiv_solution.py      |  29 +++---
 pypeit/scripts/cache_github_data.py   |   7 +-
 pypeit/scripts/chk_alignments.py      |   7 +-
 pypeit/scripts/chk_edges.py           |   7 +-
 pypeit/scripts/chk_flats.py           |   7 +-
 pypeit/scripts/chk_flexure.py         |   7 +-
 pypeit/scripts/chk_for_calibs.py      |   7 +-
 pypeit/scripts/chk_noise_1dspec.py    |   6 +-
 pypeit/scripts/chk_noise_2dspec.py    |   6 +-
 pypeit/scripts/chk_plugins.py         |   7 +-
 pypeit/scripts/chk_scattlight.py      |   7 +-
 pypeit/scripts/chk_tilts.py           |   7 +-
 pypeit/scripts/chk_wavecalib.py       |   7 +-
 pypeit/scripts/clean_cache.py         |   7 +-
 pypeit/scripts/coadd_1dspec.py        |  21 ++--
 pypeit/scripts/coadd_2dspec.py        |  23 ++---
 pypeit/scripts/coadd_datacube.py      |  17 ++-
 pypeit/scripts/collate_1d.py          |  15 ++-
 pypeit/scripts/compare_sky.py         |   6 +-
 pypeit/scripts/compile_wvarxiv.py     |   8 +-
 pypeit/scripts/edge_inspector.py      |   7 +-
 pypeit/scripts/extract_datacube.py    |  18 ++--
 pypeit/scripts/flux_calib.py          |  20 ++--
 pypeit/scripts/flux_setup.py          |   7 +-
 pypeit/scripts/identify.py            |  20 ++--
 pypeit/scripts/install_extinctfile.py |   7 +-
 pypeit/scripts/install_linelist.py    |   7 +-
 pypeit/scripts/install_ql_calibs.py   |   7 +-
 pypeit/scripts/install_telluric.py    |   7 +-
 pypeit/scripts/install_wvarxiv.py     |   7 +-
 pypeit/scripts/lowrdx_skyspec.py      |   7 +-
 pypeit/scripts/multislit_flexure.py   |   7 +-
 pypeit/scripts/obslog.py              |   7 +-
 pypeit/scripts/parse_slits.py         |   7 +-
 pypeit/scripts/print_bpm.py           |   7 +-
 pypeit/scripts/qa_html.py             |   7 +-
 pypeit/scripts/ql.py                  |   7 +-
 pypeit/scripts/run_pypeit.py          |  43 ++++----
 pypeit/scripts/run_to_calibstep.py    |  28 ++---
 pypeit/scripts/scriptbase.py          | 143 ++++++++++++++++++++------
 pypeit/scripts/sensfunc.py            |  13 +--
 pypeit/scripts/setup.py               |  14 ++-
 pypeit/scripts/setup_coadd2d.py       |   7 +-
 pypeit/scripts/show_1dspec.py         |   7 +-
 pypeit/scripts/show_2dspec.py         |  17 ++-
 pypeit/scripts/show_arxiv.py          |   7 +-
 pypeit/scripts/show_pixflat.py        |   7 +-
 pypeit/scripts/show_wvcalib.py        |   7 +-
 pypeit/scripts/skysub_regions.py      |  10 +-
 pypeit/scripts/tellfit.py             |  14 ++-
 pypeit/scripts/trace_edges.py         |  18 ++--
 pypeit/scripts/version.py             |   4 +-
 pypeit/scripts/view_fits.py           |   8 +-
 55 files changed, 439 insertions(+), 322 deletions(-)

diff --git a/pypeit/logger.py b/pypeit/logger.py
index c25c0f1644..786d44fe94 100644
--- a/pypeit/logger.py
+++ b/pypeit/logger.py
@@ -4,7 +4,6 @@
 Implementation heavily references loggers from astropy and sdsstools.
 """

-import traceback
 import copy
 import inspect
 import io
@@ -12,12 +11,12 @@
 from pathlib import Path
 import re
 import sys
+import traceback
 from typing import Optional, List
+import warnings

 from IPython import embed

-import warnings
-
 # TODO: Can we put this *inside* the logger?
 def short_warning(message, category, filename, lineno, file=None, line=None):
     """
@@ -155,8 +154,6 @@ class PypeItLogger(logging.Logger):
     def init(self,
              level: int = logging.INFO,
-             capture_exceptions: bool = True,
-             capture_warnings: bool = True,
              stream: Optional[io.TextIOBase] = None,
              log_file: Optional[str | Path] = None,
              log_file_level: Optional[int] = None,
@@ -168,11 +165,6 @@ def init(self,
         ----------
         level
             The logging level printed to the console
-        capture_exceptions
-            Override the exception hook and redirect all exceptions to the
-            logging system.
-        capture_warnings
-            Capture warnings and redirect them to the log.
         stream
             Stream for logging messages, which defaults to sys.stderr.
         log_file
@@ -183,6 +175,13 @@ def init(self,
             The logging level specific to the log file.  If None, adopt the
             console logging level.
         """
+        # NOTE: I originally included these as options in the class.  I've
+        # removed them for now (i.e., we'll always catch warnings and
+        # exceptions), but I've left the if statements in place below in case we
+        # want to make these things options in the future.
+        capture_exceptions = True
+        capture_warnings = True
+
         # NOTE: Because of how get_logger works, this makes warnings_logger an
         # instance of PypeItLogger.
         self.warnings_logger = logging.getLogger("py.warnings")
@@ -317,10 +316,10 @@ def makeRecord(
         sinfo=sinfo
     )

+# NOTE: If we allow warning and exception capture to be optional, remember to
+# add them as parameters here as well.
 def get_logger(
     level: int = logging.INFO,
-    capture_exceptions: bool = True,
-    capture_warnings: bool = True,
     stream: Optional[io.TextIOBase] = None,
     log_file: Optional[str | Path] = None,
     log_file_level: Optional[int] = None,
@@ -332,11 +331,6 @@ def get_logger(
     ----------
     level
         The logging level printed to the console
-    capture_exceptions
-        Override the exception hook and redirect all exceptions to the
-        logging system.
-    capture_warnings
-        Capture warnings and redirect them to the log.
     stream
         Stream for logging messages, which defaults to sys.stderr.
     log_file
@@ -355,8 +349,6 @@ def get_logger(
         log = logging.getLogger("pypeit")
         log.init(
             level=level,
-            capture_exceptions=capture_exceptions,
-            capture_warnings=capture_warnings,
             stream=stream,
             log_file=log_file,
             log_file_level=log_file_level

diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py
index f8ebf57dd3..059273787c 100644
--- a/pypeit/pypeit.py
+++ b/pypeit/pypeit.py
@@ -53,20 +53,10 @@ class PypeIt:
     Args:
         pypeit_file (:obj:`str`):
             PypeIt filename.
-        verbosity (:obj:`int`, optional):
-            Verbosity level of system output.  Can be:
-
-                - 0: No output
-                - 1: Minimal output (default)
-                - 2: All output
-
         overwrite (:obj:`bool`, optional):
             Flag to overwrite any existing files/directories.
         reuse_calibs (:obj:`bool`, optional):
             Reuse any pre-existing calibration files
-        logname (:obj:`str`, optional):
-            The name of an ascii log file with the details of the
-            reduction.
         show: (:obj:`bool`, optional):
             Show reduction steps via plots (which will block further
            execution until clicked on) and outputs to ginga.  Requires
@@ -84,16 +74,14 @@ class PypeIt:
         fitstbl (:obj:`pypeit.metadata.PypeItMetaData`): holds the meta info

     """
-    def __init__(self, pypeit_file, verbosity=2, overwrite=True, reuse_calibs=False, logname=None,
-                 show=False, redux_path=None, calib_only=False):
+    def __init__(
+        self, pypeit_file, overwrite=True, reuse_calibs=False, show=False, redux_path=None,
+        calib_only=False
+    ):

         # Set up logging
-        self.logname = logname
-        self.verbosity = verbosity
         self.pypeit_file = pypeit_file

-        log.init(level=log.level, log_file=self.logname)
-
         # Load up PypeIt file
         self.pypeItFile = inputfiles.PypeItFile.from_file(pypeit_file)
         self.calib_only = calib_only

diff --git a/pypeit/scripts/arxiv_solution.py b/pypeit/scripts/arxiv_solution.py
index 81f43729b2..4396e25e2c 100644
--- a/pypeit/scripts/arxiv_solution.py
+++ b/pypeit/scripts/arxiv_solution.py
@@ -1,5 +1,5 @@
 """
-This script enables the user to convert a MasterWaveCalib wavelength solution fits file
+This script enables the user to convert a WaveCalib wavelength solution fits file
 into a PypeIt arxiv solution that can be used with the full_template method.

 .. include common links, assuming primary doc root is up one directory
@@ -18,34 +18,35 @@ class ArxivSolution(scriptbase.ScriptBase):

     @classmethod
     def get_parser(cls, width=None):
-        parser = super().get_parser(description='Read in a MasterWaveCalib solution and convert it into the '
-                                                'format required for the PypeIt full template archive', width=width)
-        parser.add_argument('file', type = str, default=None, help='MasterWaveCalib file')
+        parser = super().get_parser(
+            description='Read in a WaveCalib solution and convert it into the format '
+                        'required for the PypeIt full template archive',
+            width=width, default_log_file=True
+        )
+        parser.add_argument('file', type = str, default=None, help='WaveCalib file')
         parser.add_argument('binning', type=int, help="Spectral binning")
         parser.add_argument('-s', '--slit', default=0, type=int, help='Slit number to use')
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename make_arxiv_solution_YYYYMMDD-HHMM.log')
         parser.add_argument('--try_old', default=False, action='store_true',
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         import os
         from pypeit.wavecalib import WaveCalib
         from pypeit.core.wavecal import wvutils

-        chk_version = not args.try_old
+        # Initialize the log
+        cls.init_log(args)

-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('arxiv_solution', args.verbosity)
+        # Set whether or not to check datamodel versions
+        chk_version = not args.try_old

         # Check that a file has been provided
         if args.file is None:
-            raise PypeItError('You must input a MasterWaveCalib file')
+            raise PypeItError('You must input a WaveCalib file')
         elif not os.path.exists(args.file):
-            raise PypeItError(f"The following MasterWaveCalib file does not exist:\n{args.file}")
+            raise PypeItError(f"The following WaveCalib file does not exist:\n{args.file}")

         # Load the wavelength calibration file
         wv_calib = WaveCalib.from_file(args.file, chk_version=chk_version)
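[Aside, not part of the patch: with the capture options removed, the public surface is just get_logger(level, stream, log_file, log_file_level). An illustrative call matching the signature above; the file name is made up:]

    import logging
    from pypeit.logger import get_logger

    # Console at INFO; the optional file keeps the more verbose DEBUG records
    log = get_logger(level=logging.INFO, log_file='reduce.log',
                     log_file_level=logging.DEBUG)
    log.info('starting reduction')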
diff --git a/pypeit/scripts/cache_github_data.py b/pypeit/scripts/cache_github_data.py
index 82c4ed98ee..880ab884eb 100644
--- a/pypeit/scripts/cache_github_data.py
+++ b/pypeit/scripts/cache_github_data.py
@@ -41,8 +41,8 @@ def get_parser(cls, width=None):
                             help='Force re-download of existing files')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         import os
         import pathlib
         from IPython import embed
@@ -55,6 +55,9 @@ def main(args):
         from pypeit import cache
         from pypeit.pypeitdata import PypeItDataPath

+        # Initialize the log
+        cls.init_log(args)
+
         # First check the input spectrograph list
         if any([inst not in available_spectrographs for inst in args.spectrograph]):
             raise ValueError('Provided invalid spectrograph name. Options are: '

diff --git a/pypeit/scripts/chk_alignments.py b/pypeit/scripts/chk_alignments.py
index 5882328a32..d9587ecda9 100644
--- a/pypeit/scripts/chk_alignments.py
+++ b/pypeit/scripts/chk_alignments.py
@@ -25,10 +25,13 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pypeit import alignframe

+        # Initialize the log
+        cls.init_log(args)
+
         # Load
         chk_version = not args.try_old
         alignments = alignframe.Alignments.from_file(args.file, chk_version=chk_version)

diff --git a/pypeit/scripts/chk_edges.py b/pypeit/scripts/chk_edges.py
index 6d1f791002..b815785e8f 100644
--- a/pypeit/scripts/chk_edges.py
+++ b/pypeit/scripts/chk_edges.py
@@ -28,13 +28,16 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pathlib import Path

         from pypeit import edgetrace, slittrace, log

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old

         # Load

diff --git a/pypeit/scripts/chk_flats.py b/pypeit/scripts/chk_flats.py
index 19f557eb9f..9d19caf9c6 100644
--- a/pypeit/scripts/chk_flats.py
+++ b/pypeit/scripts/chk_flats.py
@@ -22,11 +22,14 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pypeit import flatfield

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old

         # Load

diff --git a/pypeit/scripts/chk_flexure.py b/pypeit/scripts/chk_flexure.py
index dec368e2af..b69bb3a57a 100644
--- a/pypeit/scripts/chk_flexure.py
+++ b/pypeit/scripts/chk_flexure.py
@@ -26,8 +26,8 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from IPython import embed
         from astropy.io import fits
         from pypeit import specobjs
         from pypeit import spec2dobj

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old
         flexure_type = 'spat' if args.spat else 'spec'

diff --git a/pypeit/scripts/chk_for_calibs.py b/pypeit/scripts/chk_for_calibs.py
index c8c126a223..8099599c11 100644
--- a/pypeit/scripts/chk_for_calibs.py
+++ b/pypeit/scripts/chk_for_calibs.py
@@ -30,8 +30,8 @@ def get_parser(cls, width=None):
                             help='If not toggled, remove setup_files/ folder and its files.')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         """

         Args:
@@ -59,6 +59,9 @@ def main(args):
         import shutil

+        # Initialize the log
+        cls.init_log(args)
+
         # Check that the spectrograph is provided if using a file root
         if args.root is not None:
             if args.spectrograph is None:

diff --git a/pypeit/scripts/chk_noise_1dspec.py b/pypeit/scripts/chk_noise_1dspec.py
index 93bc916df0..eb29439e8c 100644
--- a/pypeit/scripts/chk_noise_1dspec.py
+++ b/pypeit/scripts/chk_noise_1dspec.py
@@ -173,8 +173,10 @@ def get_parser(cls, width=None):

         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
+        # Initialize the log
+        cls.init_log(args)

         chk_version = not args.try_old

diff --git a/pypeit/scripts/chk_noise_2dspec.py b/pypeit/scripts/chk_noise_2dspec.py
index e8360093fe..669ddef645 100644
--- a/pypeit/scripts/chk_noise_2dspec.py
+++ b/pypeit/scripts/chk_noise_2dspec.py
@@ -178,8 +178,10 @@

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
+        # Initialize the log
+        cls.init_log(args)

         chk_version = not args.try_old

diff --git a/pypeit/scripts/chk_plugins.py b/pypeit/scripts/chk_plugins.py
index e576812d88..9f1ef261a9 100644
--- a/pypeit/scripts/chk_plugins.py
+++ b/pypeit/scripts/chk_plugins.py
@@ -8,13 +8,16 @@ class ChkPlugins(scriptbase.ScriptBase):

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pypeit.display import required_plugins, plugins_available
         from pypeit import log
         from pypeit import PypeItError

+        # Initialize the log
+        cls.init_log(args)
+
         success, report = plugins_available(return_report=True)
         if not success:
             raise PypeItError(report)

diff --git a/pypeit/scripts/chk_scattlight.py b/pypeit/scripts/chk_scattlight.py
index 5767d9bfc3..6c897073ba 100644
--- a/pypeit/scripts/chk_scattlight.py
+++ b/pypeit/scripts/chk_scattlight.py
@@ -31,8 +31,8 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pypeit import scattlight, spec2dobj, slittrace
         from pypeit import log
@@ -40,6 +40,9 @@ def main(args):
         from pypeit.images.detector_container import DetectorContainer
         from pypeit import io

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old

         # Parse the detector name

diff --git a/pypeit/scripts/chk_tilts.py b/pypeit/scripts/chk_tilts.py
index baddabc85f..d6869c5ef8 100644
--- a/pypeit/scripts/chk_tilts.py
+++ b/pypeit/scripts/chk_tilts.py
@@ -30,11 +30,14 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pathlib import Path
         from pypeit import wavetilts

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old

         # tilts file path

diff --git a/pypeit/scripts/chk_wavecalib.py b/pypeit/scripts/chk_wavecalib.py
index 83ac86d2bb..f36b377b1d 100644
--- a/pypeit/scripts/chk_wavecalib.py
+++ b/pypeit/scripts/chk_wavecalib.py
@@ -22,14 +22,17 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from IPython import embed
         from astropy.io import fits
         from pypeit import wavecalib, spec2dobj, log
         from pypeit import PypeItError

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old

         # Loop over the input files

diff --git a/pypeit/scripts/clean_cache.py b/pypeit/scripts/clean_cache.py
index 794e778358..1840f4043a 100644
--- a/pypeit/scripts/clean_cache.py
+++ b/pypeit/scripts/clean_cache.py
@@ -29,8 +29,8 @@ def get_parser(cls, width=None):
                             help='Only list the contents of the cache.')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from IPython import embed

         import astropy.utils.data
         from pypeit import PypeItError
         from pypeit import cache

+        # Initialize the log
+        cls.init_log(args)
+
         if args.list:
             # Print the full contents
             contents = cache.search_cache(None, path_only=False)

diff --git a/pypeit/scripts/coadd_1dspec.py b/pypeit/scripts/coadd_1dspec.py
index 9529fe5fbb..f71b3de9f8 100644
--- a/pypeit/scripts/coadd_1dspec.py
+++ b/pypeit/scripts/coadd_1dspec.py
@@ -59,8 +59,10 @@ class CoAdd1DSpec(scriptbase.ScriptBase):

     @classmethod
     def get_parser(cls, width=None):
-        parser = super().get_parser(description='Coadd 1D spectra produced by PypeIt',
-                                    width=width, formatter=scriptbase.SmartFormatter)
+        parser = super().get_parser(
+            description='Coadd 1D spectra produced by PypeIt', width=width,
+            formatter=scriptbase.SmartFormatter, default_log_file=True
+        )

         parser.add_argument('coadd1d_file', type=str,
                             help="R|File to guide coadding process.\n\n"
@@ -141,18 +143,15 @@ def get_parser(cls, width=None):
                             help="show QA during coadding process")
         parser.add_argument("--par_outfile", default='coadd1d.par',
                             help="Output to save the parameters")
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename coadd_1dspec_YYYYMMDD-HHMM.log')
-        #parser.add_argument("--test_spec_path", type=str, help="Path for testing")
         return parser

-    @staticmethod
-    def main(args):
-        """ Runs the 1d coadding steps
+    @classmethod
+    def main(cls, args):
+        """
+        Runs the 1d coadding steps
         """
-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('coadd_1dspec', args.verbosity)
+        # Initialize the log
+        cls.init_log(args)

         # Load the file
         #config_lines, spec1dfiles, objids = read_coaddfile(args.coadd1d_file)

diff --git a/pypeit/scripts/coadd_2dspec.py b/pypeit/scripts/coadd_2dspec.py
index 20603dd7ce..1f5f9a69cd 100644
--- a/pypeit/scripts/coadd_2dspec.py
+++ b/pypeit/scripts/coadd_2dspec.py
@@ -12,7 +12,7 @@ class CoAdd2DSpec(scriptbase.ScriptBase):
     @classmethod
     def get_parser(cls, width=None):
         parser = super().get_parser(description='Coadd 2D spectra produced by PypeIt',
-                                    width=width)
+                                    width=width, default_log_file=True)

         parser.add_argument('coadd2d_file', type=str, default=None,
                             help='File to guide 2d coadds')
@@ -29,25 +29,16 @@ def get_parser(cls, width=None):
                             help="Basename of files to save the parameters, spec1d, and spec2d")
         parser.add_argument("--debug", default=False, action="store_true",
                             help="show debug plots?")
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename coadd_2dspec_YYYYMMDD-HHMM.log')
-        #parser.add_argument("--wave_method", type=str, default=None,
-        #                    help="Wavelength method for wavelength grid. If not set, code will "
-        #                         "use linear for Multislit and log10 for Echelle")
-        #parser.add_argument("--std", default=False, action="store_true",
-        #                    help="This is a standard star reduction.")
         return parser

-    @staticmethod
-    def main(args):
-        """ Executes 2d coadding
+    @classmethod
+    def main(cls, args):
+        """
+        Executes 2d coadding
         """
         from pathlib import Path
-        import os
-        import glob
         import copy
         from collections import OrderedDict
         from pypeit import spec2dobj
         from pypeit.spectrographs.util import load_spectrograph

-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('coadd_2dspec', args.verbosity)
+        # Initialize the log
+        cls.init_log(args)

         # Load the file
         coadd2dFile = inputfiles.Coadd2DFile.from_file(args.coadd2d_file)

diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py
index 47011b59f8..1614eb1bfe 100644
--- a/pypeit/scripts/coadd_datacube.py
+++ b/pypeit/scripts/coadd_datacube.py
@@ -12,19 +12,18 @@ class CoAddDataCube(scriptbase.ScriptBase):
     @classmethod
     def get_parser(cls, width=None):
-        parser = super().get_parser(description='Read in an array of spec2D files and convert '
-                                                'them into a datacube', width=width)
+        parser = super().get_parser(
+            description='Read in an array of spec2D files and convert them into a datacube',
+            width=width, default_log_file=True
+        )
         parser.add_argument('file', type = str, default=None, help='filename.coadd3d file')
         parser.add_argument('--det', default=1, type=int, help="Detector")
         parser.add_argument('-o', '--overwrite', default=False, action='store_true',
                             help='Overwrite any existing files/directories')
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename coadd_datacube_YYYYMMDD-HHMM.log')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         import time

         from pypeit import log
         from pypeit.coadd3d import CoAdd3D
         from pypeit.spectrographs.util import load_spectrograph

-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('coadd_datacube', args.verbosity)
+        # Initialize the log
+        cls.init_log(args)

         # Check that a file has been provided
         if args.file is None:

diff --git a/pypeit/scripts/collate_1d.py b/pypeit/scripts/collate_1d.py
index a1604cdad2..add863a5a5 100644
--- a/pypeit/scripts/collate_1d.py
+++ b/pypeit/scripts/collate_1d.py
@@ -668,7 +668,8 @@

         parser = super().get_parser(description='Flux/Coadd multiple 1d spectra from multiple '
                                                 'nights and prepare a directory for the KOA.',
-                                    width=width, formatter=scriptbase.SmartFormatter)
+                                    width=width, formatter=scriptbase.SmartFormatter,
+                                    default_log_file=True)

         # TODO: Is the file optional?  If so, shouldn't the first argument start
         # with '--'?
@@ -721,16 +722,12 @@
         parser.add_argument("--refframe", type=str, default = None,
                             choices = pypeitpar.WavelengthSolutionPar.valid_reference_frames(),
                             help=blank_par.descr['refframe'])
         parser.add_argument('--chk_version', action = 'store_true',
                             help=blank_pypar['rdx'].descr['chk_version'])
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename collate_1d_YYYYMMDD-HHMM.log')

         return parser

-    @staticmethod
-    def main(args):
-
-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('collate_1d', args.verbosity)
+    @classmethod
+    def main(cls, args):
+        # Initialize the log
+        cls.init_log(args)

         start_time = datetime.now()
         (par, spectrograph, spec1d_files) = build_parameters(args)

diff --git a/pypeit/scripts/compare_sky.py b/pypeit/scripts/compare_sky.py
index eee351a889..9914bf8250 100644
--- a/pypeit/scripts/compare_sky.py
+++ b/pypeit/scripts/compare_sky.py
@@ -30,14 +30,16 @@ def get_parser(cls, width=None):
         return parser

     # Script to run XSpec from the command line or ipython
-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):

         import matplotlib.pyplot as plt
         import linetools.spectra.io

         from pypeit import io

+        # Initialize the log
+        cls.init_log(args)

         # Extension
         exten = args.exten if args.exten is not None else 1

diff --git a/pypeit/scripts/compile_wvarxiv.py b/pypeit/scripts/compile_wvarxiv.py
index e6d9d7ae64..a0b4b68d54 100644
--- a/pypeit/scripts/compile_wvarxiv.py
+++ b/pypeit/scripts/compile_wvarxiv.py
@@ -41,12 +41,16 @@ def get_parser(cls, width=None):

         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
+
         from astropy.table import Table, join
         from importlib_resources import files as imres_files
         import glob, os

+        # Initialize the log
+        cls.init_log(args)
+
         # Read in the wvarxiv files
         assert os.path.isdir(args.wvarxiv_folder), 'The wvarxiv_folder does not exist'
         wavarxiv_files = glob.glob(args.wvarxiv_folder + '/*.fits')

diff --git a/pypeit/scripts/edge_inspector.py b/pypeit/scripts/edge_inspector.py
index d81ab72e3a..aef43c3b0d 100644
--- a/pypeit/scripts/edge_inspector.py
+++ b/pypeit/scripts/edge_inspector.py
@@ -21,14 +21,17 @@ def get_parser(cls, width=None):
                             help='Attempt to load old datamodel versions.  A crash may ensue..')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         from pathlib import Path
         from matplotlib import pyplot
         from pypeit import edgetrace
         from pypeit.core.gui import edge_inspector

+        # Initialize the log
+        cls.init_log(args)
+
         chk_version = not args.try_old

         # Set the file name to the full path

diff --git a/pypeit/scripts/extract_datacube.py b/pypeit/scripts/extract_datacube.py
index 65bb0e6036..94a95a4806 100644
--- a/pypeit/scripts/extract_datacube.py
+++ b/pypeit/scripts/extract_datacube.py
@@ -16,8 +16,11 @@ class ExtractDataCube(scriptbase.ScriptBase):

     @classmethod
     def get_parser(cls, width=None):
-        parser = super().get_parser(description='Read in a datacube, extract a spectrum of a point source,'
-                                                'and save it as a spec1d file.', width=width)
+        parser = super().get_parser(
+            description='Read in a datacube, extract a spectrum of a point source, and save it as '
+                        'a spec1d file.',
+            width=width, default_log_file=True
+        )
         parser.add_argument('file', type = str, default=None, help='spec3d.fits DataCube file')
         parser.add_argument("-e", "--ext_file", type=str,
                             help='Configuration file with extraction parameters')
@@ -27,13 +30,10 @@ def get_parser(cls, width=None):
                             help='Overwrite any existing files/directories')
         parser.add_argument('-b', '--boxcar_radius', type=float, default=None,
                             help='Radius of the circular boxcar (in arcseconds) to use for the extraction.')
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename extract_datacube_YYYYMMDD-HHMM.log')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         import time

         from pypeit import log
         from pypeit.spectrographs.util import load_spectrograph
         from pypeit.coadd3d import DataCube

-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('extract_datacube', args.verbosity)
+        # Initialize the log
+        cls.init_log(args)

         # Check that a file has been provided
         if args.file is None:

diff --git a/pypeit/scripts/flux_calib.py b/pypeit/scripts/flux_calib.py
index 6af4ffc6e8..503705054e 100644
--- a/pypeit/scripts/flux_calib.py
+++ b/pypeit/scripts/flux_calib.py
@@ -23,7 +23,8 @@ class FluxCalib(scriptbase.ScriptBase):
     @classmethod
     def get_parser(cls, width=None):
         parser = super().get_parser(description='Flux calibrate 1D spectra produced by PypeIt',
-                                    width=width, formatter=scriptbase.SmartFormatter)
+                                    width=width, formatter=scriptbase.SmartFormatter,
+                                    default_log_file=True)

         parser.add_argument("flux_file", type=str,
                             help="R|File to guide fluxing process.  This file must have the "
@@ -58,29 +59,22 @@ def get_parser(cls, width=None):
                                  "specify no sensfiles and use an archived one.\n"
                                  "Archived sensfiles are available for the following spectrographs: "
                                  + ",".join(SensFileArchive.supported_spectrographs()) + "\n\n")
-#        parser.add_argument("--debug", default=False, action="store_true",
-#                            help="show debug plots?")
         parser.add_argument("--par_outfile", default='fluxing.par', action="store_true",
                             help="Output to save the parameters")
-        parser.add_argument('-v', '--verbosity', type=int, default=1,
-                            help='Verbosity level between 0 [none] and 2 [all]. Default: 1. '
-                                 'Level 2 writes a log with filename flux_calib_YYYYMMDD-HHMM.log')
' - 'Level 2 writes a log with filename flux_calib_YYYYMMDD-HHMM.log') -# parser.add_argument("--plot", default=False, action="store_true", -# help="Show the sensitivity function?") parser.add_argument('--try_old', default=False, action='store_true', help='Attempt to load old datamodel versions. A crash may ensue..') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): """ Runs fluxing steps """ + # Initialize the log + cls.init_log(args) + # Set whether or not to check datamodel versions chk_version = not args.try_old - # Set the verbosity, and create a logfile if verbosity == 2 -# log.set_logfile_and_verbosity('flux_calib', args.verbosity) - # Load the file fluxFile = inputfiles.FluxFile.from_file(args.flux_file) diff --git a/pypeit/scripts/flux_setup.py b/pypeit/scripts/flux_setup.py index e65a1b1b8d..2830119f16 100644 --- a/pypeit/scripts/flux_setup.py +++ b/pypeit/scripts/flux_setup.py @@ -68,8 +68,8 @@ def get_parser(cls, width=None): '\n') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): """ This setups PypeIt input files for fluxing, coadding, and telluric corrections. It will produce three files named as @@ -77,6 +77,9 @@ def main(args): spectrograph name but can be overriden on the command line. """ + # Initialize the log + cls.init_log(args) + allfiles = [] for path in args.paths: allfiles += Path(path).iterdir() diff --git a/pypeit/scripts/identify.py b/pypeit/scripts/identify.py index d9f4ef6790..c9501c8b5a 100644 --- a/pypeit/scripts/identify.py +++ b/pypeit/scripts/identify.py @@ -13,8 +13,11 @@ class Identify(scriptbase.ScriptBase): @classmethod def get_parser(cls, width=None): - parser = super().get_parser(description='Launch PypeIt pypeit_identify tool, display extracted ' - 'Arc, and load linelist.', width=width) + parser = super().get_parser( + description='Launch PypeIt pypeit_identify tool, display extracted Arc, and load ' + 'linelist.', + width=width, default_log_file=True + ) parser.add_argument('arc_file', type=str, default=None, help='PypeIt Arc file') parser.add_argument('slits_file', type=str, default=None, help='PypeIt Slits file') parser.add_argument("--lamps", type=str, @@ -45,15 +48,12 @@ def get_parser(cls, width=None): help="Save the solutions, despite the RMS") parser.add_argument('--rescale_resid', default=False, action='store_true', help="Rescale the residual plot to include all points?") - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]. Default: 1. ' - 'Level 2 writes a log with filename identify_YYYYMMDD-HHMM.log') parser.add_argument('--try_old', default=False, action='store_true', help='Attempt to load old datamodel versions. 
A crash may ensue..') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import os import json @@ -70,12 +70,12 @@ def main(args): from pypeit.core.wavecal import autoid from pypeit.utils import jsonify + # Initialize the log + cls.init_log(args) + # Set whether or not to check datamodel versions chk_version = not args.try_old - # Set the verbosity, and create a logfile if verbosity == 2 -# log.set_logfile_and_verbosity('identify', args.verbosity) - # Load the Arc file msarc = ArcImage.from_file(args.arc_file, chk_version=chk_version) diff --git a/pypeit/scripts/install_extinctfile.py b/pypeit/scripts/install_extinctfile.py index df14eaa3e1..d176ac3f22 100644 --- a/pypeit/scripts/install_extinctfile.py +++ b/pypeit/scripts/install_extinctfile.py @@ -21,11 +21,14 @@ def get_parser(cls, width=None): 'files with the same root.') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import numpy as np from pypeit import log + # Initialize the log + cls.init_log(args) + # Grab all the files files = np.concatenate([sorted(scriptbase.ScriptBase.expandpath(f)) for f in args.files]) # Remove any that are *not* files (i.e., directories or symlinks) diff --git a/pypeit/scripts/install_linelist.py b/pypeit/scripts/install_linelist.py index 08b034b041..9e0083e0b0 100644 --- a/pypeit/scripts/install_linelist.py +++ b/pypeit/scripts/install_linelist.py @@ -20,11 +20,14 @@ def get_parser(cls, width=None): 'in the PypeIt cache') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import numpy as np from pypeit import log + # Initialize the log + cls.init_log(args) + # Grab all the files files = np.concatenate([sorted(scriptbase.ScriptBase.expandpath(f)) for f in args.files]) # Remove any that are *not* files (i.e., directories or symlinks) diff --git a/pypeit/scripts/install_ql_calibs.py b/pypeit/scripts/install_ql_calibs.py index 805a85e528..f1d1496d52 100644 --- a/pypeit/scripts/install_ql_calibs.py +++ b/pypeit/scripts/install_ql_calibs.py @@ -32,14 +32,17 @@ def get_parser(cls, width=None): help='Remove the downloaded zip file') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import os import zipfile from pypeit.cache import __PYPEIT_DATA__ from pypeit.io import create_symlink + # Initialize the log + cls.init_log(args) + # Check that either the zip file or the directory is provided if args.zip is None and args.ql_path is None: raise ValueError('Must provide either the zip file or the path to an existing ' diff --git a/pypeit/scripts/install_telluric.py b/pypeit/scripts/install_telluric.py index 7869bf3b27..0bcc4cf0d8 100644 --- a/pypeit/scripts/install_telluric.py +++ b/pypeit/scripts/install_telluric.py @@ -23,11 +23,14 @@ def get_parser(cls, width=None): help='This is a local file to be installed in the cache') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import os from pypeit import cache + # Initialize the log + cls.init_log(args) + # Loop through the files passed for file in args.files: diff --git a/pypeit/scripts/install_wvarxiv.py b/pypeit/scripts/install_wvarxiv.py index c1a683ca61..b79747a697 100644 --- a/pypeit/scripts/install_wvarxiv.py +++ b/pypeit/scripts/install_wvarxiv.py @@ -20,11 +20,14 @@ def get_parser(cls, width=None): 'in the PypeIt cache') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import numpy as np from pypeit import log + # Initialize the log + 
cls.init_log(args) + # Grab all the files files = np.concatenate([sorted(scriptbase.ScriptBase.expandpath(f)) for f in args.files]) # Remove any that are *not* files (i.e., directories or symlinks) diff --git a/pypeit/scripts/lowrdx_skyspec.py b/pypeit/scripts/lowrdx_skyspec.py index 616e4b6799..bd49d8812b 100644 --- a/pypeit/scripts/lowrdx_skyspec.py +++ b/pypeit/scripts/lowrdx_skyspec.py @@ -20,11 +20,14 @@ def get_parser(cls, width=None): parser.add_argument('new_file', type=str, default=None, help='PYPIT FITS sky spectrum') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): from scipy.io.idl import readsav from linetools.spectra.xspectrum1d import XSpectrum1D + # Initialize the log + cls.init_log(args) + # Read lrdx_sky = readsav(args.lowrdx_sky) # Generate diff --git a/pypeit/scripts/multislit_flexure.py b/pypeit/scripts/multislit_flexure.py index 4213d70f2d..89cc453773 100644 --- a/pypeit/scripts/multislit_flexure.py +++ b/pypeit/scripts/multislit_flexure.py @@ -45,11 +45,14 @@ def get_parser(cls, width=None): action="store_true", help="show debug plots?") return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): from astropy.io import fits + # Initialize the log + cls.init_log(args) + # Load the file flexFile = inputfiles.FlexureFile.from_file(args.flex_file) diff --git a/pypeit/scripts/obslog.py b/pypeit/scripts/obslog.py index 6e1e7a7657..8796639d03 100644 --- a/pypeit/scripts/obslog.py +++ b/pypeit/scripts/obslog.py @@ -89,12 +89,15 @@ def get_parser(cls, width=None): help='View the obs log in a GUI') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): from pypeit.spectrographs.util import load_spectrograph from pypeit.pypeitsetup import PypeItSetup + # Initialize the log + cls.init_log(args) + # Check that input spectrograph is supported if args.spec not in available_spectrographs: raise ValueError('Instrument \'{0}\' unknown to PypeIt.\n'.format(args.spec) diff --git a/pypeit/scripts/parse_slits.py b/pypeit/scripts/parse_slits.py index 7285cce8c3..aba70c0fe6 100644 --- a/pypeit/scripts/parse_slits.py +++ b/pypeit/scripts/parse_slits.py @@ -64,8 +64,11 @@ def get_parser(cls, width=None): return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): + + # Initialize the log + cls.init_log(args) chk_version = not args.try_old diff --git a/pypeit/scripts/print_bpm.py b/pypeit/scripts/print_bpm.py index dd61e78627..c4033e878a 100644 --- a/pypeit/scripts/print_bpm.py +++ b/pypeit/scripts/print_bpm.py @@ -43,8 +43,11 @@ def get_parser(cls, width=None): 'value is acceptable. Default is 1.') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): + + # Initialize the log + cls.init_log(args) # Convert the integer bitmask value to a list of binary numbers binvals = [int(x) for x in bin(args.bit)[2:]][::-1] diff --git a/pypeit/scripts/qa_html.py b/pypeit/scripts/qa_html.py index a768ee3224..4331bc1ade 100644 --- a/pypeit/scripts/qa_html.py +++ b/pypeit/scripts/qa_html.py @@ -22,8 +22,8 @@ def get_parser(cls, width=None): return parser # TODO: unit_test and path aren't used, right? - @staticmethod - def main(args, unit_test=False, path=''): + @classmethod + def main(cls, args, unit_test=False, path=''): """Builds the HTML files. 
Args: @@ -37,6 +37,9 @@ def main(args, unit_test=False, path=''): from pypeit.core import qa + # Initialize the log + cls.init_log(args) + # Flags flg_MF, flg_exp = False, False if args.type == 'MF': diff --git a/pypeit/scripts/ql.py b/pypeit/scripts/ql.py index e5eda00976..432208a115 100644 --- a/pypeit/scripts/ql.py +++ b/pypeit/scripts/ql.py @@ -754,8 +754,11 @@ def get_parser(cls, width=None): return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): + + # Initialize the log + cls.init_log(args) tstart = time.perf_counter() diff --git a/pypeit/scripts/run_pypeit.py b/pypeit/scripts/run_pypeit.py index 120a2e0e63..16fee49878 100644 --- a/pypeit/scripts/run_pypeit.py +++ b/pypeit/scripts/run_pypeit.py @@ -10,15 +10,6 @@ class RunPypeIt(scriptbase.ScriptBase): - # TODO: Combining classmethod and property works in python 3.9 and later - # only: https://docs.python.org/3.9/library/functions.html#classmethod - # Order matters. In python 3.9, it would be: - # - # @classmethod - # @property - # - # Because we're not requiring python 3.9 yet, we have to leave this as a - # classmethod only: @classmethod def name(cls): """ @@ -49,11 +40,10 @@ def get_parser(cls, width=None): import argparse parser = super().get_parser(description=cls.usage(), - width=width, formatter=argparse.RawDescriptionHelpFormatter) + width=width, formatter=argparse.RawDescriptionHelpFormatter, + default_log_file=True) parser.add_argument('pypeit_file', type=str, help='PypeIt reduction file (must have .pypeit extension)') - parser.add_argument('-v', '--verbosity', type=int, default=2, - help='Verbosity level between 0 [none] and 2 [all]') parser.add_argument('-r', '--redux_path', default=None, help='Path to directory for the reduction. Only advised for testing') @@ -67,7 +57,6 @@ def get_parser(cls, width=None): 'remote control ginga session via ' '"ginga --modules=RC,SlitWavelength &"') - # TODO: JFH Should the default now be true with the new definition. 
parser.add_argument('-o', '--overwrite', default=False, action='store_true', help='Overwrite any existing files/directories') parser.add_argument('-c', '--calib_only', default=False, action='store_true', @@ -75,27 +64,31 @@ def get_parser(cls, width=None): return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): - import os + from pathlib import Path from IPython import embed from pypeit import pypeit from pypeit import log from pypeit import PypeItError - # Load options from command line - splitnm = os.path.splitext(args.pypeit_file) - if splitnm[1] != '.pypeit': - raise PypeItError('Input file must have a .pypeit extension!') - logname = splitnm[0] + ".log" + # Set a default log file based on the name of the pypeit file, not the + # name of the script + if args.log_file == 'default': + _pypeit_file = Path(args.pypeit_file) + if _pypeit_file.suffix != '.pypeit': + raise PypeItError('Input file must have a .pypeit extension!') + args.log_file = _pypeit_file.with_suffix('.log') + + cls.init_log(args) # Instantiate the main pipeline reduction object - pypeIt = pypeit.PypeIt(args.pypeit_file, verbosity=args.verbosity, - reuse_calibs=args.reuse_calibs, overwrite=args.overwrite, - redux_path=args.redux_path, calib_only=args.calib_only, - logname=logname, show=args.show) + pypeIt = pypeit.PypeIt( + args.pypeit_file, reuse_calibs=args.reuse_calibs, overwrite=args.overwrite, + redux_path=args.redux_path, calib_only=args.calib_only, show=args.show + ) if args.calib_only: pypeIt.calib_all() diff --git a/pypeit/scripts/run_to_calibstep.py b/pypeit/scripts/run_to_calibstep.py index b2592af3ab..7ccab1c868 100644 --- a/pypeit/scripts/run_to_calibstep.py +++ b/pypeit/scripts/run_to_calibstep.py @@ -28,9 +28,6 @@ def get_parser(cls, width=None): parser.add_argument('--det', type=str, help='Detector to reduce') # TODO -- Grab these from run_pypeit.py ? - parser.add_argument('-v', '--verbosity', type=int, default=2, - help='Verbosity level between 0 [none] and 2 [all]') - parser.add_argument('-r', '--redux_path', default=None, help='Path to directory for the reduction. Only advised for testing') parser.add_argument('-s', '--show', default=False, action='store_true', @@ -41,8 +38,8 @@ def get_parser(cls, width=None): return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import ast import numpy as np @@ -53,11 +50,16 @@ def main(args): from pypeit import log from pypeit import PypeItError - # Load options from command line - _pypeit_file = Path(args.pypeit_file).absolute() - if _pypeit_file.suffix != '.pypeit': - raise PypeItError(f'Input file {_pypeit_file} must have a .pypeit extension!') - logname = _pypeit_file.parent / f'{_pypeit_file.stem}.log' + # Set a default log file based on the name of the pypeit file, not the + # name of the script + if args.log_file == 'default': + _pypeit_file = Path(args.pypeit_file) + if _pypeit_file.suffix != '.pypeit': + raise PypeItError('Input file must have a .pypeit extension!') + args.log_file = _pypeit_file.with_suffix('.log') + + # Initialize the log + cls.init_log(args) # Check for the frame or calib_group if args.science_frame is None and args.calib_group is None: @@ -66,9 +68,9 @@ def main(args): log.warning("Both science_frame and calib_group ID provided. 
Will use the science_frame") # Instantiate the main pipeline reduction object - pypeIt = pypeit.PypeIt(args.pypeit_file, verbosity=args.verbosity, - redux_path=args.redux_path, - logname=logname, show=args.show, calib_only=True) + pypeIt = pypeit.PypeIt( + args.pypeit_file, redux_path=args.redux_path, show=args.show, calib_only=True + ) pypeIt.reuse_calibs = True # Find the detectors to reduce diff --git a/pypeit/scripts/scriptbase.py b/pypeit/scripts/scriptbase.py index 2b8ae446b4..a1080a114c 100644 --- a/pypeit/scripts/scriptbase.py +++ b/pypeit/scripts/scriptbase.py @@ -5,13 +5,17 @@ .. include:: ../include/links.rst """ -from IPython import embed - -import os -from pathlib import Path import argparse -import textwrap +import datetime from functools import reduce +import logging +from pathlib import Path +import textwrap + +from IPython import embed + +from pypeit import log +from pypeit import PypeItError class SmartFormatter(argparse.HelpFormatter): r""" @@ -113,15 +117,14 @@ def entry_point(cls): """ cls.main(cls.parse_args()) - # TODO: Combining classmethod and property works in python 3.9 and later - # only: https://docs.python.org/3.9/library/functions.html#classmethod - # Order matters. In python 3.9, it would be: + # TODO: This could also be combined with the property decorator; see + # https://docs.python.org/3.9/library/functions.html#classmethod + # Order matters. # # @classmethod # @property # - # Because we're not requiring python 3.9 yet, we have to leave this as a - # classmethod only: + # Leave as is for now. @classmethod def name(cls): """ @@ -136,14 +139,14 @@ def parse_args(cls, options=None): Parse the command-line arguments. """ parser = cls.get_parser() - ScriptBase._fill_parser_cwd(parser) + cls._fill_parser_cwd(parser) return parser.parse_args() if options is None else parser.parse_args(options) @staticmethod def _fill_parser_cwd(parser): """ Replace the default of any action that is exactly ``'current working - directory'`` with the value of ``os.getcwd()``. + directory'`` with the value of ``Path.cwd()``. The ``parser`` is edited *in place*. @@ -153,11 +156,11 @@ def _fill_parser_cwd(parser): """ for action in parser._actions: if action.default == 'current working directory': - action.default = os.getcwd() + action.default = str(Path.cwd()) # Base classes should override this - @staticmethod - def main(args): + @classmethod + def main(cls, args): """ Execute the script. """ @@ -165,7 +168,9 @@ def main(args): @classmethod def get_parser(cls, description=None, width=None, - formatter=argparse.ArgumentDefaultsHelpFormatter): + formatter=argparse.ArgumentDefaultsHelpFormatter, + include_log_options=True, + default_log_file=False): """ Construct the command-line argument parser. @@ -177,30 +182,100 @@ def get_parser(cls, description=None, width=None, *Any* argument that defaults to the string ``'current working directory'`` will be replaced by the - result of ``os.getcwd()`` when the script is executed. This means + result of ``Path.cwd()`` when the script is executed. This means help dialogs will include this replacement, and parsing of the - command line will use ``os.getcwd()`` as the default. This + command line will use ``Path.cwd()`` as the default. This functionality is largely to allow for PypeIt's automated documentation of script help dialogs without the "current working" directory being that of the developer that most recently compiled the docs. - Args: - description (:obj:`str`, optional): - A short description of the purpose of the script. 
-            width (:obj:`int`, optional):
-                Restrict the width of the formatted help output to be no longer
-                than this number of characters, if possible given the help
-                formatter. If None, the width is the same as the terminal
-                width.
-            formatter (`argparse.HelpFormatter`_):
-                Class used to format the help output.
-
-        Returns:
-            `argparse.ArgumentParser`_: Command-line interpreter.
-        """
-        return argparse.ArgumentParser(description=description,
-                                       formatter_class=lambda prog: formatter(prog, width=width))
+        Parameters
+        ----------
+        description : :obj:`str`, optional
+            A short description of the purpose of the script.
+        width : :obj:`int`, optional
+            Restrict the width of the formatted help output to be no longer than
+            this number of characters, if possible given the help formatter. If
+            None, the width is the same as the terminal width.
+        formatter : `argparse.HelpFormatter`_
+            Class used to format the help output.
+        include_log_options : :obj:`bool`, optional
+            Include options that define the logging level(s) and log file.
+        default_log_file : :obj:`bool`, optional
+            If true, the script will use the default log file name if none is
+            provided. Ignored if ``include_log_options`` is False.
+
+        Returns
+        -------
+        `argparse.ArgumentParser`_
+            Command-line interpreter.
+        """
+        parser = argparse.ArgumentParser(
+            description=description, formatter_class=lambda prog: formatter(prog, width=width)
+        )
+        if not include_log_options:
+            return parser
+        # Add the logging options
+        parser.add_argument(
+            '-v', '--verbosity', type=int, default=2,
+            help='Verbosity level, which must be 0, 1, or 2. Level 0 only includes warning and '
+                 'error messages, level 1 also includes informational messages, and level 2 also '
+                 'includes debugging messages and includes the calling function in the message.'
+        )
+        parser.add_argument(
+            '--log_file', type=str, default='default' if default_log_file else None,
+            help='Name for the log file. If set to "default", a default name is used. If None, '
+                 'a log file is not produced.'
+        )
+        parser.add_argument(
+            '--log_level', type=int, default=None,
+            help='Verbosity level for the log file. If a log file is produced and this is None, '
+                 'the file log will match the console stream log.'
+        )
+        return parser
+
+    @classmethod
+    def init_log(cls, args):
+        """
+        Initialize the logger given the command-line arguments.
+        """
+        level = cls._convert_verbosity_to_logging_level(args.verbosity)
+        log_file_level = None if args.log_level is None else \
+                            cls._convert_verbosity_to_logging_level(args.log_level)
+        if args.log_file == 'default':
+            _log_file = cls.default_log_file()
+        elif args.log_file in ['None', None]:
+            _log_file = None
+        else:
+            _log_file = args.log_file
+        log.init(level=level,
+                 log_file=_log_file,
+                 log_file_level=log_file_level)
+
+    @staticmethod
+    def _convert_verbosity_to_logging_level(v):
+        """
+        Given a PypeIt "verbosity level," return the logging level.
+        """
+        match v:
+            case 0:
+                return logging.WARNING
+            case 1:
+                return logging.INFO
+            case 2:
+                return logging.DEBUG
+            case _:
+                raise PypeItError(f'Verbosity level must be 0, 1, or 2, not {v}.')
+
+    @classmethod
+    def default_log_file(cls):
+        """
+        Set the default name for the log file.
+ """ + # Create a UT timestamp (to the minute) for the log filename + timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y%m%d-%H%M") + return f'{cls.name()}_{timestamp}.log' @staticmethod def expandpath(path_pattern): diff --git a/pypeit/scripts/sensfunc.py b/pypeit/scripts/sensfunc.py index 2f864114f9..675cc5cf3b 100644 --- a/pypeit/scripts/sensfunc.py +++ b/pypeit/scripts/sensfunc.py @@ -17,7 +17,7 @@ class SensFunc(scriptbase.ScriptBase): @classmethod def get_parser(cls, width=None): parser = super().get_parser(description='Compute a sensitivity function', width=width, - formatter=scriptbase.SmartFormatter) + formatter=scriptbase.SmartFormatter, default_log_file=True) parser.add_argument("spec1dfile", type=str, help='spec1d file for the standard that will be used to compute ' 'the sensitivity function. This can be the output file of ' @@ -81,13 +81,10 @@ def get_parser(cls, width=None): help="show debug plots?") parser.add_argument("--par_outfile", default='sensfunc.par', help="Name of output file to save the parameters used by the fit") - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]. Default: 1. ' - 'Level 2 writes a log with filename sensfunc_YYYYMMDD-HHMM.log') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): """Executes sensitivity function computation.""" import os @@ -100,8 +97,8 @@ def main(args): from pypeit import sensfunc from pypeit.spectrographs.util import load_spectrograph - # Set the verbosity, and create a logfile if verbosity == 2 -# log.set_logfile_and_verbosity('sensfunc', args.verbosity) + # Initialize the log + cls.init_log(args) # Check parameter inputs if args.algorithm is not None and args.sens_file is not None: diff --git a/pypeit/scripts/setup.py b/pypeit/scripts/setup.py index c688e9167c..4bc00d8cc9 100644 --- a/pypeit/scripts/setup.py +++ b/pypeit/scripts/setup.py @@ -17,7 +17,7 @@ class Setup(scriptbase.ScriptBase): def get_parser(cls, width=None): parser = super().get_parser(description='Parse data files to construct a pypeit file in ' 'preparation for reduction using \'run_pypeit\'', - width=width) + width=width, default_log_file=True) # TODO: Spectrograph should be a required argument parser.add_argument('-s', '--spectrograph', default=None, type=str, @@ -48,9 +48,6 @@ def get_parser(cls, width=None): help='Include the manual spatial shift (flexure) column for the user to edit') parser.add_argument('-m', '--manual_extraction', default=False, action='store_true', help='Include the manual extraction column for the user to edit') - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]. Default: 1. 
' - 'Level 2 writes a log with filename setup_YYYYMMDD-HHMM.log') parser.add_argument('-k', '--keep_bad_frames', default=False, action='store_true', help='Keep all frames, even if they are identified as having ' 'bad/unrecognized configurations that cannot be reduced by ' @@ -73,8 +70,8 @@ def get_parser(cls, width=None): return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import time from pathlib import Path @@ -99,8 +96,9 @@ def main(args): # Start the GUI from pypeit.setup_gui.controller import start_gui start_gui(args) -# else: -# log.set_logfile_and_verbosity("setup", args.verbosity) + else: + # Initialize the log + cls.init_log(args) # Initialize PypeItSetup based on the arguments ps = PypeItSetup.from_file_root(args.root, args.spectrograph, extension=args.extension) diff --git a/pypeit/scripts/setup_coadd2d.py b/pypeit/scripts/setup_coadd2d.py index ca90573461..00d8a5a906 100644 --- a/pypeit/scripts/setup_coadd2d.py +++ b/pypeit/scripts/setup_coadd2d.py @@ -81,8 +81,8 @@ def get_parser(cls, width=None): return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): from pathlib import Path @@ -100,6 +100,9 @@ def main(args): from pypeit.spectrographs.util import load_spectrograph from pypeit.coadd2d import CoAdd2D + # Initialize the log + cls.init_log(args) + if args.pypeit_file is None: pypeitFile = None par = None diff --git a/pypeit/scripts/show_1dspec.py b/pypeit/scripts/show_1dspec.py index 18b86ba979..6fedeadde0 100644 --- a/pypeit/scripts/show_1dspec.py +++ b/pypeit/scripts/show_1dspec.py @@ -34,8 +34,8 @@ def get_parser(cls, width=None): help='Open the spectrum in ginga') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): """ Runs the XSpecGui on an input file """ import sys @@ -49,6 +49,9 @@ def main(args): from pypeit import log from pypeit import PypeItError + # Initialize the log + cls.init_log(args) + sobjs = specobjs.SpecObjs.from_fitsfile(args.file, chk_version=False) # List only? diff --git a/pypeit/scripts/show_2dspec.py b/pypeit/scripts/show_2dspec.py index cff31be25e..30cd6c7c82 100644 --- a/pypeit/scripts/show_2dspec.py +++ b/pypeit/scripts/show_2dspec.py @@ -83,7 +83,7 @@ class Show2DSpec(scriptbase.ScriptBase): def get_parser(cls, width=None): parser = super().get_parser(description='Display sky subtracted, spec2d image in a ' 'ginga viewer.', - width=width) + width=width, default_log_file=True) parser.add_argument('file', type=str, default=None, help='Path to a PypeIt spec2d file') parser.add_argument('--list', default=False, action='store_true', @@ -124,24 +124,23 @@ def get_parser(cls, width=None): parser.add_argument('--no_clear', dest='clear', default=True, action='store_false', help='Do *not* clear all existing tabs') - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]') parser.add_argument('--try_old', default=False, action='store_true', help='Attempt to load old datamodel versions. A crash may ensue..') return parser - @staticmethod - def main(args): - - chk_version = not args.try_old + @classmethod + def main(cls, args): # List only? 
if args.list: io.fits_open(args.file).info() return - # Set the verbosity, and create a logfile if verbosity == 2 -# log.set_logfile_and_verbosity('show_2dspec', args.verbosity) + # Initialize the log + cls.init_log(args) + + # Set whether or not to check datamodel versions + chk_version = not args.try_old # Parse the detector name if args.det is None: diff --git a/pypeit/scripts/show_arxiv.py b/pypeit/scripts/show_arxiv.py index 82fa433fc5..a09d946797 100644 --- a/pypeit/scripts/show_arxiv.py +++ b/pypeit/scripts/show_arxiv.py @@ -23,13 +23,16 @@ def get_parser(cls, width=None): help=argparse.SUPPRESS) return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): """ Shows the spectrum """ from matplotlib import pyplot as plt from pypeit.core.wavecal import waveio + # Initialize the log + cls.init_log(args) + # NOTE: Path is checked within load_template() wave, flux = waveio.load_template(args.file, args.det)[:2] diff --git a/pypeit/scripts/show_pixflat.py b/pypeit/scripts/show_pixflat.py index 37aedc2a4d..926782b2ef 100644 --- a/pypeit/scripts/show_pixflat.py +++ b/pypeit/scripts/show_pixflat.py @@ -21,8 +21,8 @@ def get_parser(cls, width=None): 'to show detectors 1 and 2. If not provided, all detectors will be shown.') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): import numpy as np from pypeit import log from pypeit import PypeItError @@ -30,6 +30,9 @@ def main(args): from pypeit.display import display from pypeit import dataPaths + # Initialize the log + cls.init_log(args) + # check if the file exists file_path = dataPaths.pixelflat.get_file_path(args.file, return_none=True) if file_path is None: diff --git a/pypeit/scripts/show_wvcalib.py b/pypeit/scripts/show_wvcalib.py index 712234c5e2..705a39fede 100644 --- a/pypeit/scripts/show_wvcalib.py +++ b/pypeit/scripts/show_wvcalib.py @@ -30,13 +30,16 @@ def get_parser(cls, width=None): help='Attempt to load old datamodel versions. A crash may ensue..') return parser - @staticmethod - def main(args, unit_test=False): + @classmethod + def main(cls, args, unit_test=False): """ Shows the spectrum """ from matplotlib import pyplot as plt + # Initialize the log + cls.init_log(args) + chk_version = not args.try_old # Load diff --git a/pypeit/scripts/skysub_regions.py b/pypeit/scripts/skysub_regions.py index b3b2ddc394..60cfb9c45f 100644 --- a/pypeit/scripts/skysub_regions.py +++ b/pypeit/scripts/skysub_regions.py @@ -28,15 +28,12 @@ def get_parser(cls, width=None): help='Use flexure corrected slit edges?') parser.add_argument('-s', '--standard', default=False, action='store_true', help='List standard stars as well?') - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]. Default: 1. ' - 'Level 2 writes a log with filename skysub_regions_YYYYMMDD-HHMM.log') parser.add_argument('--try_old', default=False, action='store_true', help='Attempt to load old datamodel versions. 
A crash may ensue..') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): from IPython import embed from pypeit import spec2dobj import os @@ -49,6 +46,9 @@ def main(args): from pypeit.images.detector_container import DetectorContainer from pypeit.edgetrace import EdgeTraceSet + # Initialize the log + cls.init_log(args) + chk_version = not args.try_old # Parse the detector name diff --git a/pypeit/scripts/tellfit.py b/pypeit/scripts/tellfit.py index 7823952292..df6a1f00e5 100644 --- a/pypeit/scripts/tellfit.py +++ b/pypeit/scripts/tellfit.py @@ -15,7 +15,8 @@ class TellFit(scriptbase.ScriptBase): def get_parser(cls, width=None): par = TelluricPar() parser = super().get_parser(description='Telluric correct a spectrum', - width=width, formatter=scriptbase.SmartFormatter) + width=width, formatter=scriptbase.SmartFormatter, + default_log_file=True) parser.add_argument("spec1dfile", type=str, help="spec1d or coadd file that will be used for telluric correction.") parser.add_argument("--objmodel", type=str, default=None, @@ -52,17 +53,14 @@ def get_parser(cls, width=None): help="Show the telluric corrected spectrum") parser.add_argument("--par_outfile", default='telluric.par', help="Name of output file to save the parameters used by the fit") - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]. Default: 1. ' - 'Level 2 writes a log with filename tellfit_YYYYMMDD-HHMM.log') parser.add_argument('--chk_version', default=False, action='store_true', help='Ensure the datamodels are from the current PypeIt version. ' 'By default (consistent with previous functionality) this is ' 'not enforced and crashes may ensue ...') return parser - @staticmethod - def main(args): + @classmethod + def main(cls, args): """ Executes telluric correction. """ @@ -79,8 +77,8 @@ def main(args): from pypeit.core import telluric from pypeit import inputfiles - # Set the verbosity, and create a logfile if verbosity == 2 -# log.set_logfile_and_verbosity('tellfit', args.verbosity) + # Initialize the log + cls.init_log(args) # Determine the spectrograph header = fits.getheader(args.spec1dfile) diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index d5238276d0..a0394287d0 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -14,7 +14,8 @@ class TraceEdges(scriptbase.ScriptBase): def get_parser(cls, width=None): from pypeit.spectrographs import available_spectrographs - parser = super().get_parser(description='Trace slit edges', width=width) + parser = super().get_parser(description='Trace slit edges', width=width, + default_log_file=True) # Require either a pypeit file or a fits file inp = parser.add_mutually_exclusive_group(required=True) @@ -50,17 +51,10 @@ def get_parser(cls, width=None): 'plots related to the PCA decomposition and the slit and order ' 'matching. (3) Also show the individual polynomial fits to the ' 'detected edges.') - parser.add_argument('--show', default=False, action='store_true', - help='DEPRECATED! If set, the code will assume you mean to set ' - '--debug 1.') - parser.add_argument('-v', '--verbosity', type=int, default=1, - help='Verbosity level between 0 [none] and 2 [all]. Default: 1. 
'
-                                 'Level 2 writes a log with filename trace_edges_YYYYMMDD-HHMM.log')
-
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):

         import time

         from pathlib import Path
@@ -74,8 +68,6 @@ def main(args):

         from IPython import embed

-        # Set the verbosity, and create a logfile if verbosity == 2
-#        log.set_logfile_and_verbosity('trace_edges', args.verbosity)
+        # Initialize the log
+        cls.init_log(args)

-        if args.show:
-            log.warning('"show" option is deprecated. Setting debug = 1.')
diff --git a/pypeit/scripts/version.py b/pypeit/scripts/version.py
index e8d325a0f1..8b298fe42e 100644
--- a/pypeit/scripts/version.py
+++ b/pypeit/scripts/version.py
@@ -5,8 +5,8 @@

 class Version(scriptbase.ScriptBase):

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):
         import pypeit
         print('The version of PypeIt is: {:s}'.format(pypeit.__version__))

diff --git a/pypeit/scripts/view_fits.py b/pypeit/scripts/view_fits.py
index 681298393b..4e858f8583 100644
--- a/pypeit/scripts/view_fits.py
+++ b/pypeit/scripts/view_fits.py
@@ -45,8 +45,8 @@ def get_parser(cls, width=None):
                             help='Upon completion embed in ipython shell')
         return parser

-    @staticmethod
-    def main(args):
+    @classmethod
+    def main(cls, args):

         from pypeit import log
         from pypeit import PypeItError
@@ -61,8 +61,8 @@ def main(args):
             print(hdu.info())
             return

-        # TODO: Update verbosity
-        log.init(level=log.level)
+        # Initialize the log
+        cls.init_log(args)

         if args.proc and args.exten is not None:
             raise PypeItError('You cannot specify --proc and --exten, since --exten shows the raw image')

From 315ed06d5fc690fc857cff2620adbdfa2c5b37f7 Mon Sep 17 00:00:00 2001
From: Kyle Westfall
Date: Fri, 10 Oct 2025 12:55:54 -0700
Subject: [PATCH 15/33] testing

---
 pypeit/core/atmextinction.py | 14 +++++++-------
 pypeit/core/flux_calib.py    |  2 +-
 pypeit/core/spectrum.py      | 17 +++++++++--------
 pypeit/core/standard.py      | 22 +++++++++++-----------
 pypeit/tests/test_atmext.py  |  6 ------
 5 files changed, 28 insertions(+), 33 deletions(-)

diff --git a/pypeit/core/atmextinction.py b/pypeit/core/atmextinction.py
index 84f32dd0c1..8c2830e5cc 100644
--- a/pypeit/core/atmextinction.py
+++ b/pypeit/core/atmextinction.py
@@ -46,13 +46,13 @@ class AtmosphericExtinction:
     def __init__(self, wave, mag_ext, assume_sorted=True, file=None):

         if len(wave) != len(mag_ext):
-            log.error('Wavelength and extinction vectors must have the same length.')
+            raise PypeItError('Wavelength and extinction vectors must have the same length.')

         self.wave = np.asarray(wave, dtype=float)
         self.mag_ext = np.asarray(mag_ext, dtype=float)
         if self.wave.ndim != 1 or self.mag_ext.ndim != 1:
-            log.error('Atmospheric extinction must be 1D.')
+            raise PypeItError('Atmospheric extinction must be 1D.')

         if not assume_sorted:
             srt = np.argsort(self.wave)
@@ -95,7 +95,7 @@ def closest_extinction_file(longitude, latitude, toler=5.):
         return extinct_files[int(idx)]['File']

     # Crash with a helpful error message
-    log.error(
+    raise PypeItError(
         f'No atmospheric extinction file was found within {toler} degrees of observation at '
         f'lon = {longitude:.1f} lat = {latitude:.1f}.'
     )
@@ -118,7 +118,7 @@ def from_coordinates(cls, longitude, latitude, toler=5.):
         try:
             extinct_file = cls.closest_extinction_file(longitude, latitude, toler=toler)
         except PypeItError as e:
-            log.error(
+            raise PypeItError(
                 f'{e} You may select a specific extinction file (e.g., KPNO) for use by adding '
                 'an ``extinct_file`` to your pypeit_sensfunc or pypeit_fluxcalib input file. 
'
                 'See instructions at'
@@ -166,7 +166,7 @@ def correction_factor(self, wave, airmass=1.):
         """
         # Warn if extrapolation is necessary
         if np.amin(wave) < np.amin(self.wave) or np.amax(wave) > np.amax(self.wave):
-            log.warn(
+            log.warning(
                 'Spectral regions outside of the bounds of the atmospheric extinction curve are '
                 'set to the nearest value.'
             )
@@ -205,14 +205,14 @@ def correct(flux, factor, ivar=None):
         _flux = np.asarray(flux)
         _factor = np.asarray(factor)
         if _flux.size != _factor.size:
-            log.error('Flux and correction factor arrays must have the same size.')
+            raise PypeItError('Flux and correction factor arrays must have the same size.')

         if ivar is None:
             return _flux * _factor

         _ivar = np.asarray(ivar)
         if _ivar.size != _flux.size:
-            log.error('Inverse variance and flux arrays must have the same size.')
+            raise PypeItError('Inverse variance and flux arrays must have the same size.')

         return _flux * _factor, _ivar * utils.inverse(_factor**2)

diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py
index 0e7f670ecd..4916a3300d 100644
--- a/pypeit/core/flux_calib.py
+++ b/pypeit/core/flux_calib.py
@@ -286,7 +286,7 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta
     else:
         # Apply Extinction if optical bands
         log.info("Applying extinction correction")
-#        log.warn("Extinction correction applied only if the spectra covers <10000Ang.")
+#        log.warning("Extinction correction applied only if the spectra covers <10000Ang.")
         senstot = sensfunc_obs * atmext.correction_factor(wave, airmass=airmass)

     # senstot is the conversion from N_lam to F_lam, and the division by exptime and delta_wave are to convert
diff --git a/pypeit/core/spectrum.py b/pypeit/core/spectrum.py
index 16b2d0f018..b0809832e2 100644
--- a/pypeit/core/spectrum.py
+++ b/pypeit/core/spectrum.py
@@ -11,6 +11,7 @@
 import numpy as np

 from pypeit import log
+from pypeit import PypeItError
 from pypeit import sampling
 from pypeit import utils

@@ -50,23 +51,23 @@ def __init__(self, wave, flux, ivar=None, gpm=None, meta=None):

         self.wave = np.asarray(wave, dtype=float).copy()
         if self.wave.ndim != 1:
-            log.error('wavelength array must always be 1D in the spectrum object')
+            raise PypeItError('wavelength array must always be 1D in the spectrum object')
         if self.wave.size != self.flux.shape[0]:
-            log.error('wavelength vector must match length of flux array')
+            raise PypeItError('wavelength vector must match length of flux array')

         if ivar is None:
             self.ivar = None
         else:
             self.ivar = np.asarray(ivar, dtype=float).copy()
             if self.ivar.shape != self.flux.shape:
-                log.error('Wavelength and inverse variance arrays do not have the same shape.')
+                raise PypeItError('Flux and inverse variance arrays do not have the same shape.')

         if gpm is None:
             self.gpm = np.ones(self.flux.shape, dtype=bool)
         else:
             self.gpm = np.asarray(gpm, dtype=bool).copy()
             if self.gpm.shape != self.flux.shape:
-                log.error('Wavelength and good-pixel arrays do not have the same size.')
+                raise PypeItError('Flux and good-pixel arrays do not have the same shape.')

         self.meta = meta if meta is None else deepcopy(meta)

@@ -119,7 +120,7 @@ def multiply(self, a):
         # Multiply by a scalar
         if isinstance(a, (int, np.integer, float, np.floating)):
             if float(a) == 0.:
-                log.warn('Multiplicative factor is 0!')
+                log.warning('Multiplicative factor is 0!')
             self.flux *= a
             if self.ivar is not None:
                 if np.absolute(a) > 0:
@@ -138,7 +139,7 @@ def multiply(self, a):
             # Check the wavelength vectors
             # TODO: Loosen this; i.e., use isclose instead of array_equal? 
if not np.array_equal(a.wave, self.wave): - log.error('To multiply two spectra, their wavelength vectors must be identical.') + raise PypeItError('To multiply two spectra, their wavelength vectors must be identical.') a_flux = a.flux a_gpm = a.gpm if a.ivar is not None: @@ -150,7 +151,7 @@ def multiply(self, a): # Check the input if a_flux.ndim > self.ndim: - log.error( + raise PypeItError( 'Multiplication does not allow the dimensionality of the spectrum to change. ' f'The dimensionality of this spectrum is {self.ndim} and the multiplier is ' f'{a_flux.ndim}.' @@ -159,7 +160,7 @@ def multiply(self, a): # below should work, as long as the last a.ndim dimensions of a and this # spectrum match. if a_flux.shape != self.shape[:a_flux.ndim]: - log.error( + raise PypeItError( 'Numpy will not be able to successfully broadcast arithmetic operations between ' f'this spectrum, shape={self.shape}, and the multiplier, shape={a_flux.shape}.' ) diff --git a/pypeit/core/standard.py b/pypeit/core/standard.py index f88295c01e..4927e85f2f 100644 --- a/pypeit/core/standard.py +++ b/pypeit/core/standard.py @@ -70,12 +70,12 @@ def archive_entry(archive, name): # Get the file star_file = stds_path.get_file_path(f'{archive}_info.txt') if not star_file.is_file(): - log.error(f'File does not exist!: {star_file}') + raise PypeItError(f'File does not exist!: {star_file}') star_tbl = table.Table.read(star_file, comment='#', format='ascii') idx = np.where(star_tbl['Name'] == name)[0] if len(idx) != 1: - log.error(f'{name} is not a named source in {star_file}.') + raise PypeItError(f'{name} is not a named source in {star_file}.') return star_tbl[idx[0]] @@ -112,14 +112,14 @@ def nearest_archive_entry(archive, ra, dec, unit=None): _unit = unit obj_coord = coordinates.SkyCoord([ra], [dec], unit=_unit) if obj_coord.size > 1: - log.error('Matching to archive can only be done one object at a time.') + raise PypeItError('Matching to archive can only be done one object at a time.') # Set the path (creates a new PypeItDataPath object) stds_path = dataPaths.standards / archive # Get the file star_file = stds_path.get_file_path(f"{archive}_info.txt") if not star_file.is_file(): - log.error(f"File does not exist!: {star_file}") + raise PypeItError(f"File does not exist!: {star_file}") star_tbl = table.Table.read(star_file, comment='#', format='ascii') star_coords = coordinates.SkyCoord(star_tbl['RA_2000'], star_tbl['DEC_2000'], @@ -218,7 +218,7 @@ def from_coordinates(cls, ra, dec, tol=20., unit=None): """ sep, row = nearest_archive_entry(cls.archive, ra, dec, unit=unit) if sep > tol * units.arcmin: - log.error(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' + raise PypeItError(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' f'arcmin, which is beyond the required tolerance ({tol} arcmin).') return cls(row['File'], meta=cls._init_meta(row=row)) @@ -512,7 +512,7 @@ def from_coordinates(cls, ra, dec, tol=20., unit=None, wave=None): """ sep, row = nearest_archive_entry(cls.model_type, ra, dec, unit=unit) if sep > tol * units.arcmin: - log.error(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' + raise PypeItError(f'Closest object ({row["Name"]}) is separated by {sep.to("arcmin").value} ' f'arcmin, which is beyond the required tolerance ({tol} arcmin).') return cls(row['a_x10m23'], row['T_K'], wave=wave, meta=cls._init_meta(row=row)) @@ -578,7 +578,7 @@ def __init__(self, V_mag, spectral_type): # interpolate across types. 
indx = np.where(spectral_type == sk82_tab['Sp'])[0] if len(indx) != 1: - log.error( + raise PypeItError( f'Provided spectral type {spectral_type} not available in Schmidt-Kaler (1982) ' 'table. See the KuruczModelStandard API.' ) @@ -729,10 +729,10 @@ def get_archive_sets(archives=['xshooter', 'calspec', 'esofil', 'noao', 'ing']): good = np.ones(len(_archives), dtype=bool) for i, s in enumerate(_archives): if s not in archive_classes.keys(): - log.warn(f'{s} is not a recognized archive of standard spectra. Ignoring.') + log.warning(f'{s} is not a recognized archive of standard spectra. Ignoring.') good[i] = False if not any(good): - log.error('None of the provided standard spectra archives are valid. Try using ' + raise PypeItError('None of the provided standard spectra archives are valid. Try using ' 'the default list.') return _archives[good] @@ -825,7 +825,7 @@ def get_archive_standard(ra, dec, tol=20., unit=None, archives='default', check= res = np.asarray(list([nearest_archive_entry(key, ra, dec, unit=unit) for key in _archives])) indx = np.argmin(res[:,0]) sep, row = res[indx] - log.error(f'Unable to find a standard star within {tol:.1f} arcmin of RA={ra}, DEC={dec} in ' + raise PypeItError(f'Unable to find a standard star within {tol:.1f} arcmin of RA={ra}, DEC={dec} in ' f'the following archives: {_archives}. The nearest object is {row["Name"]} in ' f'{_archives[indx]} at RA={row["RA_2000"]}, DEC={row["DEC_2000"]}, separated by ' f'{sep.to("arcmin").value:.1f} arcmin.') @@ -914,7 +914,7 @@ def get_standard_spectrum(spectral_type=None, V_mag=None, ra=None, dec=None, tol if spectral_type is not None and V_mag is not None: return get_model_standard(spectral_type, V_mag) if ra is None or dec is None: - log.error('Insufficient data provided to determine the appropriate standard spectrum. ' + raise PypeItError('Insufficient data provided to determine the appropriate standard spectrum. ' 'Provide either the coordinates of the standard or a stellar type and ' 'magnitude.') return get_archive_standard(ra, dec, tol=tol, unit=unit, archives=archives) diff --git a/pypeit/tests/test_atmext.py b/pypeit/tests/test_atmext.py index 34f9d7cbce..da2249dd0b 100644 --- a/pypeit/tests/test_atmext.py +++ b/pypeit/tests/test_atmext.py @@ -6,13 +6,7 @@ import numpy as np import pytest -from astropy import units - -from pypeit.core import flux_calib from pypeit import telescopes -from pypeit.par.pypeitpar import Coadd1DPar - - from pypeit import PypeItError from pypeit.core.atmextinction import AtmosphericExtinction From 808a323c9c7b4f61eb4ac2102ce5ea69a9ff11ed Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Fri, 10 Oct 2025 13:56:22 -0700 Subject: [PATCH 16/33] typing and newline --- pypeit/cache.py | 10 +++++----- pypeit/logger.py | 8 ++++++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pypeit/cache.py b/pypeit/cache.py index 4cdff5cedd..158f883ad6 100644 --- a/pypeit/cache.py +++ b/pypeit/cache.py @@ -260,7 +260,7 @@ def fetch_remote_file( in [requests.codes.forbidden, requests.codes.not_found] ): err_msg = ( - f"The file {filename}" + f"The file {filename}\n" f"is not hosted in the cloud. Please download this file from" f"the PypeIt Google Drive and install it using the script" f"pypeit_install_telluric --local. 
See instructions at" @@ -269,7 +269,7 @@ def fetch_remote_file( elif filetype == "arc_lines/lists": err_msg = ( - f"Cannot find local arc line list {filename}" + f"Cannot find local arc line list {filename}\n" f"Use the script `pypeit_install_linelist` to install" f"your custom line list into the cache. See instructions at" "https://pypeit.readthedocs.io/en/latest/wave_calib.html#line-lists" @@ -277,7 +277,7 @@ def fetch_remote_file( elif filetype == "extinction": err_msg = ( - f"Cannot find local extinction file {filename}" + f"Cannot find local extinction file {filename}\n" f"Use the script `pypeit_install_extinctfile` to install" f"your custom extinction file into the cache. See instructions at" "https://pypeit.readthedocs.io/en/latest/fluxing.html#extinction-correction" @@ -288,8 +288,8 @@ def fetch_remote_file( else: err_msg = ( - f"Error downloading {filename}: {error}" - f"URL attempted: {remote_url}" + f"Error downloading {filename}: {error}\n" + f"URL attempted: {remote_url}\n" f"If the error relates to the server not being found," f"check your internet connection. If the remote server" f"name has changed, please contact the PypeIt development" diff --git a/pypeit/logger.py b/pypeit/logger.py index 786d44fe94..da9b7081b8 100644 --- a/pypeit/logger.py +++ b/pypeit/logger.py @@ -68,7 +68,7 @@ def color_text( return f'{msg}38;2;{color[0]};{color[1]};{color[2]}m{_text}\033[0m' -def clear_text_color(text:str): +def clear_text_color(text:str) -> str: """ Remove escape characters that colorize the text in a string. @@ -323,7 +323,7 @@ def get_logger( stream: Optional[io.TextIOBase] = None, log_file: Optional[str | Path] = None, log_file_level: Optional[int] = None, -): +) -> PypeItLogger: """ Instantiate a new logger. @@ -340,6 +340,10 @@ def get_logger( log_file_level The logging level specific to the log file. If None, adopt the console logging level. + + Returns + ------- + Logging object for PypeIt. """ orig_logger = logging.getLoggerClass() From 7fd9b15be78e4b7c279483977e2886c6504cf0ff Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Fri, 10 Oct 2025 15:24:58 -0700 Subject: [PATCH 17/33] test fixes --- pypeit/core/wavecal/autoid.py | 2 +- pypeit/pypeit.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pypeit/core/wavecal/autoid.py b/pypeit/core/wavecal/autoid.py index 8d843bd5e2..2bd59b34b3 100644 --- a/pypeit/core/wavecal/autoid.py +++ b/pypeit/core/wavecal/autoid.py @@ -1585,7 +1585,7 @@ def report_final(nslits, all_patt_dict, detections, f'{report_ttl}' f' Pixels {signtxt} with wavelength\n' f' Number of lines detected = {detections[st].size}\n' - f' Number of lines that were fit = {len(wv_calib[st]['pixel_fit'])}\n' + f' Number of lines that were fit = {len(wv_calib[st]["pixel_fit"])}\n' f' Central wavelength = {cen_wave}A\n' f' Central dispersion = {cen_disp}A/pix\n' f' Central wave/disp = {cen_wave / cen_disp}\n' diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 059273787c..c6b3352674 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -528,17 +528,17 @@ def reduce_exposure(self, frames, bg_frames=None, std_outfile=None): objFind_list = [] # Print status message - log_string = f'Reducing target {self.fitstbl['target'][frames[0]]}\n' + log_string = f'Reducing target {self.fitstbl["target"][frames[0]]}\n' # TODO: Print these when the frames are actually combined, # backgrounds are used, etc? 
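Note on the close_file() method added above: Windows will not delete a file that still has an open handle, so code that writes a log file and later removes it must detach the file handler first. A minimal sketch of the pattern the tests below rely on (the log-file name here is illustrative, not part of the patch):

    from pathlib import Path

    from pypeit import log

    lf = Path('scratch.log')  # illustrative file name
    log.init(log_file=lf)     # attach a file handler to the PypeIt logger
    log.info('this message goes to both the stream and the log file')
    log.close_file()          # close and detach the file handler first ...
    lf.unlink()               # ... so that Windows will allow the deletion
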
log_string += 'Combining frames:\n' for iframe in frames: - log_string += f'{self.fitstbl['filename'][iframe]}\n' + log_string += f'{self.fitstbl["filename"][iframe]}\n' log.info(log_string) if has_bg: bg_log_string = '' for iframe in bg_frames: - bg_log_string += f'{self.fitstbl['filename'][iframe]}\n' + bg_log_string += f'{self.fitstbl["filename"][iframe]}\n' bg_log_string = '\nUsing background from frames:\n' + bg_log_string log.info(bg_log_string) From 690a5b6599d728ecc1cebf320d02520ab27c0bdf Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Thu, 16 Oct 2025 08:20:58 -0700 Subject: [PATCH 18/33] win test fix? --- pypeit/__init__.py | 8 +++----- pypeit/logger.py | 12 ++++++++++++ pypeit/tests/test_log.py | 10 +++++++++- 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/pypeit/__init__.py b/pypeit/__init__.py index 85467b2501..7c251a1bd6 100644 --- a/pypeit/__init__.py +++ b/pypeit/__init__.py @@ -5,11 +5,6 @@ that can be imported by submodules. """ -# Imports for signal and log handling -import os -import sys -import signal - from .version import version # Set version @@ -30,6 +25,9 @@ from pypeit import pypeitdata dataPaths = pypeitdata.PypeItDataPaths() +## Imports for signal and log handling +#import sys +#import signal ## Send all signals to messages to be dealt with (i.e. someone hits ctrl+c) #def signal_handler(signalnum, handler): # """ diff --git a/pypeit/logger.py b/pypeit/logger.py index da9b7081b8..984383c203 100644 --- a/pypeit/logger.py +++ b/pypeit/logger.py @@ -315,6 +315,18 @@ def makeRecord( self, name, level, pathname, lineno, msg, args, exc_info, func=func, extra=extra, sinfo=sinfo ) + + def close_file(self): + """ + Explicitly close the log file. + """ + if self.fh is None: + return + self.fh.close() + self.removeHandler(self.fh) + if self.fh in self.warnings_logger.handlers: + self.warnings_logger.removeHandler(self.fh) + # NOTE: If we allow warning and exception capture to be optional, remember to # add them as parameters here as well. 
diff --git a/pypeit/tests/test_log.py b/pypeit/tests/test_log.py index 048e70080d..a8eb3d6734 100644 --- a/pypeit/tests/test_log.py +++ b/pypeit/tests/test_log.py @@ -93,7 +93,9 @@ def test_log_file(): assert len(msg) == len(log_lines), 'Number of lines in file should match stream' assert all(['test_log.py:test_log_file' in l for l in log_lines]), \ 'Calling function should be included in all file logs' - + + # Close the log so that we can delete the file + log.close_file() lf.unlink() @@ -119,6 +121,8 @@ def test_log_file_info(): assert not any(['test_log.py:test_log_file_levels' in m for m in msg]), \ 'Calling function should not be in stream messages when using INFO level' + # Close the log so that we can delete the file + log.close_file() lf.unlink() @@ -141,6 +145,8 @@ def test_log_file_level_diff(): assert len(msg) == 2 and len(log_lines) == 3, \ 'Log file should include all entries, but stream should skip DEBUG message' + # Close the log so that we can delete the file + log.close_file() lf.unlink() @@ -161,4 +167,6 @@ def test_log_overwrite(): log_lines = f.readlines() assert len(log_lines) == 1, 'reinitializing the log should overwrite the log file' + # Close the log so that we can delete the file + log.close_file() lf.unlink() From 3d63da2302c05846093b5c1f73b828d89363ec1a Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Thu, 16 Oct 2025 08:30:51 -0700 Subject: [PATCH 19/33] doc update --- doc/help/pypeit_arxiv_solution.rst | 25 ++++-- doc/help/pypeit_cache_github_data.rst | 16 +++- doc/help/pypeit_chk_alignments.rst | 27 ++++-- doc/help/pypeit_chk_edges.rst | 16 +++- doc/help/pypeit_chk_flats.rst | 28 +++++-- doc/help/pypeit_chk_flexure.rst | 27 ++++-- doc/help/pypeit_chk_for_calibs.rst | 17 +++- doc/help/pypeit_chk_noise_1dspec.rst | 15 +++- doc/help/pypeit_chk_noise_2dspec.rst | 15 +++- doc/help/pypeit_chk_plugins.rst | 17 +++- doc/help/pypeit_chk_scattlight.rst | 46 +++++++---- doc/help/pypeit_chk_tilts.rst | 36 +++++--- doc/help/pypeit_chk_wavecalib.rst | 26 ++++-- doc/help/pypeit_clean_cache.rst | 16 +++- doc/help/pypeit_coadd_1dspec.rst | 20 +++-- doc/help/pypeit_coadd_2dspec.rst | 21 +++-- doc/help/pypeit_coadd_datacube.rst | 20 +++-- doc/help/pypeit_collate_1d.rst | 21 +++-- doc/help/pypeit_compare_sky.rst | 15 +++- doc/help/pypeit_compile_wvarxiv.rst | 30 +++++-- doc/help/pypeit_edge_inspector.rst | 24 ++++-- doc/help/pypeit_extract_datacube.rst | 25 ++++-- doc/help/pypeit_flux_calib.rst | 18 ++-- doc/help/pypeit_flux_setup.rst | 15 +++- doc/help/pypeit_identify.rst | 27 ++++-- doc/help/pypeit_install_extinctfile.rst | 24 ++++-- doc/help/pypeit_install_linelist.rst | 22 ++++- doc/help/pypeit_install_ql_calibs.rst | 35 +++++--- doc/help/pypeit_install_telluric.rst | 30 +++++-- doc/help/pypeit_install_wvarxiv.rst | 22 ++++- doc/help/pypeit_lowrdx_skyspec.rst | 22 ++++- doc/help/pypeit_multislit_flexure.rst | 45 ++++++---- doc/help/pypeit_obslog.rst | 19 ++++- doc/help/pypeit_parse_slits.rst | 24 ++++-- doc/help/pypeit_print_bpm.rst | 40 ++++++--- doc/help/pypeit_qa_html.rst | 24 ++++-- doc/help/pypeit_ql.rst | 15 +++- doc/help/pypeit_run_to_calibstep.rst | 19 ++++- doc/help/pypeit_sensfunc.rst | 22 +++-- doc/help/pypeit_setup.rst | 24 ++++-- doc/help/pypeit_setup_coadd2d.rst | 15 +++- doc/help/pypeit_show_1dspec.rst | 40 ++++++--- doc/help/pypeit_show_2dspec.rst | 26 ++++-- doc/help/pypeit_show_arxiv.rst | 22 ++++- doc/help/pypeit_show_pixflat.rst | 29 +++++-- doc/help/pypeit_show_wvcalib.rst | 16 +++- doc/help/pypeit_skysub_regions.rst | 21 +++-- 
doc/help/pypeit_tellfit.rst | 22 +++-- doc/help/pypeit_trace_edges.rst | 27 +++--- doc/help/pypeit_version.rst | 17 +++- doc/help/pypeit_view_fits.rst | 105 +++++++++++++----------- doc/help/run_pypeit.rst | 16 +++- pypeit/scripts/scriptbase.py | 6 +- 53 files changed, 976 insertions(+), 336 deletions(-) diff --git a/doc/help/pypeit_arxiv_solution.rst b/doc/help/pypeit_arxiv_solution.rst index 48ff29d875..89144054aa 100644 --- a/doc/help/pypeit_arxiv_solution.rst +++ b/doc/help/pypeit_arxiv_solution.rst @@ -1,23 +1,32 @@ .. code-block:: console $ pypeit_arxiv_solution -h - usage: pypeit_arxiv_solution [-h] [-s SLIT] [-v VERBOSITY] [--try_old] + usage: pypeit_arxiv_solution [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-s SLIT] [--try_old] file binning - Read in a MasterWaveCalib solution and convert it into the format required for - the PypeIt full template archive + Read in a WaveCalib solution and convert it into the format required for the + PypeIt full template archive positional arguments: - file MasterWaveCalib file + file WaveCalib file binning Spectral binning options: -h, --help show this help message and exit - -s, --slit SLIT Slit number to use (default: 0) -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - make_arxiv_solution_YYYYMMDD-HHMM.log (default: 1) + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + -s, --slit SLIT Slit number to use (default: 0) --try_old Attempt to load old datamodel versions. A crash may ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_cache_github_data.rst b/doc/help/pypeit_cache_github_data.rst index f08591035c..728db8086e 100644 --- a/doc/help/pypeit_cache_github_data.rst +++ b/doc/help/pypeit_cache_github_data.rst @@ -1,7 +1,9 @@ .. code-block:: console $ pypeit_cache_github_data -h - usage: pypeit_cache_github_data [-h] [--exclude EXCLUDE [EXCLUDE ...] | + usage: pypeit_cache_github_data [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] + [--exclude EXCLUDE [EXCLUDE ...] | --include INCLUDE [INCLUDE ...]] [--spec_dependent_only] [--force_update] spectrograph [spectrograph ...] @@ -35,6 +37,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --exclude EXCLUDE [EXCLUDE ...] A subset of the directories to *exclude* from the list of files to download. 
diff --git a/doc/help/pypeit_cache_github_data.rst b/doc/help/pypeit_cache_github_data.rst
index f08591035c..728db8086e 100644
--- a/doc/help/pypeit_cache_github_data.rst
+++ b/doc/help/pypeit_cache_github_data.rst
@@ -1,7 +1,9 @@
 .. code-block:: console

     $ pypeit_cache_github_data -h
-    usage: pypeit_cache_github_data [-h] [--exclude EXCLUDE [EXCLUDE ...] |
+    usage: pypeit_cache_github_data [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                                    [--log_level LOG_LEVEL]
+                                    [--exclude EXCLUDE [EXCLUDE ...] |
                                      --include INCLUDE [INCLUDE ...]]
                                     [--spec_dependent_only] [--force_update]
                                     spectrograph [spectrograph ...]
@@ -35,6 +37,18 @@

     options:
       -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence. (default: 2)
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+                            (default: None)
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
       --exclude EXCLUDE [EXCLUDE ...]
                             A subset of the directories to *exclude* from the list
                             of files to download. Options are: tests, reid_arxiv,
diff --git a/doc/help/pypeit_chk_alignments.rst b/doc/help/pypeit_chk_alignments.rst
index 2f58ffa5b7..f59c744633 100644
--- a/doc/help/pypeit_chk_alignments.rst
+++ b/doc/help/pypeit_chk_alignments.rst
@@ -1,16 +1,31 @@
 .. code-block:: console

     $ pypeit_chk_alignments -h
-    usage: pypeit_chk_alignments [-h] [--chname CHNAME] [--try_old] file
+    usage: pypeit_chk_alignments [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                                 [--log_level LOG_LEVEL] [--chname CHNAME]
+                                 [--try_old]
+                                 file

     Display Alignment image and the trace data

     positional arguments:
-      file             PypeIt Alignment file [e.g. Alignment_A_1_DET01.fits]
+      file                  PypeIt Alignment file [e.g. Alignment_A_1_DET01.fits]

     options:
-      -h, --help       show this help message and exit
-      --chname CHNAME  Channel name for image in Ginga (default: Alignments)
-      --try_old        Attempt to load old datamodel versions. A crash may ensue..
-                       (default: False)
+      -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence. (default: 2)
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+                            (default: None)
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
+      --chname CHNAME       Channel name for image in Ginga (default: Alignments)
+      --try_old             Attempt to load old datamodel versions. A crash may
+                            ensue. (default: False)
\ No newline at end of file
diff --git a/doc/help/pypeit_chk_edges.rst b/doc/help/pypeit_chk_edges.rst
index 2ae4f55e32..b91b940bc4 100644
--- a/doc/help/pypeit_chk_edges.rst
+++ b/doc/help/pypeit_chk_edges.rst
@@ -1,7 +1,9 @@
 .. code-block:: console

     $ pypeit_chk_edges -h
-    usage: pypeit_chk_edges [-h] [--slits_file SLITS_FILE] [--mpl] [--try_old]
+    usage: pypeit_chk_edges [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                            [--log_level LOG_LEVEL] [--slits_file SLITS_FILE]
+                            [--mpl] [--try_old]
                             trace_file

     Display trace image and edge traces
@@ -11,6 +13,18 @@

     options:
       -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence. (default: 2)
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+                            (default: None)
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
       --slits_file SLITS_FILE
                             PypeIt Slits file [e.g. Slits_A_1_01.fits]. If this
                             file does not exist or is not provided, PypeIt will attempt
diff --git a/doc/help/pypeit_chk_flats.rst b/doc/help/pypeit_chk_flats.rst
index 91f2178cb3..8981ef3b9f 100644
--- a/doc/help/pypeit_chk_flats.rst
+++ b/doc/help/pypeit_chk_flats.rst
@@ -1,17 +1,31 @@
 .. code-block:: console

     $ pypeit_chk_flats -h
-    usage: pypeit_chk_flats [-h] [--type TYPE] [--try_old] file
+    usage: pypeit_chk_flats [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                            [--log_level LOG_LEVEL] [--type TYPE] [--try_old]
+                            file

     Display flat images in Ginga viewer

     positional arguments:
-      file         PypeIt Flat file [e.g.
Flat_A_1_DET01.fits] options: - -h, --help show this help message and exit - --type TYPE Which flats to display. Must be one of: pixel, illum, all - (default: all) - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --type TYPE Which flats to display. Must be one of: pixel, illum, + all (default: all) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_chk_flexure.rst b/doc/help/pypeit_chk_flexure.rst index d7110d32d4..b68670d764 100644 --- a/doc/help/pypeit_chk_flexure.rst +++ b/doc/help/pypeit_chk_flexure.rst @@ -1,18 +1,31 @@ .. code-block:: console $ pypeit_chk_flexure -h - usage: pypeit_chk_flexure [-h] (--spec | --spat) [--try_old] + usage: pypeit_chk_flexure [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] (--spec | --spat) [--try_old] input_file [input_file ...] Print QA on flexure to the screen positional arguments: - input_file One or more PypeIt spec2d or spec1d file + input_file One or more PypeIt spec2d or spec1d file options: - -h, --help show this help message and exit - --spec Check the spectral flexure (default: False) - --spat Check the spatial flexure (default: False) - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --spec Check the spectral flexure (default: False) + --spat Check the spatial flexure (default: False) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_chk_for_calibs.rst b/doc/help/pypeit_chk_for_calibs.rst index 2f35551cbd..8785bb976c 100644 --- a/doc/help/pypeit_chk_for_calibs.rst +++ b/doc/help/pypeit_chk_for_calibs.rst @@ -1,8 +1,9 @@ .. code-block:: console $ pypeit_chk_for_calibs -h - usage: pypeit_chk_for_calibs [-h] [-s SPECTROGRAPH] [-e EXTENSION] - [--save_setups] + usage: pypeit_chk_for_calibs [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-s SPECTROGRAPH] + [-e EXTENSION] [--save_setups] root Script to check for calibrations @@ -12,6 +13,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. 
Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) -s, --spectrograph SPECTROGRAPH A valid spectrograph identifier: aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, diff --git a/doc/help/pypeit_chk_noise_1dspec.rst b/doc/help/pypeit_chk_noise_1dspec.rst index 51b54fbfaf..e8c2877569 100644 --- a/doc/help/pypeit_chk_noise_1dspec.rst +++ b/doc/help/pypeit_chk_noise_1dspec.rst @@ -1,7 +1,8 @@ .. code-block:: console $ pypeit_chk_noise_1dspec -h - usage: pypeit_chk_noise_1dspec [-h] [--fileformat FILEFORMAT] + usage: pypeit_chk_noise_1dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--fileformat FILEFORMAT] [--extraction EXTRACTION] [--ploterr] [--step] [--z [Z ...]] [--maskdef_objname MASKDEF_OBJNAME] [--pypeit_name PYPEIT_NAME] [--wavemin WAVEMIN] @@ -16,6 +17,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --fileformat FILEFORMAT Is this coadd1d or spec1d? (default: spec1d) --extraction EXTRACTION diff --git a/doc/help/pypeit_chk_noise_2dspec.rst b/doc/help/pypeit_chk_noise_2dspec.rst index d0a6662125..971c7cbb84 100644 --- a/doc/help/pypeit_chk_noise_2dspec.rst +++ b/doc/help/pypeit_chk_noise_2dspec.rst @@ -1,7 +1,8 @@ .. code-block:: console $ pypeit_chk_noise_2dspec -h - usage: pypeit_chk_noise_2dspec [-h] [--det DET] [--z [Z ...]] + usage: pypeit_chk_noise_2dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--det DET] [--z [Z ...]] [--maskdef_id MASKDEF_ID] [--pypeit_id PYPEIT_ID] [--pad PAD] [--aspect_ratio ASPECT_RATIO] [--wavemin WAVEMIN] [--wavemax WAVEMAX] @@ -15,6 +16,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --det DET Detector name or number. If a number, the name is constructed assuming the reduction is for a single detector. If a string, it must match the name of the diff --git a/doc/help/pypeit_chk_plugins.rst b/doc/help/pypeit_chk_plugins.rst index c09f6e855c..aa758aa926 100644 --- a/doc/help/pypeit_chk_plugins.rst +++ b/doc/help/pypeit_chk_plugins.rst @@ -1,8 +1,21 @@ .. 
code-block:: console $ pypeit_chk_plugins -h - usage: pypeit_chk_plugins [-h] + usage: pypeit_chk_plugins [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] options: - -h, --help show this help message and exit + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) \ No newline at end of file diff --git a/doc/help/pypeit_chk_scattlight.rst b/doc/help/pypeit_chk_scattlight.rst index 08cd077c40..a279100dbf 100644 --- a/doc/help/pypeit_chk_scattlight.rst +++ b/doc/help/pypeit_chk_scattlight.rst @@ -1,27 +1,41 @@ .. code-block:: console $ pypeit_chk_scattlight -h - usage: pypeit_chk_scattlight [-h] [--spec2d SPEC2D] [--det DET] [--mask MASK] - [--try_old] + usage: pypeit_chk_scattlight [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--spec2d SPEC2D] + [--det DET] [--mask MASK] [--try_old] file slits Display the scattered light image in a Ginga viewer positional arguments: - file PypeIt Scattered Light file [e.g. - ScatteredLight_A_0_DET01.fits.gz] - slits Slits calibration file [e.g. Slits_A_0_DET01.fits.gz] + file PypeIt Scattered Light file [e.g. + ScatteredLight_A_0_DET01.fits.gz] + slits Slits calibration file [e.g. Slits_A_0_DET01.fits.gz] options: - -h, --help show this help message and exit - --spec2d SPEC2D PypeIt science spec2d file (default: None) - --det DET Detector name or number. If a number, the name is constructed - assuming the reduction is for a single detector. If a string, - it must match the name of the detector object (e.g., DET01 - for a detector, MSC01 for a mosaic). (default: 1) - --mask MASK If True, the detector pixels that are considered on the slit - will be masked to highlight the scattered light regions - (default: False) - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --spec2d SPEC2D PypeIt science spec2d file (default: None) + --det DET Detector name or number. If a number, the name is + constructed assuming the reduction is for a single + detector. If a string, it must match the name of the + detector object (e.g., DET01 for a detector, MSC01 for a + mosaic). (default: 1) + --mask MASK If True, the detector pixels that are considered on the + slit will be masked to highlight the scattered light + regions (default: False) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. 
(default: False) \ No newline at end of file diff --git a/doc/help/pypeit_chk_tilts.rst b/doc/help/pypeit_chk_tilts.rst index a7440699a2..a9af5c3d31 100644 --- a/doc/help/pypeit_chk_tilts.rst +++ b/doc/help/pypeit_chk_tilts.rst @@ -1,21 +1,37 @@ .. code-block:: console $ pypeit_chk_tilts -h - usage: pypeit_chk_tilts [-h] [--mpl] [--show_traces] [--try_old] file + usage: pypeit_chk_tilts [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--mpl] [--show_traces] + [--try_old] + file Display Tiltimg image and 2D fitted tilts in Ginga viewer or Matplotlib window. Tiltimg file must be in the same directory as Tilts. positional arguments: - file PypeIt Tilts file [e.g. Tilt_A_1_01.fits] + file PypeIt Tilts file [e.g. Tilt_A_1_01.fits] options: - -h, --help show this help message and exit - --mpl Use a matplotlib window instead of ginga to show the tilts. - Faster plotting. (default: False) - --show_traces Show the traced tilts. This slows down the plotting (mostly in - Ginga). If not set, only the fitted, masked and rejected in the - fit tilts are shown. (default: False) - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --mpl Use a matplotlib window instead of ginga to show the + tilts. Faster plotting. (default: False) + --show_traces Show the traced tilts. This slows down the plotting + (mostly in Ginga). If not set, only the fitted, masked + and rejected in the fit tilts are shown. (default: + False) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_chk_wavecalib.rst b/doc/help/pypeit_chk_wavecalib.rst index fe49166847..abfd0d1957 100644 --- a/doc/help/pypeit_chk_wavecalib.rst +++ b/doc/help/pypeit_chk_wavecalib.rst @@ -1,16 +1,30 @@ .. code-block:: console $ pypeit_chk_wavecalib -h - usage: pypeit_chk_wavecalib [-h] [--try_old] input_file [input_file ...] + usage: pypeit_chk_wavecalib [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--try_old] + input_file [input_file ...] Print QA on Wavelength Calib to the screen positional arguments: - input_file One or more PypeIt WaveCalib file [e.g. WaveCalib_A_1_DET01.fits] - or spec2d file + input_file One or more PypeIt WaveCalib file [e.g. + WaveCalib_A_1_DET01.fits] or spec2d file options: - -h, --help show this help message and exit - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. 
+ (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_clean_cache.rst b/doc/help/pypeit_clean_cache.rst index bab7586238..a18fc4831c 100644 --- a/doc/help/pypeit_clean_cache.rst +++ b/doc/help/pypeit_clean_cache.rst @@ -1,12 +1,26 @@ .. code-block:: console $ pypeit_clean_cache -h - usage: pypeit_clean_cache [-h] [-p PATTERN [PATTERN ...]] [--all] [--clear] [-l] + usage: pypeit_clean_cache [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-p PATTERN [PATTERN ...]] + [--all] [--clear] [-l] View/Remove fils in the PypeIt data cache options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) -p, --pattern PATTERN [PATTERN ...] Remove any files matching the provided pattern. If combined with --version, this selects only files diff --git a/doc/help/pypeit_coadd_1dspec.rst b/doc/help/pypeit_coadd_1dspec.rst index de925ee79b..893beee9da 100644 --- a/doc/help/pypeit_coadd_1dspec.rst +++ b/doc/help/pypeit_coadd_1dspec.rst @@ -1,8 +1,9 @@ .. code-block:: console $ pypeit_coadd_1dspec -h - usage: pypeit_coadd_1dspec [-h] [--debug] [--show] [--par_outfile PAR_OUTFILE] - [-v VERBOSITY] + usage: pypeit_coadd_1dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--debug] [--show] + [--par_outfile PAR_OUTFILE] coadd1d_file Coadd 1D spectra produced by PypeIt @@ -103,12 +104,19 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. --debug show debug plots? --show show QA during coadding process --par_outfile PAR_OUTFILE Output to save the parameters - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - coadd_1dspec_YYYYMMDD-HHMM.log \ No newline at end of file diff --git a/doc/help/pypeit_coadd_2dspec.rst b/doc/help/pypeit_coadd_2dspec.rst index 91c8a3b4ee..4b2cec7a71 100644 --- a/doc/help/pypeit_coadd_2dspec.rst +++ b/doc/help/pypeit_coadd_2dspec.rst @@ -1,8 +1,9 @@ .. 
code-block:: console $ pypeit_coadd_2dspec -h - usage: pypeit_coadd_2dspec [-h] [--show] [--debug_offsets] [--peaks] - [--basename BASENAME] [--debug] [-v VERBOSITY] + usage: pypeit_coadd_2dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--show] [--debug_offsets] + [--peaks] [--basename BASENAME] [--debug] coadd2d_file Coadd 2D spectra produced by PypeIt @@ -12,6 +13,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --show Show the reduction steps. Equivalent to the -s option when running pypeit. (default: False) --debug_offsets Show QA plots useful for debugging automatic offset @@ -21,8 +34,4 @@ --basename BASENAME Basename of files to save the parameters, spec1d, and spec2d (default: None) --debug show debug plots? (default: False) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - coadd_2dspec_YYYYMMDD-HHMM.log (default: 1) \ No newline at end of file diff --git a/doc/help/pypeit_coadd_datacube.rst b/doc/help/pypeit_coadd_datacube.rst index c5d29fed99..103db91162 100644 --- a/doc/help/pypeit_coadd_datacube.rst +++ b/doc/help/pypeit_coadd_datacube.rst @@ -1,7 +1,9 @@ .. code-block:: console $ pypeit_coadd_datacube -h - usage: pypeit_coadd_datacube [-h] [--det DET] [-o] [-v VERBOSITY] file + usage: pypeit_coadd_datacube [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--det DET] [-o] + file Read in an array of spec2D files and convert them into a datacube @@ -10,11 +12,19 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --det DET Detector (default: 1) -o, --overwrite Overwrite any existing files/directories (default: False) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - coadd_datacube_YYYYMMDD-HHMM.log (default: 1) \ No newline at end of file diff --git a/doc/help/pypeit_collate_1d.rst b/doc/help/pypeit_collate_1d.rst index b19136bd8f..c3e5c09b35 100644 --- a/doc/help/pypeit_collate_1d.rst +++ b/doc/help/pypeit_collate_1d.rst @@ -1,7 +1,9 @@ .. 
code-block:: console $ pypeit_collate_1d -h - usage: pypeit_collate_1d [-h] [--spec1d_files [SPEC1D_FILES ...]] + usage: pypeit_collate_1d [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] + [--spec1d_files [SPEC1D_FILES ...]] [--par_outfile PAR_OUTFILE] [--outdir OUTDIR] [--spec1d_outdir SPEC1D_OUTDIR] [--tolerance TOLERANCE] [--match_using MATCH_USING] [--dry_run] [--ignore_flux] @@ -9,7 +11,7 @@ [--exclude_slit_trace_bm EXCLUDE_SLIT_TRACE_BM] [--exclude_serendip] [--wv_rms_thresh WV_RMS_THRESH] [--refframe {observed,heliocentric,barycentric}] - [--chk_version] [-v VERBOSITY] + [--chk_version] [input_file] Flux/Coadd multiple 1d spectra from multiple nights and prepare a directory for @@ -45,6 +47,17 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. --spec1d_files [SPEC1D_FILES ...] One or more spec1d files to flux/coadd/archive. Can contain wildcards @@ -95,8 +108,4 @@ to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - collate_1d_YYYYMMDD-HHMM.log \ No newline at end of file diff --git a/doc/help/pypeit_compare_sky.rst b/doc/help/pypeit_compare_sky.rst index 3b0f4b2bf9..f2265072a7 100644 --- a/doc/help/pypeit_compare_sky.rst +++ b/doc/help/pypeit_compare_sky.rst @@ -1,7 +1,8 @@ .. code-block:: console $ pypeit_compare_sky -h - usage: pypeit_compare_sky [-h] [--exten EXTEN] [--optimal] + usage: pypeit_compare_sky [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--exten EXTEN] [--optimal] [--scale_user SCALE_USER] file skyfile @@ -14,6 +15,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --exten EXTEN FITS extension (default: None) --optimal Show Optimal? Default is boxcar (default: False) --scale_user SCALE_USER diff --git a/doc/help/pypeit_compile_wvarxiv.rst b/doc/help/pypeit_compile_wvarxiv.rst index cb18a39f76..1bf38d7921 100644 --- a/doc/help/pypeit_compile_wvarxiv.rst +++ b/doc/help/pypeit_compile_wvarxiv.rst @@ -1,19 +1,33 @@ .. 
code-block:: console $ pypeit_compile_wvarxiv -h - usage: pypeit_compile_wvarxiv [-h] [--append] wvarxiv_folder instrument grating + usage: pypeit_compile_wvarxiv [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--append] + wvarxiv_folder instrument grating Read in a set of wxarxiv solutions from Identify and compile them into a single fits file to be used with the reidentify method. positional arguments: - wvarxiv_folder Location of the WVarxiv files - instrument Name of instrument. e.g. keck_lris_blue, keck_deimos, - gemini_gmos_south_ham - grating Instrument grating name. E.g. b600, r400, 600_10000. + wvarxiv_folder Location of the WVarxiv files + instrument Name of instrument. e.g. keck_lris_blue, keck_deimos, + gemini_gmos_south_ham + grating Instrument grating name. E.g. b600, r400, 600_10000. options: - -h, --help show this help message and exit - --append Append to an existing file for this instrument. (default: - False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --append Append to an existing file for this instrument. + (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_edge_inspector.rst b/doc/help/pypeit_edge_inspector.rst index ca482dfafa..954c9e9b92 100644 --- a/doc/help/pypeit_edge_inspector.rst +++ b/doc/help/pypeit_edge_inspector.rst @@ -1,15 +1,29 @@ .. code-block:: console $ pypeit_edge_inspector -h - usage: pypeit_edge_inspector [-h] [--try_old] trace_file + usage: pypeit_edge_inspector [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--try_old] + trace_file Interactively inspect/edit slit edge traces positional arguments: - trace_file PypeIt Edges file [e.g. Edges_A_0_DET01.fits.gz] + trace_file PypeIt Edges file [e.g. Edges_A_0_DET01.fits.gz] options: - -h, --help show this help message and exit - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_extract_datacube.rst b/doc/help/pypeit_extract_datacube.rst index dd395e3ed3..c0bf367c4e 100644 --- a/doc/help/pypeit_extract_datacube.rst +++ b/doc/help/pypeit_extract_datacube.rst @@ -1,18 +1,31 @@ .. 
code-block:: console $ pypeit_extract_datacube -h - usage: pypeit_extract_datacube [-h] [-e EXT_FILE] [-s SAVE] [-o] - [-b BOXCAR_RADIUS] [-v VERBOSITY] + usage: pypeit_extract_datacube [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-e EXT_FILE] [-s SAVE] + [-o] [-b BOXCAR_RADIUS] file - Read in a datacube, extract a spectrum of a point source,and save it as a spec1d - file. + Read in a datacube, extract a spectrum of a point source, and save it as a + spec1d file. positional arguments: file spec3d.fits DataCube file options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) -e, --ext_file EXT_FILE Configuration file with extraction parameters (default: None) @@ -22,8 +35,4 @@ -b, --boxcar_radius BOXCAR_RADIUS Radius of the circular boxcar (in arcseconds) to use for the extraction. (default: None) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - extract_datacube_YYYYMMDD-HHMM.log (default: 1) \ No newline at end of file diff --git a/doc/help/pypeit_flux_calib.rst b/doc/help/pypeit_flux_calib.rst index 8af82dd6e4..bd87932b77 100644 --- a/doc/help/pypeit_flux_calib.rst +++ b/doc/help/pypeit_flux_calib.rst @@ -1,7 +1,8 @@ .. code-block:: console $ pypeit_flux_calib -h - usage: pypeit_flux_calib [-h] [--par_outfile] [-v VERBOSITY] [--try_old] + usage: pypeit_flux_calib [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--par_outfile] [--try_old] flux_file Flux calibrate 1D spectra produced by PypeIt @@ -49,11 +50,18 @@ options: -h, --help show this help message and exit - --par_outfile Output to save the parameters -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - flux_calib_YYYYMMDD-HHMM.log + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. + --par_outfile Output to save the parameters --try_old Attempt to load old datamodel versions. A crash may ensue.. \ No newline at end of file diff --git a/doc/help/pypeit_flux_setup.rst b/doc/help/pypeit_flux_setup.rst index 7f42af20a6..f7af6ff15f 100644 --- a/doc/help/pypeit_flux_setup.rst +++ b/doc/help/pypeit_flux_setup.rst @@ -1,7 +1,9 @@ .. code-block:: console $ pypeit_flux_setup -h - usage: pypeit_flux_setup [-h] [--name NAME] [--objmodel {qso,star,poly}] + usage: pypeit_flux_setup [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--name NAME] + [--objmodel {qso,star,poly}] paths [paths ...] 
Setup configuration files to perform flux calibration, 1D coadding, and telluric @@ -14,6 +16,17 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. --name NAME The base name to use for the output files. Defaults to the instrument name is used. --objmodel {qso,star,poly} diff --git a/doc/help/pypeit_identify.rst b/doc/help/pypeit_identify.rst index 91c1347213..e7bba132bf 100644 --- a/doc/help/pypeit_identify.rst +++ b/doc/help/pypeit_identify.rst @@ -1,11 +1,12 @@ .. code-block:: console $ pypeit_identify -h - usage: pypeit_identify [-h] [--lamps LAMPS] [-s] [--wmin WMIN] [--wmax WMAX] - [--slits SLITS] [-m] [-n] [--det DET] [--rmstol RMSTOL] - [--fwhm FWHM] [--sigdetect SIGDETECT] [--pixtol PIXTOL] - [--linear] [--force_save] [--rescale_resid] - [-v VERBOSITY] [--try_old] + usage: pypeit_identify [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--lamps LAMPS] [-s] + [--wmin WMIN] [--wmax WMAX] [--slits SLITS] [-m] [-n] + [--det DET] [--rmstol RMSTOL] [--fwhm FWHM] + [--sigdetect SIGDETECT] [--pixtol PIXTOL] [--linear] + [--force_save] [--rescale_resid] [--try_old] arc_file slits_file Launch PypeIt pypeit_identify tool, display extracted Arc, and load linelist. @@ -16,6 +17,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) --lamps LAMPS Comma separated list of calibration lamps (no spaces) (default: None) -s, --solution Load a wavelength solution from the arc_file (if it @@ -41,10 +54,6 @@ --force_save Save the solutions, despite the RMS (default: False) --rescale_resid Rescale the residual plot to include all points? (default: False) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename identify_YYYYMMDD- - HHMM.log (default: 1) --try_old Attempt to load old datamodel versions. A crash may ensue.. (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_install_extinctfile.rst b/doc/help/pypeit_install_extinctfile.rst index 4a50fbd5a4..2e526be8c4 100644 --- a/doc/help/pypeit_install_extinctfile.rst +++ b/doc/help/pypeit_install_extinctfile.rst @@ -1,15 +1,29 @@ .. code-block:: console $ pypeit_install_extinctfile -h - usage: pypeit_install_extinctfile [-h] files [files ...] + usage: pypeit_install_extinctfile [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] + files [files ...] 
Script to install user-created extinction file positional arguments: - files One or more files with extinction curve data to be installed in - the PypeIt cache. May include wildcards for multiple files with - the same root. + files One or more files with extinction curve data to be + installed in the PypeIt cache. May include wildcards for + multiple files with the same root. options: - -h, --help show this help message and exit + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) \ No newline at end of file diff --git a/doc/help/pypeit_install_linelist.rst b/doc/help/pypeit_install_linelist.rst index be23b96a96..2273440aef 100644 --- a/doc/help/pypeit_install_linelist.rst +++ b/doc/help/pypeit_install_linelist.rst @@ -1,14 +1,28 @@ .. code-block:: console $ pypeit_install_linelist -h - usage: pypeit_install_linelist [-h] files [files ...] + usage: pypeit_install_linelist [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] + files [files ...] Script to install user-created arc line lists positional arguments: - files Filename(s) of the line list files to be installed in the PypeIt - cache + files Filename(s) of the line list files to be installed in + the PypeIt cache options: - -h, --help show this help message and exit + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) \ No newline at end of file diff --git a/doc/help/pypeit_install_ql_calibs.rst b/doc/help/pypeit_install_ql_calibs.rst index 9785d3dcf6..8e47c93870 100644 --- a/doc/help/pypeit_install_ql_calibs.rst +++ b/doc/help/pypeit_install_ql_calibs.rst @@ -1,19 +1,32 @@ .. code-block:: console $ pypeit_install_ql_calibs -h - usage: pypeit_install_ql_calibs [-h] [--zip ZIP | --ql_path QL_PATH] - [--odir ODIR] [--rmzip] + usage: pypeit_install_ql_calibs [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--zip ZIP | + --ql_path QL_PATH] [--odir ODIR] [--rmzip] Script to install PypeIt QL calibration files options: - -h, --help show this help message and exit - --zip ZIP Zip file of the full QL_CALIB directory downloaded from the - PypeIt Google Drive (default: None) - --ql_path QL_PATH An existing directory to symlink as the QL_CALIB directory. - (default: None) - --odir ODIR The directory in which to extract the zip file. Ignored if - a direct path is provided using --ql_path. 
(default: - current working directory) - --rmzip Remove the downloaded zip file (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --zip ZIP Zip file of the full QL_CALIB directory downloaded from + the PypeIt Google Drive (default: None) + --ql_path QL_PATH An existing directory to symlink as the QL_CALIB + directory. (default: None) + --odir ODIR The directory in which to extract the zip file. Ignored + if a direct path is provided using --ql_path. (default: + current working directory) + --rmzip Remove the downloaded zip file (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_install_telluric.rst b/doc/help/pypeit_install_telluric.rst index 9edef33ae7..13bb855969 100644 --- a/doc/help/pypeit_install_telluric.rst +++ b/doc/help/pypeit_install_telluric.rst @@ -1,19 +1,33 @@ .. code-block:: console $ pypeit_install_telluric -h - usage: pypeit_install_telluric [-h] [--force_update] [--local_file] + usage: pypeit_install_telluric [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--force_update] + [--local_file] files [files ...] Script to download/install PypeIt telluric files positional arguments: - files Exact paths to TelFits files to be downloaded from the Cloud - and installed in the PypeIt cache + files Exact paths to TelFits files to be downloaded from the + Cloud and installed in the PypeIt cache options: - -h, --help show this help message and exit - --force_update Force download of latest version of the telluric grid - (default: False) - --local_file This is a local file to be installed in the cache (default: - False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --force_update Force download of latest version of the telluric grid + (default: False) + --local_file This is a local file to be installed in the cache + (default: False) \ No newline at end of file diff --git a/doc/help/pypeit_install_wvarxiv.rst b/doc/help/pypeit_install_wvarxiv.rst index a1ccbe0bc3..a4ea2f09a5 100644 --- a/doc/help/pypeit_install_wvarxiv.rst +++ b/doc/help/pypeit_install_wvarxiv.rst @@ -1,14 +1,28 @@ .. code-block:: console $ pypeit_install_wvarxiv -h - usage: pypeit_install_wvarxiv [-h] files [files ...] + usage: pypeit_install_wvarxiv [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] + files [files ...] 
Script to install user-created wavelength templates positional arguments: - files Filename(s) of the template files to be installed in the PypeIt - cache + files Filename(s) of the template files to be installed in the + PypeIt cache options: - -h, --help show this help message and exit + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) \ No newline at end of file diff --git a/doc/help/pypeit_lowrdx_skyspec.rst b/doc/help/pypeit_lowrdx_skyspec.rst index e1579a96fa..7b7a3c4963 100644 --- a/doc/help/pypeit_lowrdx_skyspec.rst +++ b/doc/help/pypeit_lowrdx_skyspec.rst @@ -1,15 +1,29 @@ .. code-block:: console $ pypeit_lowrdx_skyspec -h - usage: pypeit_lowrdx_skyspec [-h] lowrdx_sky new_file + usage: pypeit_lowrdx_skyspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] + lowrdx_sky new_file Read an IDL save file with a LowRedux sky spectrum and convert it into a pypeit file. positional arguments: - lowrdx_sky LowRedux Sky Spectrum (IDL save file) - new_file PYPIT FITS sky spectrum + lowrdx_sky LowRedux Sky Spectrum (IDL save file) + new_file PYPIT FITS sky spectrum options: - -h, --help show this help message and exit + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) \ No newline at end of file diff --git a/doc/help/pypeit_multislit_flexure.rst b/doc/help/pypeit_multislit_flexure.rst index 4e748c2d5e..acabd51b72 100644 --- a/doc/help/pypeit_multislit_flexure.rst +++ b/doc/help/pypeit_multislit_flexure.rst @@ -1,26 +1,39 @@ .. code-block:: console $ pypeit_multislit_flexure -h - usage: pypeit_multislit_flexure [-h] [--clobber] [--debug] flex_file outroot + usage: pypeit_multislit_flexure [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--clobber] [--debug] + flex_file outroot Calculate and apply flexure corrections for 1D spectra produced by PypeIt. positional arguments: - flex_file File to guide flexure corrections for this multi-slit mode. This - file must have the following format: - - flexure read - filename - spec1dfile1 - spec1dfile2 - ... - flexure end - - - outroot Output fileroot for the flexure fits saved as FITS. + flex_file File to guide flexure corrections for this multi-slit + mode. This file must have the following format: + + flexure read + filename + spec1dfile1 + spec1dfile2 + ... + flexure end + + + outroot Output fileroot for the flexure fits saved as FITS. 
options: - -h, --help show this help message and exit - --clobber Clobber output files - --debug show debug plots? + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. + --clobber Clobber output files + --debug show debug plots? \ No newline at end of file diff --git a/doc/help/pypeit_obslog.rst b/doc/help/pypeit_obslog.rst index 66a7857f09..f0be4133b0 100644 --- a/doc/help/pypeit_obslog.rst +++ b/doc/help/pypeit_obslog.rst @@ -1,9 +1,10 @@ .. code-block:: console $ pypeit_obslog -h - usage: pypeit_obslog [-h] [-r ROOT] [-k] [-c COLUMNS] [-b] [-t BAD_TYPES] [-g] - [-i] [-s SORT] [-e EXTENSION] [-d OUTPUT_PATH] [-o] - [-f FILE] [-G] + usage: pypeit_obslog [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-r ROOT] [-k] [-c COLUMNS] [-b] + [-t BAD_TYPES] [-g] [-i] [-s SORT] [-e EXTENSION] + [-d OUTPUT_PATH] [-o] [-f FILE] [-G] spec Construct an observing log for a set of files from the provided spectrograph @@ -36,6 +37,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) -r, --root ROOT Root to search for data files. You can provide the top- level directory (e.g., /data/Kast) or the search string up through the wildcard (.e.g, /data/Kast/b). Use the diff --git a/doc/help/pypeit_parse_slits.rst b/doc/help/pypeit_parse_slits.rst index 19dbd2c1dd..37e4e9363f 100644 --- a/doc/help/pypeit_parse_slits.rst +++ b/doc/help/pypeit_parse_slits.rst @@ -1,15 +1,29 @@ .. code-block:: console $ pypeit_parse_slits -h - usage: pypeit_parse_slits [-h] [--try_old] input_file + usage: pypeit_parse_slits [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--try_old] + input_file Print info on slits from a input file positional arguments: - input_file Either a spec2D or Slits filename + input_file Either a spec2D or Slits filename options: - -h, --help show this help message and exit - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. 
If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
+      --try_old             Attempt to load old datamodel versions. A crash may
+                            ensue. (default: False)
\ No newline at end of file
diff --git a/doc/help/pypeit_print_bpm.rst b/doc/help/pypeit_print_bpm.rst
index 73e2508173..acc49bdd20 100644
--- a/doc/help/pypeit_print_bpm.rst
+++ b/doc/help/pypeit_print_bpm.rst
@@ -1,7 +1,9 @@
 .. code-block:: console

     $ pypeit_print_bpm -h
-    usage: pypeit_print_bpm [-h] [--file FILE] [--det DET] bit
+    usage: pypeit_print_bpm [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                            [--log_level LOG_LEVEL] [--file FILE] [--det DET]
+                            bit

     Print out an informative description of a bad pixel masked value. Usually, you
     should run pypeit_show_2dspec --showmask first to see the bad pixel mask values.
@@ -9,17 +11,31 @@
     about.

     positional arguments:
-      bit          Bad pixel mask value to describe in plain text
+      bit                   Bad pixel mask value to describe in plain text

     options:
-      -h, --help   show this help message and exit
-      --file FILE  PypeIt spec2d file to use for the description(optional). If
-                   provided, the bitmask contained in the spec2d file will be used
-                   to describe the bad pixel mask value. If not provided, the
-                   default pypeit bad pixel mask will be used. (default: None)
-      --det DET    Detector name or number. If a number, the name is constructed
-                   assuming the reduction is for a single detector. If a string, it
-                   must match the name of the detector object (e.g., DET01 for a
-                   detector, MSC01 for a mosaic). This is not required, and the
-                   value is acceptable. Default is 1. (default: 1)
+      -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence. (default: 2)
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+                            (default: None)
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
+      --file FILE           PypeIt spec2d file to use for the description
+                            (optional). If provided, the bitmask contained in the
+                            spec2d file will be used to describe the bad pixel
+                            mask value. If not provided, the default pypeit bad
+                            pixel mask will be used. (default: None)
+      --det DET             Detector name or number. If a number, the name is
+                            constructed assuming the reduction is for a single
+                            detector. If a string, it must match the name of the
+                            detector object (e.g., DET01 for a detector, MSC01 for
+                            a mosaic). This is not required, and the default value
+                            is acceptable. Default is 1. (default: 1)
\ No newline at end of file
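The bit values decoded by pypeit_print_bpm are bitmask flags combined into a single integer. A minimal, self-contained sketch of the decoding idea (generic Python, not PypeIt's actual implementation; the helper name is hypothetical):

.. code-block:: python

    def set_bits(value: int) -> list[int]:
        """Return the bit positions that are set in ``value``."""
        return [i for i in range(value.bit_length()) if (value >> i) & 1]

    # A masked-pixel value of 6 combines the flags at bit positions 1 and 2,
    # which is the kind of decomposition the script reports in plain text.
    print(set_bits(6))  # -> [1, 2]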
diff --git a/doc/help/pypeit_qa_html.rst b/doc/help/pypeit_qa_html.rst
index 53e02ef3d0..d8daa47153 100644
--- a/doc/help/pypeit_qa_html.rst
+++ b/doc/help/pypeit_qa_html.rst
@@ -1,15 +1,29 @@
 .. code-block:: console

     $ pypeit_qa_html -h
-    usage: pypeit_qa_html [-h] [--qapath QAPATH] pypeit_file type
+    usage: pypeit_qa_html [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                          [--log_level LOG_LEVEL] [--qapath QAPATH]
+                          pypeit_file type

     Script to build HTML files for PYPIT QA.

     positional arguments:
-      pypeit_file      PYPIT file
-      type             QA Type (MF, exp, all)
+      pypeit_file           PYPIT file
+      type                  QA Type (MF, exp, all)

     options:
-      -h, --help       show this help message and exit
-      --qapath QAPATH  Path the QA folder including QA/) (default: QA/)
+      -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence. (default: 2)
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+                            (default: None)
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
+      --qapath QAPATH       Path to the QA folder (including QA/) (default: QA/)
\ No newline at end of file
diff --git a/doc/help/pypeit_ql.rst b/doc/help/pypeit_ql.rst
index e3dd0e0f00..7e7be62348 100644
--- a/doc/help/pypeit_ql.rst
+++ b/doc/help/pypeit_ql.rst
@@ -1,7 +1,8 @@
 .. code-block:: console

     $ pypeit_ql -h
-    usage: pypeit_ql [-h] [--raw_files RAW_FILES [RAW_FILES ...]]
+    usage: pypeit_ql [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                     [--log_level LOG_LEVEL] [--raw_files RAW_FILES [RAW_FILES ...]]
                      [--raw_path RAW_PATH] [--sci_files SCI_FILES [SCI_FILES ...]]
                      [--redux_path REDUX_PATH] [--parent_calib_dir PARENT_CALIB_DIR]
                      [--setup_calib_dir SETUP_CALIB_DIR] [--clear_science]
@@ -44,6 +45,18 @@

     options:
       -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence. (default: 2)
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+                            (default: None)
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log. (default: None)
       --raw_files RAW_FILES [RAW_FILES ...]
                             Either a PypeIt-formatted input file with the list of
                             raw images to process and the relevant path, or a space-
diff --git a/doc/help/pypeit_run_to_calibstep.rst b/doc/help/pypeit_run_to_calibstep.rst
index 65a976fd13..7f6d5627b1 100644
--- a/doc/help/pypeit_run_to_calibstep.rst
+++ b/doc/help/pypeit_run_to_calibstep.rst
@@ -1,9 +1,11 @@
 .. code-block:: console

     $ pypeit_run_to_calibstep -h
-    usage: pypeit_run_to_calibstep [-h] [--science_frame SCIENCE_FRAME]
+    usage: pypeit_run_to_calibstep [-h] [-v VERBOSITY] [--log_file LOG_FILE]
+                                   [--log_level LOG_LEVEL]
+                                   [--science_frame SCIENCE_FRAME]
                                    [--calib_group CALIB_GROUP] [--det DET]
-                                   [-v VERBOSITY] [-r REDUX_PATH] [-s]
+                                   [-r REDUX_PATH] [-s]
                                    pypeit_file step

     Run PypeIt to a single calibration step for an input frame
@@ -16,6 +18,17 @@

     options:
       -h, --help            show this help message and exit
+      -v, --verbosity VERBOSITY
+                            Verbosity level, which must be 0, 1, or 2. Level 0
+                            includes warning and error messages, level 1 adds
+                            informational messages, and level 2 adds debugging
+                            messages and the calling sequence.
+      --log_file LOG_FILE   Name for the log file. If set to "default", a default
+                            name is used. If None, a log file is not produced.
+      --log_level LOG_LEVEL
+                            Verbosity level for the log file. If a log file is
+                            produced and this is None, the file log will match the
+                            console stream log.
--science_frame SCIENCE_FRAME Raw science frame to reduce as listed in your PypeIt file, e.g. b28.fits.gz. Either this or the calib_group @@ -24,8 +37,6 @@ Calibration group ID to reduce. Either this or the frame must be provided --det DET Detector to reduce - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all] -r, --redux_path REDUX_PATH Path to directory for the reduction. Only advised for testing
diff --git a/doc/help/pypeit_sensfunc.rst b/doc/help/pypeit_sensfunc.rst index 12011cd5f0..ea1679880c 100644 --- a/doc/help/pypeit_sensfunc.rst +++ b/doc/help/pypeit_sensfunc.rst @@ -1,9 +1,10 @@ .. code-block:: console $ pypeit_sensfunc -h - usage: pypeit_sensfunc [-h] [--extr {OPT,BOX}] [--algorithm {UVIS,IR}] - [--multi MULTI] [-o OUTFILE] [-s SENS_FILE] [-f] - [--debug] [--par_outfile PAR_OUTFILE] [-v VERBOSITY] + usage: pypeit_sensfunc [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--extr {OPT,BOX}] + [--algorithm {UVIS,IR}] [--multi MULTI] [-o OUTFILE] + [-s SENS_FILE] [-f] [--debug] [--par_outfile PAR_OUTFILE] spec1dfile Compute a sensitivity function @@ -15,6 +16,17 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. --extr {OPT,BOX} Override the default extraction method used for computing the sensitivity function. Note that it is not possible to set --extr and simultaneously use a .sens @@ -90,8 +102,4 @@ --par_outfile PAR_OUTFILE Name of output file to save the parameters used by the fit - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename sensfunc_YYYYMMDD- - HHMM.log \ No newline at end of file
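Note that, unlike most scripts in this series, pypeit_setup (next hunk) sets --log_file to "default", so a log file is written unless the user disables it. A hypothetical first-pass invocation, with a placeholder raw-data path and spectrograph:

.. code-block:: console

    $ pypeit_setup -r /path/to/raw/ -s shane_kast_blue -c all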
diff --git a/doc/help/pypeit_setup.rst b/doc/help/pypeit_setup.rst index 5c25aa6d5c..767418adf2 100644 --- a/doc/help/pypeit_setup.rst +++ b/doc/help/pypeit_setup.rst @@ -1,15 +1,29 @@ .. code-block:: console $ pypeit_setup -h - usage: pypeit_setup [-h] [-s SPECTROGRAPH] [-r ROOT [ROOT ...]] [-e EXTENSION] - [-d OUTPUT_PATH] [-o] [-c CFG_SPLIT] [-b] [-f] [-m] - [-v VERBOSITY] [-k] [-p PARAM_BLOCK_FILE] [-G] + usage: pypeit_setup [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-s SPECTROGRAPH] + [-r ROOT [ROOT ...]] [-e EXTENSION] [-d OUTPUT_PATH] [-o] + [-c CFG_SPLIT] [-b] [-f] [-m] [-k] [-p PARAM_BLOCK_FILE] + [-G] Parse data files to construct a pypeit file in preparation for reduction using 'run_pypeit' options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) -s, --spectrograph SPECTROGRAPH A valid spectrograph identifier: aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, @@ -65,10 +79,6 @@ -m, --manual_extraction Include the manual extraction column for the user to edit (default: False) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename setup_YYYYMMDD- - HHMM.log (default: 1) -k, --keep_bad_frames Keep all frames, even if they are identified as having bad/unrecognized configurations that cannot be reduced
diff --git a/doc/help/pypeit_setup_coadd2d.rst b/doc/help/pypeit_setup_coadd2d.rst index 185fbbdb66..058aabcb2e 100644 --- a/doc/help/pypeit_setup_coadd2d.rst +++ b/doc/help/pypeit_setup_coadd2d.rst @@ -1,7 +1,8 @@ .. code-block:: console $ pypeit_setup_coadd2d -h - usage: pypeit_setup_coadd2d [-h] (-f PYPEIT_FILE | + usage: pypeit_setup_coadd2d [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] (-f PYPEIT_FILE | -d SCIENCE_DIR [SCIENCE_DIR ...]) [--keep_par] [--obj OBJ [OBJ ...]] [--det DET [DET ...]] [--only_slits ONLY_SLITS [ONLY_SLITS ...]] @@ -15,6 +16,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) -f, --pypeit_file PYPEIT_FILE PypeIt reduction file (default: None) -d, --science_dir SCIENCE_DIR [SCIENCE_DIR ...]
diff --git a/doc/help/pypeit_show_1dspec.rst b/doc/help/pypeit_show_1dspec.rst index b96c66cb05..ef2893e9ae 100644 --- a/doc/help/pypeit_show_1dspec.rst +++ b/doc/help/pypeit_show_1dspec.rst @@ -1,24 +1,38 @@ .. code-block:: console $ pypeit_show_1dspec -h - usage: pypeit_show_1dspec [-h] [--list] [--exten EXTEN] [--obj OBJ] - [--extract EXTRACT] [--flux] [-m] [--ginga] + usage: pypeit_show_1dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--list] [--exten EXTEN] + [--obj OBJ] [--extract EXTRACT] [--flux] [-m] + [--ginga] file Show a 1D spectrum positional arguments: - file Spectral file + file Spectral file options: - -h, --help show this help message and exit - --list List the extensions only? (default: False) - --exten EXTEN FITS extension (default: 1) - --obj OBJ Object name in lieu of extension, e.g. - SPAT0424-SLIT0000-DET01 (default: None) - --extract EXTRACT Extraction method. Default is OPT. ['BOX', 'OPT'] (default: - OPT) - --flux Show fluxed spectrum? (default: False) - -m, --unmasked Only show unmasked data. (default: True) - --ginga Open the spectrum in ginga (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file.
If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) + --list List the extensions only? (default: False) + --exten EXTEN FITS extension (default: 1) + --obj OBJ Object name in lieu of extension, e.g. + SPAT0424-SLIT0000-DET01 (default: None) + --extract EXTRACT Extraction method. Default is OPT. ['BOX', 'OPT'] + (default: OPT) + --flux Show fluxed spectrum? (default: False) + -m, --unmasked Only show unmasked data. (default: True) + --ginga Open the spectrum in ginga (default: False) \ No newline at end of file
diff --git a/doc/help/pypeit_show_2dspec.rst b/doc/help/pypeit_show_2dspec.rst index 18b4e3f4b5..1d6ce26037 100644 --- a/doc/help/pypeit_show_2dspec.rst +++ b/doc/help/pypeit_show_2dspec.rst @@ -1,11 +1,12 @@ .. code-block:: console $ pypeit_show_2dspec -h - usage: pypeit_show_2dspec [-h] [--list] [--det DET] [--spat_id SPAT_ID] - [--maskID MASKID] [--showmask [SHOWMASK ...]] - [--removetrace] [--embed] [--ignore_extract_mask] - [--channels CHANNELS] [--prefix PREFIX] [--no_clear] - [-v VERBOSITY] [--try_old] + usage: pypeit_show_2dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--list] [--det DET] + [--spat_id SPAT_ID] [--maskID MASKID] + [--showmask [SHOWMASK ...]] [--removetrace] [--embed] + [--ignore_extract_mask] [--channels CHANNELS] + [--prefix PREFIX] [--no_clear] [--try_old] file Display sky subtracted, spec2d image in a ginga viewer. @@ -15,6 +16,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) --list List the extensions only? (default: False) --det DET Detector name or number. If a number, the name is constructed assuming the reduction is for a single @@ -45,9 +58,6 @@ --prefix PREFIX Channel name prefix [lets you display more than one set] (default: ) --no_clear Do *not* clear all existing tabs (default: True) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all] (default: - 1) --try_old Attempt to load old datamodel versions. A crash may ensue. (default: False) \ No newline at end of file
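The pypeit_print_bpm help earlier points users at pypeit_show_2dspec --showmask; with the options added above, that inspection can be logged in the same call. A sketch with a placeholder file name:

.. code-block:: console

    $ pypeit_show_2dspec spec2d_example.fits --showmask -v 1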
diff --git a/doc/help/pypeit_show_arxiv.rst b/doc/help/pypeit_show_arxiv.rst index b8cb130a57..54c8ab3848 100644 --- a/doc/help/pypeit_show_arxiv.rst +++ b/doc/help/pypeit_show_arxiv.rst @@ -1,14 +1,28 @@ .. code-block:: console $ pypeit_show_arxiv -h - usage: pypeit_show_arxiv [-h] [--det DET] file + usage: pypeit_show_arxiv [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--det DET] + file Show an archived arc spectrum located in pypeit/data/arc_lines/reid_arxiv positional arguments: - file Arxiv filename, e.g. gemini_gmos_r831_ham.fits + file Arxiv filename, e.g. gemini_gmos_r831_ham.fits options: - -h, --help show this help message and exit - --det DET Detector number (default: 1) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) + --det DET Detector number (default: 1) \ No newline at end of file
diff --git a/doc/help/pypeit_show_pixflat.rst b/doc/help/pypeit_show_pixflat.rst index d7394e32e3..9fa6d85e78 100644 --- a/doc/help/pypeit_show_pixflat.rst +++ b/doc/help/pypeit_show_pixflat.rst @@ -1,17 +1,32 @@ .. code-block:: console $ pypeit_show_pixflat -h - usage: pypeit_show_pixflat [-h] [--det DET [DET ...]] file + usage: pypeit_show_pixflat [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--det DET [DET ...]] + file Show an archived Pixel Flat image in a ginga window. positional arguments: - file Pixel Flat filename, e.g. - pixelflat_keck_lris_blue.fits.gz + file Pixel Flat filename, e.g. + pixelflat_keck_lris_blue.fits.gz options: - -h, --help show this help message and exit - --det DET [DET ...] Detector(s) to show. If more than one, list the detectors - as, e.g. --det 1 2 to show detectors 1 and 2. If not - provided, all detectors will be shown. (default: None) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) + --det DET [DET ...] Detector(s) to show. If more than one, list the + detectors as, e.g. --det 1 2 to show detectors 1 and 2. + If not provided, all detectors will be shown. (default: + None) \ No newline at end of file
diff --git a/doc/help/pypeit_show_wvcalib.rst b/doc/help/pypeit_show_wvcalib.rst index b993d3fd27..b5285535e2 100644 --- a/doc/help/pypeit_show_wvcalib.rst +++ b/doc/help/pypeit_show_wvcalib.rst @@ -1,7 +1,9 @@ .. code-block:: console $ pypeit_show_wvcalib -h - usage: pypeit_show_wvcalib [-h] [--slit_file SLIT_FILE] [--is_order] [--try_old] + usage: pypeit_show_wvcalib [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--slit_file SLIT_FILE] + [--is_order] [--try_old] file slit_order Show the result of wavelength calibration @@ -12,6 +14,18 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log.
(default: None) --slit_file SLIT_FILE Slit file (default: None) --is_order Input slit/order is an order (default: False)
diff --git a/doc/help/pypeit_skysub_regions.rst b/doc/help/pypeit_skysub_regions.rst index 43f669eeda..a98d180651 100644 --- a/doc/help/pypeit_skysub_regions.rst +++ b/doc/help/pypeit_skysub_regions.rst @@ -1,8 +1,9 @@ .. code-block:: console $ pypeit_skysub_regions -h - usage: pypeit_skysub_regions [-h] [--det DET] [-o] [-i] [-f] [-s] [-v VERBOSITY] - [--try_old] + usage: pypeit_skysub_regions [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--det DET] [-o] [-i] [-f] + [-s] [--try_old] file Display a spec2d frame and interactively define the sky regions using a GUI. Run @@ -13,16 +14,24 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) --det DET Detector (default: 1) -o, --overwrite Overwrite any existing files/directories (default: False) -i, --initial Use initial slit edges? (default: False) -f, --flexure Use flexure corrected slit edges? (default: False) -s, --standard List standard stars as well? (default: False) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - skysub_regions_YYYYMMDD-HHMM.log (default: 1) --try_old Attempt to load old datamodel versions. A crash may ensue. (default: False) \ No newline at end of file
diff --git a/doc/help/pypeit_tellfit.rst b/doc/help/pypeit_tellfit.rst index a5264b0994..11e67069d3 100644 --- a/doc/help/pypeit_tellfit.rst +++ b/doc/help/pypeit_tellfit.rst @@ -1,9 +1,10 @@ .. code-block:: console $ pypeit_tellfit -h - usage: pypeit_tellfit [-h] [--objmodel {qso,star,poly}] [-r REDSHIFT] - [-g TELL_GRID] [-p PCA_FILE] [-t TELL_FILE] [--debug] - [--plot] [--par_outfile PAR_OUTFILE] [-v VERBOSITY] + usage: pypeit_tellfit [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--objmodel {qso,star,poly}] + [-r REDSHIFT] [-g TELL_GRID] [-p PCA_FILE] [-t TELL_FILE] + [--debug] [--plot] [--par_outfile PAR_OUTFILE] [--chk_version] spec1dfile @@ -15,6 +16,17 @@ options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. --objmodel {qso,star,poly} The object model to be used for telluric fitting. Currently the options are: ``qso``, ``star``, and @@ -66,10 +78,6 @@ --par_outfile PAR_OUTFILE Name of output file to save the parameters used by the fit - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename tellfit_YYYYMMDD- - HHMM.log --chk_version Ensure the datamodels are from the current PypeIt version. By default (consistent with previous functionality) this is not enforced and crashes may
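For pypeit_tellfit, the logging flags combine with the existing model options. A sketch with made-up inputs (the file name and redshift are placeholders; the qso model typically requires the -r redshift shown in the usage line):

.. code-block:: console

    $ pypeit_tellfit spec1d_example.fits --objmodel qso -r 2.5 --log_file default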
diff --git a/doc/help/pypeit_trace_edges.rst b/doc/help/pypeit_trace_edges.rst index 59c58f4db4..41eda978ba 100644 --- a/doc/help/pypeit_trace_edges.rst +++ b/doc/help/pypeit_trace_edges.rst @@ -1,15 +1,28 @@ .. code-block:: console $ pypeit_trace_edges -h - usage: pypeit_trace_edges [-h] (-f PYPEIT_FILE | -t TRACE_FILE) [-g GROUP] - [-d [DETECTOR ...]] [-s SPECTROGRAPH] [-b BINNING] - [-p REDUX_PATH] [-c CALIB_DIR] [-o] [--debug DEBUG] - [--show] [-v VERBOSITY] + usage: pypeit_trace_edges [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] (-f PYPEIT_FILE | + -t TRACE_FILE) [-g GROUP] [-d [DETECTOR ...]] + [-s SPECTROGRAPH] [-b BINNING] [-p REDUX_PATH] + [-c CALIB_DIR] [-o] [--debug DEBUG] Trace slit edges options: -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: default) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) -f, --pypeit_file PYPEIT_FILE PypeIt reduction file (default: None) -t, --trace_file TRACE_FILE @@ -68,10 +81,4 @@ and the slit and order matching. (3) Also show the individual polynomial fits to the detected edges. (default: 0) - --show DEPRECATED! If set, the code will assume you mean to set - --debug 1. (default: False) - -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all]. Default: - 1. Level 2 writes a log with filename - trace_edges_YYYYMMDD- - HHMM.log (default: 1) \ No newline at end of file
diff --git a/doc/help/pypeit_version.rst b/doc/help/pypeit_version.rst index a27cdef972..db83f1e72a 100644 --- a/doc/help/pypeit_version.rst +++ b/doc/help/pypeit_version.rst @@ -1,8 +1,21 @@ .. code-block:: console $ pypeit_version -h - usage: pypeit_version [-h] + usage: pypeit_version [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] options: - -h, --help show this help message and exit + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) \ No newline at end of file
diff --git a/doc/help/pypeit_view_fits.rst b/doc/help/pypeit_view_fits.rst index 8b3a4b3ca9..373b197f8c 100644 --- a/doc/help/pypeit_view_fits.rst +++ b/doc/help/pypeit_view_fits.rst @@ -1,57 +1,70 @@ .. code-block:: console $ pypeit_view_fits -h - usage: pypeit_view_fits [-h] [--list] [--proc] [--bkg_file BKG_FILE] - [--exten EXTEN] [--det [DET ...]] [--chname CHNAME] - [--showmask] [--embed] + usage: pypeit_view_fits [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--list] [--proc] + [--bkg_file BKG_FILE] [--exten EXTEN] [--det [DET ...]] + [--chname CHNAME] [--showmask] [--embed] spectrograph file View FITS files with ginga positional arguments: - spectrograph A valid spectrograph identifier: aat_uhrf, apf_levy, - bok_bc, gemini_flamingos1, gemini_flamingos2, - gemini_gmos_north_e2v, gemini_gmos_north_ham, - gemini_gmos_north_ham_ns, gemini_gmos_south_ham, - gemini_gnirs_echelle, gemini_gnirs_ifu, gtc_maat, - gtc_osiris, gtc_osiris_plus, jwst_nircam, jwst_nirspec, - keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, - keck_lris_blue, keck_lris_blue_orig, keck_lris_red, - keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, - keck_nires, keck_nirspec_high, keck_nirspec_high_old, - keck_nirspec_low, lbt_luci1, lbt_luci2, lbt_mods1b, - lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, lbt_mods2b, - lbt_mods2b_proc, lbt_mods2r, lbt_mods2r_proc, ldt_deveny, - magellan_fire, magellan_fire_long, magellan_mage, - mdm_modspec, mdm_osmos_mdm4k, mdm_osmos_r4k, - mmt_binospec, mmt_bluechannel, mmt_mmirs, not_alfosc, - not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, - p200_dbsp_red, p200_ngps_i, p200_ngps_r, p200_tspec, - shane_kast_blue, shane_kast_red, shane_kast_red_ret, - soar_goodman_blue, soar_goodman_red, subaru_focas, - tng_dolores, vlt_fors2, vlt_sinfoni, vlt_xshooter_nir, - vlt_xshooter_uvb, vlt_xshooter_vis, wht_isis_blue, - wht_isis_red - file FITS file + spectrograph A valid spectrograph identifier: aat_uhrf, apf_levy, + bok_bc, gemini_flamingos1, gemini_flamingos2, + gemini_gmos_north_e2v, gemini_gmos_north_ham, + gemini_gmos_north_ham_ns, gemini_gmos_south_ham, + gemini_gnirs_echelle, gemini_gnirs_ifu, gtc_maat, + gtc_osiris, gtc_osiris_plus, jwst_nircam, jwst_nirspec, + keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, + keck_lris_blue, keck_lris_blue_orig, keck_lris_red, + keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, + keck_nires, keck_nirspec_high, keck_nirspec_high_old, + keck_nirspec_low, lbt_luci1, lbt_luci2, lbt_mods1b, + lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, + lbt_mods2b, lbt_mods2b_proc, lbt_mods2r, + lbt_mods2r_proc, ldt_deveny, magellan_fire, + magellan_fire_long, magellan_mage, mdm_modspec, + mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec, + mmt_bluechannel, mmt_mmirs, not_alfosc, not_alfosc_vert, + ntt_efosc2, p200_dbsp_blue, p200_dbsp_red, p200_ngps_i, + p200_ngps_r, p200_tspec, shane_kast_blue, + shane_kast_red, shane_kast_red_ret, soar_goodman_blue, + soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2, + vlt_sinfoni, vlt_xshooter_nir, vlt_xshooter_uvb, + vlt_xshooter_vis, wht_isis_blue, wht_isis_red + file FITS file options: - -h, --help show this help message and exit - --list List the extensions only? (default: False) - --proc Process the image (i.e. orient, overscan subtract, - multiply by gain) using pypeit.images.buildimage. - (default: False) - --bkg_file BKG_FILE FITS file to be subtracted from the image in file.--proc - must be set in order for this option to work. (default: - None) - --exten EXTEN Show a FITS extension in the raw file. Note --proc and - --mosaic will not work with this option. (default: None) - --det [DET ...] Detector(s) to show. If more than one, the list of - detectors, i.e. --det 4 8 to show detectors 4 and 8. This - combination must be one of the allowed mosaics hard-coded - for the selected spectrograph. Using "mosaic" for - gemini_gmos, keck_deimos, or keck_lris will show the - mosaic of all detectors. (default: 1) - --chname CHNAME Name of Ginga tab (default: Image) - --showmask Overplot masked pixels (default: False) - --embed Upon completion embed in ipython shell (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. (default: None) + --list List the extensions only? (default: False) + --proc Process the image (i.e. orient, overscan subtract, + multiply by gain) using pypeit.images.buildimage. + (default: False) + --bkg_file BKG_FILE FITS file to be subtracted from the image in file. --proc + must be set in order for this option to work. (default: + None) + --exten EXTEN Show a FITS extension in the raw file. Note --proc and + --mosaic will not work with this option. (default: None) + --det [DET ...] Detector(s) to show. If more than one, the list of + detectors, e.g., --det 4 8 to show detectors 4 and 8. + This combination must be one of the allowed mosaics + hard-coded for the selected spectrograph. Using "mosaic" + for gemini_gmos, keck_deimos, or keck_lris will show the + mosaic of all detectors. (default: 1) + --chname CHNAME Name of Ginga tab (default: Image) + --showmask Overplot masked pixels (default: False) + --embed Upon completion embed in ipython shell (default: False) \ No newline at end of file
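The "mosaic" behavior described in the pypeit_view_fits help above can be exercised as follows; the raw file name is a placeholder, and keck_deimos is just one of the spectrographs the help names as supporting a full-detector mosaic:

.. code-block:: console

    $ pypeit_view_fits keck_deimos raw_frame.fits --det mosaic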
diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index 1d026912a5..eef555289f 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -1,11 +1,12 @@ .. code-block:: console $ run_pypeit -h - usage: run_pypeit [-h] [-v VERBOSITY] [-r REDUX_PATH] [-m] [-s] [-o] [-c] + usage: run_pypeit [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [-r REDUX_PATH] [-m] [-s] [-o] [-c] pypeit_file PypeIt: The Python Spectroscopic Data Reduction Pipeline - Version 1.18.2.dev173+gcdaaa7636 + Version 1.18.2.dev171+g808a323c9 Available spectrographs include: aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, @@ -33,7 +34,16 @@ options: -h, --help show this help message and exit -v, --verbosity VERBOSITY - Verbosity level between 0 [none] and 2 [all] + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produced and this is None, the file log will match the + console stream log. -r, --redux_path REDUX_PATH Path to directory for the reduction.
Only advised for testing diff --git a/pypeit/scripts/scriptbase.py b/pypeit/scripts/scriptbase.py index a1080a114c..b3aa6ba2c9 100644 --- a/pypeit/scripts/scriptbase.py +++ b/pypeit/scripts/scriptbase.py @@ -219,9 +219,9 @@ def get_parser(cls, description=None, width=None, # Add the logging options parser.add_argument( '-v', '--verbosity', type=int, default=2, - help='Verbosity level, which must be 0, 1, or 2. Level 0 only includes warning and ' - 'error messages, level 1 also includes informational messages, and level 2 also ' - 'includes debugging messages and includes the calling function in the message.' + help='Verbosity level, which must be 0, 1, or 2. Level 0 includes warning and error ' + 'messages, level 1 adds informational messages, and level 2 adds debugging ' + 'messages and the calling sequence.' ) parser.add_argument( '--log_file', type=str, default='default' if default_log_file else None, From ae1aff9260ec7ae6e5d28de9db6e1420f09a264a Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Fri, 17 Oct 2025 13:53:49 -0700 Subject: [PATCH 20/33] typing and intersphinx --- doc/api/pypeit.alignframe.rst | 2 +- doc/api/pypeit.archive.rst | 2 +- doc/api/pypeit.bitmask.rst | 2 +- doc/api/pypeit.bspline.bspline.rst | 2 +- doc/api/pypeit.bspline.rst | 2 +- doc/api/pypeit.bspline.util.rst | 2 +- doc/api/pypeit.cache.rst | 2 +- doc/api/pypeit.calibframe.rst | 2 +- doc/api/pypeit.calibrations.rst | 2 +- doc/api/pypeit.coadd1d.rst | 2 +- doc/api/pypeit.coadd2d.rst | 2 +- doc/api/pypeit.coadd3d.rst | 2 +- doc/api/pypeit.core.arc.rst | 2 +- doc/api/pypeit.core.atmextinction.rst | 2 +- doc/api/pypeit.core.basis.rst | 2 +- doc/api/pypeit.core.coadd.rst | 2 +- doc/api/pypeit.core.collate.rst | 2 +- doc/api/pypeit.core.combine.rst | 2 +- .../pypeit.core.convert_DEIMOSsavfiles.rst | 2 +- doc/api/pypeit.core.datacube.rst | 2 +- doc/api/pypeit.core.extract.rst | 2 +- doc/api/pypeit.core.findobj_skymask.rst | 2 +- doc/api/pypeit.core.fitting.rst | 2 +- doc/api/pypeit.core.flat.rst | 2 +- doc/api/pypeit.core.flexure.rst | 2 +- doc/api/pypeit.core.flux_calib.rst | 2 +- doc/api/pypeit.core.framematch.rst | 2 +- doc/api/pypeit.core.gui.edge_inspector.rst | 2 +- doc/api/pypeit.core.gui.gui_util.rst | 2 +- doc/api/pypeit.core.gui.identify.rst | 2 +- doc/api/pypeit.core.gui.object_find.rst | 2 +- doc/api/pypeit.core.gui.rst | 2 +- doc/api/pypeit.core.gui.skysub_regions.rst | 2 +- doc/api/pypeit.core.meta.rst | 2 +- doc/api/pypeit.core.moment.rst | 2 +- doc/api/pypeit.core.mosaic.rst | 2 +- doc/api/pypeit.core.parse.rst | 2 +- doc/api/pypeit.core.pca.rst | 2 +- doc/api/pypeit.core.pixels.rst | 2 +- doc/api/pypeit.core.plot.rst | 2 +- doc/api/pypeit.core.procimg.rst | 2 +- doc/api/pypeit.core.pydl.rst | 2 +- doc/api/pypeit.core.qa.rst | 2 +- doc/api/pypeit.core.rst | 2 +- doc/api/pypeit.core.scattlight.rst | 2 +- doc/api/pypeit.core.skysub.rst | 2 +- doc/api/pypeit.core.slitdesign_matching.rst | 2 +- doc/api/pypeit.core.spectrum.rst | 2 +- doc/api/pypeit.core.standard.rst | 2 +- doc/api/pypeit.core.telluric.rst | 2 +- doc/api/pypeit.core.trace.rst | 2 +- doc/api/pypeit.core.tracewave.rst | 2 +- doc/api/pypeit.core.transform.rst | 2 +- doc/api/pypeit.core.wave.rst | 2 +- doc/api/pypeit.core.wavecal.autoid.rst | 2 +- doc/api/pypeit.core.wavecal.defs.rst | 2 +- doc/api/pypeit.core.wavecal.echelle.rst | 2 +- .../pypeit.core.wavecal.kdtree_generator.rst | 2 +- doc/api/pypeit.core.wavecal.patterns.rst | 2 +- doc/api/pypeit.core.wavecal.rst | 2 +- doc/api/pypeit.core.wavecal.templates.rst | 2 +- 
doc/api/pypeit.core.wavecal.waveio.rst | 2 +- doc/api/pypeit.core.wavecal.wv_fitting.rst | 2 +- doc/api/pypeit.core.wavecal.wvutils.rst | 2 +- doc/api/pypeit.datamodel.rst | 2 +- doc/api/pypeit.display.display.rst | 2 +- doc/api/pypeit.display.rst | 2 +- doc/api/pypeit.display.slitwavelength.rst | 2 +- doc/api/pypeit.display.spec1dview.rst | 2 +- doc/api/pypeit.edgetrace.rst | 2 +- doc/api/pypeit.exceptions.rst | 2 +- doc/api/pypeit.extraction.rst | 2 +- doc/api/pypeit.find_objects.rst | 2 +- doc/api/pypeit.flatfield.rst | 2 +- doc/api/pypeit.fluxcalibrate.rst | 2 +- doc/api/pypeit.history.rst | 2 +- doc/api/pypeit.images.bitmaskarray.rst | 2 +- doc/api/pypeit.images.buildimage.rst | 2 +- doc/api/pypeit.images.combineimage.rst | 2 +- doc/api/pypeit.images.detector_container.rst | 2 +- doc/api/pypeit.images.imagebitmask.rst | 2 +- doc/api/pypeit.images.mosaic.rst | 2 +- doc/api/pypeit.images.pypeitimage.rst | 2 +- doc/api/pypeit.images.rawimage.rst | 2 +- doc/api/pypeit.images.rst | 2 +- doc/api/pypeit.inputfiles.rst | 2 +- doc/api/pypeit.io.rst | 2 +- doc/api/pypeit.logger.rst | 2 +- doc/api/pypeit.manual_extract.rst | 2 +- doc/api/pypeit.metadata.rst | 2 +- doc/api/pypeit.move_median.mmpy.rst | 2 +- doc/api/pypeit.move_median.move_median.rst | 2 +- doc/api/pypeit.move_median.rst | 2 +- doc/api/pypeit.onespec.rst | 2 +- doc/api/pypeit.orderstack.rst | 2 +- doc/api/pypeit.par.parset.rst | 2 +- doc/api/pypeit.par.pypeitpar.rst | 2 +- doc/api/pypeit.par.rst | 2 +- doc/api/pypeit.par.util.rst | 2 +- doc/api/pypeit.pypeit.rst | 2 +- doc/api/pypeit.pypeitdata.rst | 2 +- doc/api/pypeit.pypeitsetup.rst | 2 +- doc/api/pypeit.rst | 2 +- doc/api/pypeit.sampling.rst | 2 +- doc/api/pypeit.scattlight.rst | 2 +- doc/api/pypeit.scripts.arxiv_solution.rst | 2 +- doc/api/pypeit.scripts.cache_github_data.rst | 2 +- doc/api/pypeit.scripts.chk_alignments.rst | 2 +- doc/api/pypeit.scripts.chk_edges.rst | 2 +- doc/api/pypeit.scripts.chk_flats.rst | 2 +- doc/api/pypeit.scripts.chk_flexure.rst | 2 +- doc/api/pypeit.scripts.chk_for_calibs.rst | 2 +- doc/api/pypeit.scripts.chk_noise_1dspec.rst | 2 +- doc/api/pypeit.scripts.chk_noise_2dspec.rst | 2 +- doc/api/pypeit.scripts.chk_plugins.rst | 2 +- doc/api/pypeit.scripts.chk_scattlight.rst | 2 +- doc/api/pypeit.scripts.chk_tilts.rst | 2 +- doc/api/pypeit.scripts.chk_wavecalib.rst | 2 +- doc/api/pypeit.scripts.clean_cache.rst | 2 +- doc/api/pypeit.scripts.coadd_1dspec.rst | 2 +- doc/api/pypeit.scripts.coadd_2dspec.rst | 2 +- doc/api/pypeit.scripts.coadd_datacube.rst | 2 +- doc/api/pypeit.scripts.collate_1d.rst | 2 +- doc/api/pypeit.scripts.compare_sky.rst | 2 +- doc/api/pypeit.scripts.compile_wvarxiv.rst | 2 +- doc/api/pypeit.scripts.edge_inspector.rst | 2 +- doc/api/pypeit.scripts.extract_datacube.rst | 2 +- doc/api/pypeit.scripts.flux_calib.rst | 2 +- doc/api/pypeit.scripts.flux_setup.rst | 2 +- doc/api/pypeit.scripts.identify.rst | 2 +- .../pypeit.scripts.install_extinctfile.rst | 2 +- doc/api/pypeit.scripts.install_linelist.rst | 2 +- doc/api/pypeit.scripts.install_ql_calibs.rst | 2 +- doc/api/pypeit.scripts.install_telluric.rst | 2 +- doc/api/pypeit.scripts.install_wvarxiv.rst | 2 +- doc/api/pypeit.scripts.lowrdx_skyspec.rst | 2 +- doc/api/pypeit.scripts.multislit_flexure.rst | 2 +- doc/api/pypeit.scripts.obslog.rst | 2 +- doc/api/pypeit.scripts.parse_slits.rst | 2 +- doc/api/pypeit.scripts.print_bpm.rst | 2 +- doc/api/pypeit.scripts.qa_html.rst | 2 +- doc/api/pypeit.scripts.ql.rst | 2 +- doc/api/pypeit.scripts.rst | 2 +- doc/api/pypeit.scripts.run_pypeit.rst | 2 
+- doc/api/pypeit.scripts.run_to_calibstep.rst | 2 +- doc/api/pypeit.scripts.scriptbase.rst | 2 +- doc/api/pypeit.scripts.sensfunc.rst | 2 +- doc/api/pypeit.scripts.setup.rst | 2 +- doc/api/pypeit.scripts.setup_coadd2d.rst | 2 +- doc/api/pypeit.scripts.show_1dspec.rst | 2 +- doc/api/pypeit.scripts.show_2dspec.rst | 2 +- doc/api/pypeit.scripts.show_arxiv.rst | 2 +- doc/api/pypeit.scripts.show_pixflat.rst | 2 +- doc/api/pypeit.scripts.show_wvcalib.rst | 2 +- doc/api/pypeit.scripts.skysub_regions.rst | 2 +- doc/api/pypeit.scripts.tellfit.rst | 2 +- doc/api/pypeit.scripts.trace_edges.rst | 2 +- doc/api/pypeit.scripts.version.rst | 2 +- doc/api/pypeit.scripts.view_fits.rst | 2 +- doc/api/pypeit.sensfilearchive.rst | 2 +- doc/api/pypeit.sensfunc.rst | 2 +- doc/api/pypeit.setup_gui.controller.rst | 2 +- doc/api/pypeit.setup_gui.dialog_helpers.rst | 2 +- doc/api/pypeit.setup_gui.model.rst | 2 +- doc/api/pypeit.setup_gui.rst | 2 +- doc/api/pypeit.setup_gui.text_viewer.rst | 2 +- doc/api/pypeit.setup_gui.view.rst | 2 +- doc/api/pypeit.slittrace.rst | 2 +- doc/api/pypeit.spec2dobj.rst | 2 +- doc/api/pypeit.specobj.rst | 2 +- doc/api/pypeit.specobjs.rst | 2 +- doc/api/pypeit.spectrographs.aat_uhrf.rst | 2 +- doc/api/pypeit.spectrographs.apf_levy.rst | 2 +- doc/api/pypeit.spectrographs.bok_bc.rst | 2 +- .../pypeit.spectrographs.gemini_flamingos.rst | 2 +- doc/api/pypeit.spectrographs.gemini_gmos.rst | 2 +- doc/api/pypeit.spectrographs.gemini_gnirs.rst | 2 +- doc/api/pypeit.spectrographs.gtc_osiris.rst | 2 +- doc/api/pypeit.spectrographs.jwst_nircam.rst | 2 +- doc/api/pypeit.spectrographs.jwst_nirspec.rst | 2 +- doc/api/pypeit.spectrographs.keck_deimos.rst | 2 +- doc/api/pypeit.spectrographs.keck_esi.rst | 2 +- doc/api/pypeit.spectrographs.keck_hires.rst | 2 +- doc/api/pypeit.spectrographs.keck_kcwi.rst | 2 +- doc/api/pypeit.spectrographs.keck_lris.rst | 2 +- doc/api/pypeit.spectrographs.keck_mosfire.rst | 2 +- doc/api/pypeit.spectrographs.keck_nires.rst | 2 +- doc/api/pypeit.spectrographs.keck_nirspec.rst | 2 +- doc/api/pypeit.spectrographs.lbt_luci.rst | 2 +- doc/api/pypeit.spectrographs.lbt_mods.rst | 2 +- doc/api/pypeit.spectrographs.ldt_deveny.rst | 2 +- .../pypeit.spectrographs.magellan_fire.rst | 2 +- .../pypeit.spectrographs.magellan_mage.rst | 2 +- doc/api/pypeit.spectrographs.mdm_modspec.rst | 2 +- doc/api/pypeit.spectrographs.mdm_osmos.rst | 2 +- doc/api/pypeit.spectrographs.mmt_binospec.rst | 2 +- .../pypeit.spectrographs.mmt_bluechannel.rst | 2 +- doc/api/pypeit.spectrographs.mmt_mmirs.rst | 2 +- doc/api/pypeit.spectrographs.not_alfosc.rst | 2 +- doc/api/pypeit.spectrographs.ntt_efosc2.rst | 2 +- doc/api/pypeit.spectrographs.opticalmodel.rst | 2 +- doc/api/pypeit.spectrographs.p200_dbsp.rst | 2 +- doc/api/pypeit.spectrographs.p200_ngps.rst | 2 +- doc/api/pypeit.spectrographs.p200_tspec.rst | 2 +- doc/api/pypeit.spectrographs.rst | 2 +- doc/api/pypeit.spectrographs.shane_kast.rst | 2 +- doc/api/pypeit.spectrographs.slitmask.rst | 2 +- doc/api/pypeit.spectrographs.soar_goodman.rst | 2 +- doc/api/pypeit.spectrographs.spectrograph.rst | 2 +- doc/api/pypeit.spectrographs.subaru_focas.rst | 2 +- doc/api/pypeit.spectrographs.tng_dolores.rst | 2 +- doc/api/pypeit.spectrographs.util.rst | 2 +- doc/api/pypeit.spectrographs.vlt_fors.rst | 2 +- doc/api/pypeit.spectrographs.vlt_sinfoni.rst | 2 +- doc/api/pypeit.spectrographs.vlt_xshooter.rst | 2 +- doc/api/pypeit.spectrographs.wht_isis.rst | 2 +- doc/api/pypeit.specutils.pypeit_loaders.rst | 2 +- doc/api/pypeit.specutils.rst | 2 +- 
doc/api/pypeit.telescopes.rst | 2 +- doc/api/pypeit.tracepca.rst | 2 +- doc/api/pypeit.utils.rst | 2 +- doc/api/pypeit.wavecalib.rst | 2 +- doc/api/pypeit.wavemodel.rst | 2 +- doc/api/pypeit.wavetilts.rst | 2 +- doc/conf.py | 12 ++++++++++-- doc/help/run_pypeit.rst | 2 +- doc/include/dependencies_table.rst | 2 +- pypeit/calibframe.py | 2 +- pypeit/logger.py | 19 +++++++++---------- pypeit/specobjs.py | 19 ++++++++----------- pypeit/spectrographs/p200_dbsp.py | 12 +++++------- pypeit/spectrographs/p200_ngps.py | 10 ++++------ pyproject.toml | 10 ++++++---- 233 files changed, 269 insertions(+), 267 deletions(-) diff --git a/doc/api/pypeit.alignframe.rst b/doc/api/pypeit.alignframe.rst index 84c2e09b99..615ec00d90 100644 --- a/doc/api/pypeit.alignframe.rst +++ b/doc/api/pypeit.alignframe.rst @@ -4,5 +4,5 @@ pypeit.alignframe module .. automodule:: pypeit.alignframe :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.archive.rst b/doc/api/pypeit.archive.rst index 8e2b7601d6..264a407430 100644 --- a/doc/api/pypeit.archive.rst +++ b/doc/api/pypeit.archive.rst @@ -4,5 +4,5 @@ pypeit.archive module .. automodule:: pypeit.archive :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.bitmask.rst b/doc/api/pypeit.bitmask.rst index 117923ef64..c2cb4b0458 100644 --- a/doc/api/pypeit.bitmask.rst +++ b/doc/api/pypeit.bitmask.rst @@ -4,5 +4,5 @@ pypeit.bitmask module .. automodule:: pypeit.bitmask :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.bspline.bspline.rst b/doc/api/pypeit.bspline.bspline.rst index 9ed6fc5fb1..283eec1ed4 100644 --- a/doc/api/pypeit.bspline.bspline.rst +++ b/doc/api/pypeit.bspline.bspline.rst @@ -4,5 +4,5 @@ pypeit.bspline.bspline module .. automodule:: pypeit.bspline.bspline :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.bspline.rst b/doc/api/pypeit.bspline.rst index ec6d32e890..209cb47557 100644 --- a/doc/api/pypeit.bspline.rst +++ b/doc/api/pypeit.bspline.rst @@ -16,5 +16,5 @@ Module contents .. automodule:: pypeit.bspline :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.bspline.util.rst b/doc/api/pypeit.bspline.util.rst index 8f69625b8a..86f3cf56a8 100644 --- a/doc/api/pypeit.bspline.util.rst +++ b/doc/api/pypeit.bspline.util.rst @@ -4,5 +4,5 @@ pypeit.bspline.util module .. automodule:: pypeit.bspline.util :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.cache.rst b/doc/api/pypeit.cache.rst index aafab8a328..35387481de 100644 --- a/doc/api/pypeit.cache.rst +++ b/doc/api/pypeit.cache.rst @@ -4,5 +4,5 @@ pypeit.cache module .. automodule:: pypeit.cache :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.calibframe.rst b/doc/api/pypeit.calibframe.rst index 3c8fc7e3ea..95f08625e9 100644 --- a/doc/api/pypeit.calibframe.rst +++ b/doc/api/pypeit.calibframe.rst @@ -4,5 +4,5 @@ pypeit.calibframe module .. automodule:: pypeit.calibframe :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.calibrations.rst b/doc/api/pypeit.calibrations.rst index 5720bddb0f..ec7b03135d 100644 --- a/doc/api/pypeit.calibrations.rst +++ b/doc/api/pypeit.calibrations.rst @@ -4,5 +4,5 @@ pypeit.calibrations module .. 
automodule:: pypeit.calibrations :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.coadd1d.rst b/doc/api/pypeit.coadd1d.rst index 6af070ab54..da591347b3 100644 --- a/doc/api/pypeit.coadd1d.rst +++ b/doc/api/pypeit.coadd1d.rst @@ -4,5 +4,5 @@ pypeit.coadd1d module .. automodule:: pypeit.coadd1d :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.coadd2d.rst b/doc/api/pypeit.coadd2d.rst index 1c42ab32bb..981f45c378 100644 --- a/doc/api/pypeit.coadd2d.rst +++ b/doc/api/pypeit.coadd2d.rst @@ -4,5 +4,5 @@ pypeit.coadd2d module .. automodule:: pypeit.coadd2d :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.coadd3d.rst b/doc/api/pypeit.coadd3d.rst index f6d6dbc5ac..aca179f489 100644 --- a/doc/api/pypeit.coadd3d.rst +++ b/doc/api/pypeit.coadd3d.rst @@ -4,5 +4,5 @@ pypeit.coadd3d module .. automodule:: pypeit.coadd3d :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.arc.rst b/doc/api/pypeit.core.arc.rst index f5ef7a388f..5374a01e11 100644 --- a/doc/api/pypeit.core.arc.rst +++ b/doc/api/pypeit.core.arc.rst @@ -4,5 +4,5 @@ pypeit.core.arc module .. automodule:: pypeit.core.arc :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.atmextinction.rst b/doc/api/pypeit.core.atmextinction.rst index 21b6f6bb56..eccd614e29 100644 --- a/doc/api/pypeit.core.atmextinction.rst +++ b/doc/api/pypeit.core.atmextinction.rst @@ -4,5 +4,5 @@ pypeit.core.atmextinction module .. automodule:: pypeit.core.atmextinction :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.basis.rst b/doc/api/pypeit.core.basis.rst index ae76d09c44..dc2ebd322c 100644 --- a/doc/api/pypeit.core.basis.rst +++ b/doc/api/pypeit.core.basis.rst @@ -4,5 +4,5 @@ pypeit.core.basis module .. automodule:: pypeit.core.basis :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.coadd.rst b/doc/api/pypeit.core.coadd.rst index 80e352d9f7..bc90b8e397 100644 --- a/doc/api/pypeit.core.coadd.rst +++ b/doc/api/pypeit.core.coadd.rst @@ -4,5 +4,5 @@ pypeit.core.coadd module .. automodule:: pypeit.core.coadd :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.collate.rst b/doc/api/pypeit.core.collate.rst index 070124a927..fe829b5238 100644 --- a/doc/api/pypeit.core.collate.rst +++ b/doc/api/pypeit.core.collate.rst @@ -4,5 +4,5 @@ pypeit.core.collate module .. automodule:: pypeit.core.collate :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.combine.rst b/doc/api/pypeit.core.combine.rst index 877e2f6cf0..0351f48fca 100644 --- a/doc/api/pypeit.core.combine.rst +++ b/doc/api/pypeit.core.combine.rst @@ -4,5 +4,5 @@ pypeit.core.combine module .. automodule:: pypeit.core.combine :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.convert_DEIMOSsavfiles.rst b/doc/api/pypeit.core.convert_DEIMOSsavfiles.rst index 8b6dfeb745..c7e47cbb3d 100644 --- a/doc/api/pypeit.core.convert_DEIMOSsavfiles.rst +++ b/doc/api/pypeit.core.convert_DEIMOSsavfiles.rst @@ -4,5 +4,5 @@ pypeit.core.convert\_DEIMOSsavfiles module .. 
automodule:: pypeit.core.convert_DEIMOSsavfiles :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.datacube.rst b/doc/api/pypeit.core.datacube.rst index cf07b5ad0c..dad6f1c141 100644 --- a/doc/api/pypeit.core.datacube.rst +++ b/doc/api/pypeit.core.datacube.rst @@ -4,5 +4,5 @@ pypeit.core.datacube module .. automodule:: pypeit.core.datacube :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.extract.rst b/doc/api/pypeit.core.extract.rst index 5af4e87bbe..12ea740369 100644 --- a/doc/api/pypeit.core.extract.rst +++ b/doc/api/pypeit.core.extract.rst @@ -4,5 +4,5 @@ pypeit.core.extract module .. automodule:: pypeit.core.extract :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.findobj_skymask.rst b/doc/api/pypeit.core.findobj_skymask.rst index 0e512a7260..a9e3ca2576 100644 --- a/doc/api/pypeit.core.findobj_skymask.rst +++ b/doc/api/pypeit.core.findobj_skymask.rst @@ -4,5 +4,5 @@ pypeit.core.findobj\_skymask module .. automodule:: pypeit.core.findobj_skymask :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.fitting.rst b/doc/api/pypeit.core.fitting.rst index 3dce3b2da3..6923840242 100644 --- a/doc/api/pypeit.core.fitting.rst +++ b/doc/api/pypeit.core.fitting.rst @@ -4,5 +4,5 @@ pypeit.core.fitting module .. automodule:: pypeit.core.fitting :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.flat.rst b/doc/api/pypeit.core.flat.rst index b4ee1f8597..f84feb8cb7 100644 --- a/doc/api/pypeit.core.flat.rst +++ b/doc/api/pypeit.core.flat.rst @@ -4,5 +4,5 @@ pypeit.core.flat module .. automodule:: pypeit.core.flat :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.flexure.rst b/doc/api/pypeit.core.flexure.rst index 59f883767c..e39f7ff2cb 100644 --- a/doc/api/pypeit.core.flexure.rst +++ b/doc/api/pypeit.core.flexure.rst @@ -4,5 +4,5 @@ pypeit.core.flexure module .. automodule:: pypeit.core.flexure :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.flux_calib.rst b/doc/api/pypeit.core.flux_calib.rst index 91a11f1ff9..69acf72c89 100644 --- a/doc/api/pypeit.core.flux_calib.rst +++ b/doc/api/pypeit.core.flux_calib.rst @@ -4,5 +4,5 @@ pypeit.core.flux\_calib module .. automodule:: pypeit.core.flux_calib :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.framematch.rst b/doc/api/pypeit.core.framematch.rst index 71619e6be8..3e860687ba 100644 --- a/doc/api/pypeit.core.framematch.rst +++ b/doc/api/pypeit.core.framematch.rst @@ -4,5 +4,5 @@ pypeit.core.framematch module .. automodule:: pypeit.core.framematch :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.gui.edge_inspector.rst b/doc/api/pypeit.core.gui.edge_inspector.rst index cde51e8e23..31622f4014 100644 --- a/doc/api/pypeit.core.gui.edge_inspector.rst +++ b/doc/api/pypeit.core.gui.edge_inspector.rst @@ -4,5 +4,5 @@ pypeit.core.gui.edge\_inspector module .. 
automodule:: pypeit.core.gui.edge_inspector :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.gui.gui_util.rst b/doc/api/pypeit.core.gui.gui_util.rst index fb7313f235..7cb1843b4a 100644 --- a/doc/api/pypeit.core.gui.gui_util.rst +++ b/doc/api/pypeit.core.gui.gui_util.rst @@ -4,5 +4,5 @@ pypeit.core.gui.gui\_util module .. automodule:: pypeit.core.gui.gui_util :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.gui.identify.rst b/doc/api/pypeit.core.gui.identify.rst index 48e0081520..810fd12dee 100644 --- a/doc/api/pypeit.core.gui.identify.rst +++ b/doc/api/pypeit.core.gui.identify.rst @@ -4,5 +4,5 @@ pypeit.core.gui.identify module .. automodule:: pypeit.core.gui.identify :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.gui.object_find.rst b/doc/api/pypeit.core.gui.object_find.rst index 34e3e5a5d5..fac1b76db3 100644 --- a/doc/api/pypeit.core.gui.object_find.rst +++ b/doc/api/pypeit.core.gui.object_find.rst @@ -4,5 +4,5 @@ pypeit.core.gui.object\_find module .. automodule:: pypeit.core.gui.object_find :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.gui.rst b/doc/api/pypeit.core.gui.rst index bb25a0fcdb..74ccc7f26b 100644 --- a/doc/api/pypeit.core.gui.rst +++ b/doc/api/pypeit.core.gui.rst @@ -19,5 +19,5 @@ Module contents .. automodule:: pypeit.core.gui :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.gui.skysub_regions.rst b/doc/api/pypeit.core.gui.skysub_regions.rst index 96f871fb93..2cc2a61b24 100644 --- a/doc/api/pypeit.core.gui.skysub_regions.rst +++ b/doc/api/pypeit.core.gui.skysub_regions.rst @@ -4,5 +4,5 @@ pypeit.core.gui.skysub\_regions module .. automodule:: pypeit.core.gui.skysub_regions :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.meta.rst b/doc/api/pypeit.core.meta.rst index ff3e2420f9..283eefb51d 100644 --- a/doc/api/pypeit.core.meta.rst +++ b/doc/api/pypeit.core.meta.rst @@ -4,5 +4,5 @@ pypeit.core.meta module .. automodule:: pypeit.core.meta :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.moment.rst b/doc/api/pypeit.core.moment.rst index a0401ebf0b..3e3daff15f 100644 --- a/doc/api/pypeit.core.moment.rst +++ b/doc/api/pypeit.core.moment.rst @@ -4,5 +4,5 @@ pypeit.core.moment module .. automodule:: pypeit.core.moment :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.mosaic.rst b/doc/api/pypeit.core.mosaic.rst index 539860d09c..c4097b96d3 100644 --- a/doc/api/pypeit.core.mosaic.rst +++ b/doc/api/pypeit.core.mosaic.rst @@ -4,5 +4,5 @@ pypeit.core.mosaic module .. automodule:: pypeit.core.mosaic :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.core.parse.rst b/doc/api/pypeit.core.parse.rst index 93fb74347f..5179e04993 100644 --- a/doc/api/pypeit.core.parse.rst +++ b/doc/api/pypeit.core.parse.rst @@ -4,5 +4,5 @@ pypeit.core.parse module .. 
automodule:: pypeit.core.parse
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.pca.rst b/doc/api/pypeit.core.pca.rst
index dc61f90e7a..3434baa974 100644
--- a/doc/api/pypeit.core.pca.rst
+++ b/doc/api/pypeit.core.pca.rst
@@ -4,5 +4,5 @@ pypeit.core.pca module
 .. automodule:: pypeit.core.pca
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.pixels.rst b/doc/api/pypeit.core.pixels.rst
index ce1c64b5f2..307ba52cc9 100644
--- a/doc/api/pypeit.core.pixels.rst
+++ b/doc/api/pypeit.core.pixels.rst
@@ -4,5 +4,5 @@ pypeit.core.pixels module
 .. automodule:: pypeit.core.pixels
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.plot.rst b/doc/api/pypeit.core.plot.rst
index 023be1d416..24d707104a 100644
--- a/doc/api/pypeit.core.plot.rst
+++ b/doc/api/pypeit.core.plot.rst
@@ -4,5 +4,5 @@ pypeit.core.plot module
 .. automodule:: pypeit.core.plot
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.procimg.rst b/doc/api/pypeit.core.procimg.rst
index 93d92dd703..4a080bd41d 100644
--- a/doc/api/pypeit.core.procimg.rst
+++ b/doc/api/pypeit.core.procimg.rst
@@ -4,5 +4,5 @@ pypeit.core.procimg module
 .. automodule:: pypeit.core.procimg
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.pydl.rst b/doc/api/pypeit.core.pydl.rst
index 333878e97b..69b6b07628 100644
--- a/doc/api/pypeit.core.pydl.rst
+++ b/doc/api/pypeit.core.pydl.rst
@@ -4,5 +4,5 @@ pypeit.core.pydl module
 .. automodule:: pypeit.core.pydl
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.qa.rst b/doc/api/pypeit.core.qa.rst
index d0ef56463c..21c2069752 100644
--- a/doc/api/pypeit.core.qa.rst
+++ b/doc/api/pypeit.core.qa.rst
@@ -4,5 +4,5 @@ pypeit.core.qa module
 .. automodule:: pypeit.core.qa
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.rst b/doc/api/pypeit.core.rst
index be869a634d..a3adbc77eb 100644
--- a/doc/api/pypeit.core.rst
+++ b/doc/api/pypeit.core.rst
@@ -58,5 +58,5 @@ Module contents
 .. automodule:: pypeit.core
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.scattlight.rst b/doc/api/pypeit.core.scattlight.rst
index a4cb0d8d77..206c805dbd 100644
--- a/doc/api/pypeit.core.scattlight.rst
+++ b/doc/api/pypeit.core.scattlight.rst
@@ -4,5 +4,5 @@ pypeit.core.scattlight module
 .. automodule:: pypeit.core.scattlight
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.skysub.rst b/doc/api/pypeit.core.skysub.rst
index 0d29c42b4f..a324ff4ddb 100644
--- a/doc/api/pypeit.core.skysub.rst
+++ b/doc/api/pypeit.core.skysub.rst
@@ -4,5 +4,5 @@ pypeit.core.skysub module
 .. automodule:: pypeit.core.skysub
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.slitdesign_matching.rst b/doc/api/pypeit.core.slitdesign_matching.rst
index 14e98d8ca1..5739157ee5 100644
--- a/doc/api/pypeit.core.slitdesign_matching.rst
+++ b/doc/api/pypeit.core.slitdesign_matching.rst
@@ -4,5 +4,5 @@ pypeit.core.slitdesign\_matching module
 .. automodule:: pypeit.core.slitdesign_matching
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.spectrum.rst b/doc/api/pypeit.core.spectrum.rst
index 443c33effe..19e5f75c81 100644
--- a/doc/api/pypeit.core.spectrum.rst
+++ b/doc/api/pypeit.core.spectrum.rst
@@ -4,5 +4,5 @@ pypeit.core.spectrum module
 .. automodule:: pypeit.core.spectrum
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.standard.rst b/doc/api/pypeit.core.standard.rst
index f4935d321c..0027131e07 100644
--- a/doc/api/pypeit.core.standard.rst
+++ b/doc/api/pypeit.core.standard.rst
@@ -4,5 +4,5 @@ pypeit.core.standard module
 .. automodule:: pypeit.core.standard
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.telluric.rst b/doc/api/pypeit.core.telluric.rst
index 815d1c518f..ef6f244c09 100644
--- a/doc/api/pypeit.core.telluric.rst
+++ b/doc/api/pypeit.core.telluric.rst
@@ -4,5 +4,5 @@ pypeit.core.telluric module
 .. automodule:: pypeit.core.telluric
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.trace.rst b/doc/api/pypeit.core.trace.rst
index 3e7c041eb1..c3c4888edf 100644
--- a/doc/api/pypeit.core.trace.rst
+++ b/doc/api/pypeit.core.trace.rst
@@ -4,5 +4,5 @@ pypeit.core.trace module
 .. automodule:: pypeit.core.trace
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.tracewave.rst b/doc/api/pypeit.core.tracewave.rst
index 46bbd45844..9b4053aa05 100644
--- a/doc/api/pypeit.core.tracewave.rst
+++ b/doc/api/pypeit.core.tracewave.rst
@@ -4,5 +4,5 @@ pypeit.core.tracewave module
 .. automodule:: pypeit.core.tracewave
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.transform.rst b/doc/api/pypeit.core.transform.rst
index 78471138a1..c67f14bdcd 100644
--- a/doc/api/pypeit.core.transform.rst
+++ b/doc/api/pypeit.core.transform.rst
@@ -4,5 +4,5 @@ pypeit.core.transform module
 .. automodule:: pypeit.core.transform
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wave.rst b/doc/api/pypeit.core.wave.rst
index a05a664abe..fcd37adbd8 100644
--- a/doc/api/pypeit.core.wave.rst
+++ b/doc/api/pypeit.core.wave.rst
@@ -4,5 +4,5 @@ pypeit.core.wave module
 .. automodule:: pypeit.core.wave
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.autoid.rst b/doc/api/pypeit.core.wavecal.autoid.rst
index e5f8d56941..fbfd7292e4 100644
--- a/doc/api/pypeit.core.wavecal.autoid.rst
+++ b/doc/api/pypeit.core.wavecal.autoid.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.autoid module
 .. automodule:: pypeit.core.wavecal.autoid
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.defs.rst b/doc/api/pypeit.core.wavecal.defs.rst
index 3938431f5e..9fca638707 100644
--- a/doc/api/pypeit.core.wavecal.defs.rst
+++ b/doc/api/pypeit.core.wavecal.defs.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.defs module
 .. automodule:: pypeit.core.wavecal.defs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.echelle.rst b/doc/api/pypeit.core.wavecal.echelle.rst
index 2a659d1d1b..f7edab12d9 100644
--- a/doc/api/pypeit.core.wavecal.echelle.rst
+++ b/doc/api/pypeit.core.wavecal.echelle.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.echelle module
 .. automodule:: pypeit.core.wavecal.echelle
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.kdtree_generator.rst b/doc/api/pypeit.core.wavecal.kdtree_generator.rst
index f4e35c3527..23895b115a 100644
--- a/doc/api/pypeit.core.wavecal.kdtree_generator.rst
+++ b/doc/api/pypeit.core.wavecal.kdtree_generator.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.kdtree\_generator module
 .. automodule:: pypeit.core.wavecal.kdtree_generator
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.patterns.rst b/doc/api/pypeit.core.wavecal.patterns.rst
index 860eaa2a70..542fc1d70f 100644
--- a/doc/api/pypeit.core.wavecal.patterns.rst
+++ b/doc/api/pypeit.core.wavecal.patterns.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.patterns module
 .. automodule:: pypeit.core.wavecal.patterns
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.rst b/doc/api/pypeit.core.wavecal.rst
index 87e4afadf7..11bb552bb6 100644
--- a/doc/api/pypeit.core.wavecal.rst
+++ b/doc/api/pypeit.core.wavecal.rst
@@ -23,5 +23,5 @@ Module contents
 .. automodule:: pypeit.core.wavecal
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.templates.rst b/doc/api/pypeit.core.wavecal.templates.rst
index 4e1d5cab21..487c556058 100644
--- a/doc/api/pypeit.core.wavecal.templates.rst
+++ b/doc/api/pypeit.core.wavecal.templates.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.templates module
 .. automodule:: pypeit.core.wavecal.templates
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.waveio.rst b/doc/api/pypeit.core.wavecal.waveio.rst
index 4539fd7481..44a6d5326d 100644
--- a/doc/api/pypeit.core.wavecal.waveio.rst
+++ b/doc/api/pypeit.core.wavecal.waveio.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.waveio module
 .. automodule:: pypeit.core.wavecal.waveio
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.wv_fitting.rst b/doc/api/pypeit.core.wavecal.wv_fitting.rst
index 7761d76aca..d4c8802f58 100644
--- a/doc/api/pypeit.core.wavecal.wv_fitting.rst
+++ b/doc/api/pypeit.core.wavecal.wv_fitting.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.wv\_fitting module
 .. automodule:: pypeit.core.wavecal.wv_fitting
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.core.wavecal.wvutils.rst b/doc/api/pypeit.core.wavecal.wvutils.rst
index 8b9472c0a3..fd6031be18 100644
--- a/doc/api/pypeit.core.wavecal.wvutils.rst
+++ b/doc/api/pypeit.core.wavecal.wvutils.rst
@@ -4,5 +4,5 @@ pypeit.core.wavecal.wvutils module
 .. automodule:: pypeit.core.wavecal.wvutils
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.datamodel.rst b/doc/api/pypeit.datamodel.rst
index d1db02666b..536fdf9485 100644
--- a/doc/api/pypeit.datamodel.rst
+++ b/doc/api/pypeit.datamodel.rst
@@ -4,5 +4,5 @@ pypeit.datamodel module
 .. automodule:: pypeit.datamodel
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.display.display.rst b/doc/api/pypeit.display.display.rst
index 25f2924ccb..8369b666bf 100644
--- a/doc/api/pypeit.display.display.rst
+++ b/doc/api/pypeit.display.display.rst
@@ -4,5 +4,5 @@ pypeit.display.display module
 .. automodule:: pypeit.display.display
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.display.rst b/doc/api/pypeit.display.rst
index 7582d19dcb..32d358d331 100644
--- a/doc/api/pypeit.display.rst
+++ b/doc/api/pypeit.display.rst
@@ -17,5 +17,5 @@ Module contents
 .. automodule:: pypeit.display
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.display.slitwavelength.rst b/doc/api/pypeit.display.slitwavelength.rst
index 055705fd4e..e8b1bd2e84 100644
--- a/doc/api/pypeit.display.slitwavelength.rst
+++ b/doc/api/pypeit.display.slitwavelength.rst
@@ -4,5 +4,5 @@ pypeit.display.slitwavelength module
 .. automodule:: pypeit.display.slitwavelength
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.display.spec1dview.rst b/doc/api/pypeit.display.spec1dview.rst
index 75af322a42..4e1ba0299e 100644
--- a/doc/api/pypeit.display.spec1dview.rst
+++ b/doc/api/pypeit.display.spec1dview.rst
@@ -4,5 +4,5 @@ pypeit.display.spec1dview module
 .. automodule:: pypeit.display.spec1dview
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.edgetrace.rst b/doc/api/pypeit.edgetrace.rst
index 7e3b76235e..909e843561 100644
--- a/doc/api/pypeit.edgetrace.rst
+++ b/doc/api/pypeit.edgetrace.rst
@@ -4,5 +4,5 @@ pypeit.edgetrace module
 .. automodule:: pypeit.edgetrace
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.exceptions.rst b/doc/api/pypeit.exceptions.rst
index de33695d16..10d9f15af8 100644
--- a/doc/api/pypeit.exceptions.rst
+++ b/doc/api/pypeit.exceptions.rst
@@ -4,5 +4,5 @@ pypeit.exceptions module
 .. automodule:: pypeit.exceptions
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.extraction.rst b/doc/api/pypeit.extraction.rst
index 2eaa1514a1..89452fadfd 100644
--- a/doc/api/pypeit.extraction.rst
+++ b/doc/api/pypeit.extraction.rst
@@ -4,5 +4,5 @@ pypeit.extraction module
 .. automodule:: pypeit.extraction
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.find_objects.rst b/doc/api/pypeit.find_objects.rst
index c5dc028ecb..123e54321f 100644
--- a/doc/api/pypeit.find_objects.rst
+++ b/doc/api/pypeit.find_objects.rst
@@ -4,5 +4,5 @@ pypeit.find\_objects module
 .. automodule:: pypeit.find_objects
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.flatfield.rst b/doc/api/pypeit.flatfield.rst
index 362a068033..33942734d7 100644
--- a/doc/api/pypeit.flatfield.rst
+++ b/doc/api/pypeit.flatfield.rst
@@ -4,5 +4,5 @@ pypeit.flatfield module
 .. automodule:: pypeit.flatfield
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.fluxcalibrate.rst b/doc/api/pypeit.fluxcalibrate.rst
index c01b4edcd5..e179d53e59 100644
--- a/doc/api/pypeit.fluxcalibrate.rst
+++ b/doc/api/pypeit.fluxcalibrate.rst
@@ -4,5 +4,5 @@ pypeit.fluxcalibrate module
 .. automodule:: pypeit.fluxcalibrate
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.history.rst b/doc/api/pypeit.history.rst
index e38d3798f6..6d703f60b8 100644
--- a/doc/api/pypeit.history.rst
+++ b/doc/api/pypeit.history.rst
@@ -4,5 +4,5 @@ pypeit.history module
 .. automodule:: pypeit.history
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.bitmaskarray.rst b/doc/api/pypeit.images.bitmaskarray.rst
index dbd79716cd..271183a03c 100644
--- a/doc/api/pypeit.images.bitmaskarray.rst
+++ b/doc/api/pypeit.images.bitmaskarray.rst
@@ -4,5 +4,5 @@ pypeit.images.bitmaskarray module
 .. automodule:: pypeit.images.bitmaskarray
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.buildimage.rst b/doc/api/pypeit.images.buildimage.rst
index fc88ced1ec..35ee0fa320 100644
--- a/doc/api/pypeit.images.buildimage.rst
+++ b/doc/api/pypeit.images.buildimage.rst
@@ -4,5 +4,5 @@ pypeit.images.buildimage module
 .. automodule:: pypeit.images.buildimage
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.combineimage.rst b/doc/api/pypeit.images.combineimage.rst
index 3efa5b8e71..4a9f9dc84d 100644
--- a/doc/api/pypeit.images.combineimage.rst
+++ b/doc/api/pypeit.images.combineimage.rst
@@ -4,5 +4,5 @@ pypeit.images.combineimage module
 .. automodule:: pypeit.images.combineimage
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.detector_container.rst b/doc/api/pypeit.images.detector_container.rst
index 46c35f241b..06e67883a7 100644
--- a/doc/api/pypeit.images.detector_container.rst
+++ b/doc/api/pypeit.images.detector_container.rst
@@ -4,5 +4,5 @@ pypeit.images.detector\_container module
 .. automodule:: pypeit.images.detector_container
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.imagebitmask.rst b/doc/api/pypeit.images.imagebitmask.rst
index f42a35bf98..a3f114e909 100644
--- a/doc/api/pypeit.images.imagebitmask.rst
+++ b/doc/api/pypeit.images.imagebitmask.rst
@@ -4,5 +4,5 @@ pypeit.images.imagebitmask module
 .. automodule:: pypeit.images.imagebitmask
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.mosaic.rst b/doc/api/pypeit.images.mosaic.rst
index 1832094d51..60785f5dcb 100644
--- a/doc/api/pypeit.images.mosaic.rst
+++ b/doc/api/pypeit.images.mosaic.rst
@@ -4,5 +4,5 @@ pypeit.images.mosaic module
 .. automodule:: pypeit.images.mosaic
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.pypeitimage.rst b/doc/api/pypeit.images.pypeitimage.rst
index 7448aa4d8a..739833f861 100644
--- a/doc/api/pypeit.images.pypeitimage.rst
+++ b/doc/api/pypeit.images.pypeitimage.rst
@@ -4,5 +4,5 @@ pypeit.images.pypeitimage module
 .. automodule:: pypeit.images.pypeitimage
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.rawimage.rst b/doc/api/pypeit.images.rawimage.rst
index 96d3f76f8d..fb37d28b99 100644
--- a/doc/api/pypeit.images.rawimage.rst
+++ b/doc/api/pypeit.images.rawimage.rst
@@ -4,5 +4,5 @@ pypeit.images.rawimage module
 .. automodule:: pypeit.images.rawimage
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.images.rst b/doc/api/pypeit.images.rst
index 5c91411ff8..6d7a2108dd 100644
--- a/doc/api/pypeit.images.rst
+++ b/doc/api/pypeit.images.rst
@@ -22,5 +22,5 @@ Module contents
 .. automodule:: pypeit.images
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.inputfiles.rst b/doc/api/pypeit.inputfiles.rst
index f4cbaf7243..d41d2b1d51 100644
--- a/doc/api/pypeit.inputfiles.rst
+++ b/doc/api/pypeit.inputfiles.rst
@@ -4,5 +4,5 @@ pypeit.inputfiles module
 .. automodule:: pypeit.inputfiles
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.io.rst b/doc/api/pypeit.io.rst
index 1d6d4f5b7a..d9fe9ab1c3 100644
--- a/doc/api/pypeit.io.rst
+++ b/doc/api/pypeit.io.rst
@@ -4,5 +4,5 @@ pypeit.io module
 .. automodule:: pypeit.io
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.logger.rst b/doc/api/pypeit.logger.rst
index 3866c64c1a..c3cdfbe745 100644
--- a/doc/api/pypeit.logger.rst
+++ b/doc/api/pypeit.logger.rst
@@ -4,5 +4,5 @@ pypeit.logger module
 .. automodule:: pypeit.logger
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.manual_extract.rst b/doc/api/pypeit.manual_extract.rst
index 163ef5b563..276c31ab33 100644
--- a/doc/api/pypeit.manual_extract.rst
+++ b/doc/api/pypeit.manual_extract.rst
@@ -4,5 +4,5 @@ pypeit.manual\_extract module
 .. automodule:: pypeit.manual_extract
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.metadata.rst b/doc/api/pypeit.metadata.rst
index 8d03b2542a..357de7dffc 100644
--- a/doc/api/pypeit.metadata.rst
+++ b/doc/api/pypeit.metadata.rst
@@ -4,5 +4,5 @@ pypeit.metadata module
 .. automodule:: pypeit.metadata
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.move_median.mmpy.rst b/doc/api/pypeit.move_median.mmpy.rst
index bffa1c8169..c1182bf148 100644
--- a/doc/api/pypeit.move_median.mmpy.rst
+++ b/doc/api/pypeit.move_median.mmpy.rst
@@ -4,5 +4,5 @@ pypeit.move\_median.mmpy module
 .. automodule:: pypeit.move_median.mmpy
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.move_median.move_median.rst b/doc/api/pypeit.move_median.move_median.rst
index 88aac4dce3..b898760bfa 100644
--- a/doc/api/pypeit.move_median.move_median.rst
+++ b/doc/api/pypeit.move_median.move_median.rst
@@ -4,5 +4,5 @@ pypeit.move\_median.move\_median module
 .. automodule:: pypeit.move_median.move_median
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.move_median.rst b/doc/api/pypeit.move_median.rst
index 854ac14d1f..7e619b2728 100644
--- a/doc/api/pypeit.move_median.rst
+++ b/doc/api/pypeit.move_median.rst
@@ -16,5 +16,5 @@ Module contents
 .. automodule:: pypeit.move_median
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.onespec.rst b/doc/api/pypeit.onespec.rst
index efa9a11872..bdbc3d1bd9 100644
--- a/doc/api/pypeit.onespec.rst
+++ b/doc/api/pypeit.onespec.rst
@@ -4,5 +4,5 @@ pypeit.onespec module
 .. automodule:: pypeit.onespec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.orderstack.rst b/doc/api/pypeit.orderstack.rst
index b82f53d120..377c012a82 100644
--- a/doc/api/pypeit.orderstack.rst
+++ b/doc/api/pypeit.orderstack.rst
@@ -4,5 +4,5 @@ pypeit.orderstack module
 .. automodule:: pypeit.orderstack
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.par.parset.rst b/doc/api/pypeit.par.parset.rst
index 51af2207b5..2625b685c6 100644
--- a/doc/api/pypeit.par.parset.rst
+++ b/doc/api/pypeit.par.parset.rst
@@ -4,5 +4,5 @@ pypeit.par.parset module
 .. automodule:: pypeit.par.parset
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.par.pypeitpar.rst b/doc/api/pypeit.par.pypeitpar.rst
index 1a417a5733..f768d0c5df 100644
--- a/doc/api/pypeit.par.pypeitpar.rst
+++ b/doc/api/pypeit.par.pypeitpar.rst
@@ -4,5 +4,5 @@ pypeit.par.pypeitpar module
 .. automodule:: pypeit.par.pypeitpar
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.par.rst b/doc/api/pypeit.par.rst
index 820da9ac6e..a1cb78198e 100644
--- a/doc/api/pypeit.par.rst
+++ b/doc/api/pypeit.par.rst
@@ -17,5 +17,5 @@ Module contents
 .. automodule:: pypeit.par
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.par.util.rst b/doc/api/pypeit.par.util.rst
index a12153f198..41c0759e62 100644
--- a/doc/api/pypeit.par.util.rst
+++ b/doc/api/pypeit.par.util.rst
@@ -4,5 +4,5 @@ pypeit.par.util module
 .. automodule:: pypeit.par.util
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.pypeit.rst b/doc/api/pypeit.pypeit.rst
index b965d28b5c..2ea06e4d42 100644
--- a/doc/api/pypeit.pypeit.rst
+++ b/doc/api/pypeit.pypeit.rst
@@ -4,5 +4,5 @@ pypeit.pypeit module
 .. automodule:: pypeit.pypeit
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.pypeitdata.rst b/doc/api/pypeit.pypeitdata.rst
index 98416058d9..7d161849d8 100644
--- a/doc/api/pypeit.pypeitdata.rst
+++ b/doc/api/pypeit.pypeitdata.rst
@@ -4,5 +4,5 @@ pypeit.pypeitdata module
 .. automodule:: pypeit.pypeitdata
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.pypeitsetup.rst b/doc/api/pypeit.pypeitsetup.rst
index f5f1f3600e..2807e4e001 100644
--- a/doc/api/pypeit.pypeitsetup.rst
+++ b/doc/api/pypeit.pypeitsetup.rst
@@ -4,5 +4,5 @@ pypeit.pypeitsetup module
 .. automodule:: pypeit.pypeitsetup
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.rst b/doc/api/pypeit.rst
index bd0295ee9a..4d45281c12 100644
--- a/doc/api/pypeit.rst
+++ b/doc/api/pypeit.rst
@@ -72,5 +72,5 @@ Module contents
 .. automodule:: pypeit
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.sampling.rst b/doc/api/pypeit.sampling.rst
index 24f1cdcda7..6304b16e74 100644
--- a/doc/api/pypeit.sampling.rst
+++ b/doc/api/pypeit.sampling.rst
@@ -4,5 +4,5 @@ pypeit.sampling module
 .. automodule:: pypeit.sampling
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scattlight.rst b/doc/api/pypeit.scattlight.rst
index 32fb877dce..64298e4e0f 100644
--- a/doc/api/pypeit.scattlight.rst
+++ b/doc/api/pypeit.scattlight.rst
@@ -4,5 +4,5 @@ pypeit.scattlight module
 .. automodule:: pypeit.scattlight
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.arxiv_solution.rst b/doc/api/pypeit.scripts.arxiv_solution.rst
index edae4df2cf..21cfd65236 100644
--- a/doc/api/pypeit.scripts.arxiv_solution.rst
+++ b/doc/api/pypeit.scripts.arxiv_solution.rst
@@ -4,5 +4,5 @@ pypeit.scripts.arxiv\_solution module
 .. automodule:: pypeit.scripts.arxiv_solution
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.cache_github_data.rst b/doc/api/pypeit.scripts.cache_github_data.rst
index e087ac3b92..77f99ad901 100644
--- a/doc/api/pypeit.scripts.cache_github_data.rst
+++ b/doc/api/pypeit.scripts.cache_github_data.rst
@@ -4,5 +4,5 @@ pypeit.scripts.cache\_github\_data module
 .. automodule:: pypeit.scripts.cache_github_data
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_alignments.rst b/doc/api/pypeit.scripts.chk_alignments.rst
index 958feec23b..aee1627497 100644
--- a/doc/api/pypeit.scripts.chk_alignments.rst
+++ b/doc/api/pypeit.scripts.chk_alignments.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_alignments module
 .. automodule:: pypeit.scripts.chk_alignments
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_edges.rst b/doc/api/pypeit.scripts.chk_edges.rst
index 630c2de797..4121b193eb 100644
--- a/doc/api/pypeit.scripts.chk_edges.rst
+++ b/doc/api/pypeit.scripts.chk_edges.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_edges module
 .. automodule:: pypeit.scripts.chk_edges
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_flats.rst b/doc/api/pypeit.scripts.chk_flats.rst
index 510b8f4c1a..3085d0e626 100644
--- a/doc/api/pypeit.scripts.chk_flats.rst
+++ b/doc/api/pypeit.scripts.chk_flats.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_flats module
 .. automodule:: pypeit.scripts.chk_flats
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_flexure.rst b/doc/api/pypeit.scripts.chk_flexure.rst
index 90d71dfdb8..28b91e40ac 100644
--- a/doc/api/pypeit.scripts.chk_flexure.rst
+++ b/doc/api/pypeit.scripts.chk_flexure.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_flexure module
 .. automodule:: pypeit.scripts.chk_flexure
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_for_calibs.rst b/doc/api/pypeit.scripts.chk_for_calibs.rst
index cd429e9ac5..82c2e57eb5 100644
--- a/doc/api/pypeit.scripts.chk_for_calibs.rst
+++ b/doc/api/pypeit.scripts.chk_for_calibs.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_for\_calibs module
 .. automodule:: pypeit.scripts.chk_for_calibs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_noise_1dspec.rst b/doc/api/pypeit.scripts.chk_noise_1dspec.rst
index 560de66329..a624c15820 100644
--- a/doc/api/pypeit.scripts.chk_noise_1dspec.rst
+++ b/doc/api/pypeit.scripts.chk_noise_1dspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_noise\_1dspec module
 .. automodule:: pypeit.scripts.chk_noise_1dspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_noise_2dspec.rst b/doc/api/pypeit.scripts.chk_noise_2dspec.rst
index 5c560be154..6655cdb096 100644
--- a/doc/api/pypeit.scripts.chk_noise_2dspec.rst
+++ b/doc/api/pypeit.scripts.chk_noise_2dspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_noise\_2dspec module
 .. automodule:: pypeit.scripts.chk_noise_2dspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_plugins.rst b/doc/api/pypeit.scripts.chk_plugins.rst
index debcc3f6cd..57b4fce0d9 100644
--- a/doc/api/pypeit.scripts.chk_plugins.rst
+++ b/doc/api/pypeit.scripts.chk_plugins.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_plugins module
 .. automodule:: pypeit.scripts.chk_plugins
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_scattlight.rst b/doc/api/pypeit.scripts.chk_scattlight.rst
index d5fbe11b4a..8e6bd9679b 100644
--- a/doc/api/pypeit.scripts.chk_scattlight.rst
+++ b/doc/api/pypeit.scripts.chk_scattlight.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_scattlight module
 .. automodule:: pypeit.scripts.chk_scattlight
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_tilts.rst b/doc/api/pypeit.scripts.chk_tilts.rst
index 8e5ad4d8b3..8359ca8d7a 100644
--- a/doc/api/pypeit.scripts.chk_tilts.rst
+++ b/doc/api/pypeit.scripts.chk_tilts.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_tilts module
 .. automodule:: pypeit.scripts.chk_tilts
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.chk_wavecalib.rst b/doc/api/pypeit.scripts.chk_wavecalib.rst
index 600c049107..c5079ff13d 100644
--- a/doc/api/pypeit.scripts.chk_wavecalib.rst
+++ b/doc/api/pypeit.scripts.chk_wavecalib.rst
@@ -4,5 +4,5 @@ pypeit.scripts.chk\_wavecalib module
 .. automodule:: pypeit.scripts.chk_wavecalib
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.clean_cache.rst b/doc/api/pypeit.scripts.clean_cache.rst
index 4e0a38cd9e..450989a581 100644
--- a/doc/api/pypeit.scripts.clean_cache.rst
+++ b/doc/api/pypeit.scripts.clean_cache.rst
@@ -4,5 +4,5 @@ pypeit.scripts.clean\_cache module
 .. automodule:: pypeit.scripts.clean_cache
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.coadd_1dspec.rst b/doc/api/pypeit.scripts.coadd_1dspec.rst
index a789145dad..6dd433b8f7 100644
--- a/doc/api/pypeit.scripts.coadd_1dspec.rst
+++ b/doc/api/pypeit.scripts.coadd_1dspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.coadd\_1dspec module
 .. automodule:: pypeit.scripts.coadd_1dspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.coadd_2dspec.rst b/doc/api/pypeit.scripts.coadd_2dspec.rst
index 404d0a4f68..e558e1f2c5 100644
--- a/doc/api/pypeit.scripts.coadd_2dspec.rst
+++ b/doc/api/pypeit.scripts.coadd_2dspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.coadd\_2dspec module
 .. automodule:: pypeit.scripts.coadd_2dspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.coadd_datacube.rst b/doc/api/pypeit.scripts.coadd_datacube.rst
index 06509d7e86..88c529ca9f 100644
--- a/doc/api/pypeit.scripts.coadd_datacube.rst
+++ b/doc/api/pypeit.scripts.coadd_datacube.rst
@@ -4,5 +4,5 @@ pypeit.scripts.coadd\_datacube module
 .. automodule:: pypeit.scripts.coadd_datacube
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.collate_1d.rst b/doc/api/pypeit.scripts.collate_1d.rst
index bf584d81ca..f196a27ab3 100644
--- a/doc/api/pypeit.scripts.collate_1d.rst
+++ b/doc/api/pypeit.scripts.collate_1d.rst
@@ -4,5 +4,5 @@ pypeit.scripts.collate\_1d module
 .. automodule:: pypeit.scripts.collate_1d
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.compare_sky.rst b/doc/api/pypeit.scripts.compare_sky.rst
index 9b6a45a373..cfb5a70e23 100644
--- a/doc/api/pypeit.scripts.compare_sky.rst
+++ b/doc/api/pypeit.scripts.compare_sky.rst
@@ -4,5 +4,5 @@ pypeit.scripts.compare\_sky module
 .. automodule:: pypeit.scripts.compare_sky
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.compile_wvarxiv.rst b/doc/api/pypeit.scripts.compile_wvarxiv.rst
index fe84bc1030..6f705b1215 100644
--- a/doc/api/pypeit.scripts.compile_wvarxiv.rst
+++ b/doc/api/pypeit.scripts.compile_wvarxiv.rst
@@ -4,5 +4,5 @@ pypeit.scripts.compile\_wvarxiv module
 .. automodule:: pypeit.scripts.compile_wvarxiv
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.edge_inspector.rst b/doc/api/pypeit.scripts.edge_inspector.rst
index dd72240f07..e3712bdee3 100644
--- a/doc/api/pypeit.scripts.edge_inspector.rst
+++ b/doc/api/pypeit.scripts.edge_inspector.rst
@@ -4,5 +4,5 @@ pypeit.scripts.edge\_inspector module
 .. automodule:: pypeit.scripts.edge_inspector
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.extract_datacube.rst b/doc/api/pypeit.scripts.extract_datacube.rst
index 49f3a9d13e..13bd3f4b51 100644
--- a/doc/api/pypeit.scripts.extract_datacube.rst
+++ b/doc/api/pypeit.scripts.extract_datacube.rst
@@ -4,5 +4,5 @@ pypeit.scripts.extract\_datacube module
 .. automodule:: pypeit.scripts.extract_datacube
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.flux_calib.rst b/doc/api/pypeit.scripts.flux_calib.rst
index ccd28f2d12..76bd8bddc6 100644
--- a/doc/api/pypeit.scripts.flux_calib.rst
+++ b/doc/api/pypeit.scripts.flux_calib.rst
@@ -4,5 +4,5 @@ pypeit.scripts.flux\_calib module
 .. automodule:: pypeit.scripts.flux_calib
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.flux_setup.rst b/doc/api/pypeit.scripts.flux_setup.rst
index 606d57b719..6f06b12766 100644
--- a/doc/api/pypeit.scripts.flux_setup.rst
+++ b/doc/api/pypeit.scripts.flux_setup.rst
@@ -4,5 +4,5 @@ pypeit.scripts.flux\_setup module
 .. automodule:: pypeit.scripts.flux_setup
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.identify.rst b/doc/api/pypeit.scripts.identify.rst
index 334d3100f8..8a5d93b120 100644
--- a/doc/api/pypeit.scripts.identify.rst
+++ b/doc/api/pypeit.scripts.identify.rst
@@ -4,5 +4,5 @@ pypeit.scripts.identify module
 .. automodule:: pypeit.scripts.identify
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.install_extinctfile.rst b/doc/api/pypeit.scripts.install_extinctfile.rst
index 785256d3d9..078cf09b02 100644
--- a/doc/api/pypeit.scripts.install_extinctfile.rst
+++ b/doc/api/pypeit.scripts.install_extinctfile.rst
@@ -4,5 +4,5 @@ pypeit.scripts.install\_extinctfile module
 .. automodule:: pypeit.scripts.install_extinctfile
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.install_linelist.rst b/doc/api/pypeit.scripts.install_linelist.rst
index 45254e949b..810be134d3 100644
--- a/doc/api/pypeit.scripts.install_linelist.rst
+++ b/doc/api/pypeit.scripts.install_linelist.rst
@@ -4,5 +4,5 @@ pypeit.scripts.install\_linelist module
 .. automodule:: pypeit.scripts.install_linelist
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.install_ql_calibs.rst b/doc/api/pypeit.scripts.install_ql_calibs.rst
index 374e443c84..068b6b1fe2 100644
--- a/doc/api/pypeit.scripts.install_ql_calibs.rst
+++ b/doc/api/pypeit.scripts.install_ql_calibs.rst
@@ -4,5 +4,5 @@ pypeit.scripts.install\_ql\_calibs module
 .. automodule:: pypeit.scripts.install_ql_calibs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.install_telluric.rst b/doc/api/pypeit.scripts.install_telluric.rst
index 0f913253a6..f46e2a1d9b 100644
--- a/doc/api/pypeit.scripts.install_telluric.rst
+++ b/doc/api/pypeit.scripts.install_telluric.rst
@@ -4,5 +4,5 @@ pypeit.scripts.install\_telluric module
 .. automodule:: pypeit.scripts.install_telluric
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.install_wvarxiv.rst b/doc/api/pypeit.scripts.install_wvarxiv.rst
index c83f192be2..a143a1befc 100644
--- a/doc/api/pypeit.scripts.install_wvarxiv.rst
+++ b/doc/api/pypeit.scripts.install_wvarxiv.rst
@@ -4,5 +4,5 @@ pypeit.scripts.install\_wvarxiv module
 .. automodule:: pypeit.scripts.install_wvarxiv
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.lowrdx_skyspec.rst b/doc/api/pypeit.scripts.lowrdx_skyspec.rst
index b25ec1d4d6..e92fc8238b 100644
--- a/doc/api/pypeit.scripts.lowrdx_skyspec.rst
+++ b/doc/api/pypeit.scripts.lowrdx_skyspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.lowrdx\_skyspec module
 .. automodule:: pypeit.scripts.lowrdx_skyspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.multislit_flexure.rst b/doc/api/pypeit.scripts.multislit_flexure.rst
index d70c5c3da3..b44ce3d888 100644
--- a/doc/api/pypeit.scripts.multislit_flexure.rst
+++ b/doc/api/pypeit.scripts.multislit_flexure.rst
@@ -4,5 +4,5 @@ pypeit.scripts.multislit\_flexure module
 .. automodule:: pypeit.scripts.multislit_flexure
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.obslog.rst b/doc/api/pypeit.scripts.obslog.rst
index 1860afd21d..d8bb3a332b 100644
--- a/doc/api/pypeit.scripts.obslog.rst
+++ b/doc/api/pypeit.scripts.obslog.rst
@@ -4,5 +4,5 @@ pypeit.scripts.obslog module
 .. automodule:: pypeit.scripts.obslog
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.parse_slits.rst b/doc/api/pypeit.scripts.parse_slits.rst
index 7b6a9721ec..85708b7f2c 100644
--- a/doc/api/pypeit.scripts.parse_slits.rst
+++ b/doc/api/pypeit.scripts.parse_slits.rst
@@ -4,5 +4,5 @@ pypeit.scripts.parse\_slits module
 .. automodule:: pypeit.scripts.parse_slits
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.print_bpm.rst b/doc/api/pypeit.scripts.print_bpm.rst
index d042ad948d..99b13618b8 100644
--- a/doc/api/pypeit.scripts.print_bpm.rst
+++ b/doc/api/pypeit.scripts.print_bpm.rst
@@ -4,5 +4,5 @@ pypeit.scripts.print\_bpm module
 .. automodule:: pypeit.scripts.print_bpm
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.qa_html.rst b/doc/api/pypeit.scripts.qa_html.rst
index dfa683d908..154bc7a06e 100644
--- a/doc/api/pypeit.scripts.qa_html.rst
+++ b/doc/api/pypeit.scripts.qa_html.rst
@@ -4,5 +4,5 @@ pypeit.scripts.qa\_html module
 .. automodule:: pypeit.scripts.qa_html
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.ql.rst b/doc/api/pypeit.scripts.ql.rst
index f36b2b44e1..d1c6f255d2 100644
--- a/doc/api/pypeit.scripts.ql.rst
+++ b/doc/api/pypeit.scripts.ql.rst
@@ -4,5 +4,5 @@ pypeit.scripts.ql module
 .. automodule:: pypeit.scripts.ql
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.rst b/doc/api/pypeit.scripts.rst
index 7651da94bd..a93cea0ccf 100644
--- a/doc/api/pypeit.scripts.rst
+++ b/doc/api/pypeit.scripts.rst
@@ -67,5 +67,5 @@ Module contents
 .. automodule:: pypeit.scripts
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.run_pypeit.rst b/doc/api/pypeit.scripts.run_pypeit.rst
index 6dc3a384a0..ac81f1f6ed 100644
--- a/doc/api/pypeit.scripts.run_pypeit.rst
+++ b/doc/api/pypeit.scripts.run_pypeit.rst
@@ -4,5 +4,5 @@ pypeit.scripts.run\_pypeit module
 .. automodule:: pypeit.scripts.run_pypeit
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.run_to_calibstep.rst b/doc/api/pypeit.scripts.run_to_calibstep.rst
index 6f60edf3f7..9dc79466f5 100644
--- a/doc/api/pypeit.scripts.run_to_calibstep.rst
+++ b/doc/api/pypeit.scripts.run_to_calibstep.rst
@@ -4,5 +4,5 @@ pypeit.scripts.run\_to\_calibstep module
 .. automodule:: pypeit.scripts.run_to_calibstep
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.scriptbase.rst b/doc/api/pypeit.scripts.scriptbase.rst
index 7b74bba8f0..a6f42fa83d 100644
--- a/doc/api/pypeit.scripts.scriptbase.rst
+++ b/doc/api/pypeit.scripts.scriptbase.rst
@@ -4,5 +4,5 @@ pypeit.scripts.scriptbase module
 .. automodule:: pypeit.scripts.scriptbase
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.sensfunc.rst b/doc/api/pypeit.scripts.sensfunc.rst
index d3ff38d134..f5caa176fc 100644
--- a/doc/api/pypeit.scripts.sensfunc.rst
+++ b/doc/api/pypeit.scripts.sensfunc.rst
@@ -4,5 +4,5 @@ pypeit.scripts.sensfunc module
 .. automodule:: pypeit.scripts.sensfunc
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.setup.rst b/doc/api/pypeit.scripts.setup.rst
index 0f2b649539..ec508fd88e 100644
--- a/doc/api/pypeit.scripts.setup.rst
+++ b/doc/api/pypeit.scripts.setup.rst
@@ -4,5 +4,5 @@ pypeit.scripts.setup module
 .. automodule:: pypeit.scripts.setup
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.setup_coadd2d.rst b/doc/api/pypeit.scripts.setup_coadd2d.rst
index 2e399b66b4..5726197e54 100644
--- a/doc/api/pypeit.scripts.setup_coadd2d.rst
+++ b/doc/api/pypeit.scripts.setup_coadd2d.rst
@@ -4,5 +4,5 @@ pypeit.scripts.setup\_coadd2d module
 .. automodule:: pypeit.scripts.setup_coadd2d
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.show_1dspec.rst b/doc/api/pypeit.scripts.show_1dspec.rst
index 1d07b42b39..4408a0b28b 100644
--- a/doc/api/pypeit.scripts.show_1dspec.rst
+++ b/doc/api/pypeit.scripts.show_1dspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.show\_1dspec module
 .. automodule:: pypeit.scripts.show_1dspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.show_2dspec.rst b/doc/api/pypeit.scripts.show_2dspec.rst
index c56d79a3b4..bd0d852800 100644
--- a/doc/api/pypeit.scripts.show_2dspec.rst
+++ b/doc/api/pypeit.scripts.show_2dspec.rst
@@ -4,5 +4,5 @@ pypeit.scripts.show\_2dspec module
 .. automodule:: pypeit.scripts.show_2dspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.show_arxiv.rst b/doc/api/pypeit.scripts.show_arxiv.rst
index 857eec6004..d488ba1b7d 100644
--- a/doc/api/pypeit.scripts.show_arxiv.rst
+++ b/doc/api/pypeit.scripts.show_arxiv.rst
@@ -4,5 +4,5 @@ pypeit.scripts.show\_arxiv module
 .. automodule:: pypeit.scripts.show_arxiv
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.show_pixflat.rst b/doc/api/pypeit.scripts.show_pixflat.rst
index b66038acae..0d60f793ff 100644
--- a/doc/api/pypeit.scripts.show_pixflat.rst
+++ b/doc/api/pypeit.scripts.show_pixflat.rst
@@ -4,5 +4,5 @@ pypeit.scripts.show\_pixflat module
 .. automodule:: pypeit.scripts.show_pixflat
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.show_wvcalib.rst b/doc/api/pypeit.scripts.show_wvcalib.rst
index 259597fa3c..7fcf5f3f7f 100644
--- a/doc/api/pypeit.scripts.show_wvcalib.rst
+++ b/doc/api/pypeit.scripts.show_wvcalib.rst
@@ -4,5 +4,5 @@ pypeit.scripts.show\_wvcalib module
 .. automodule:: pypeit.scripts.show_wvcalib
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.skysub_regions.rst b/doc/api/pypeit.scripts.skysub_regions.rst
index 650e018b58..ee005c96ba 100644
--- a/doc/api/pypeit.scripts.skysub_regions.rst
+++ b/doc/api/pypeit.scripts.skysub_regions.rst
@@ -4,5 +4,5 @@ pypeit.scripts.skysub\_regions module
 .. automodule:: pypeit.scripts.skysub_regions
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.tellfit.rst b/doc/api/pypeit.scripts.tellfit.rst
index 154b686a6c..be6ac4bcb8 100644
--- a/doc/api/pypeit.scripts.tellfit.rst
+++ b/doc/api/pypeit.scripts.tellfit.rst
@@ -4,5 +4,5 @@ pypeit.scripts.tellfit module
 .. automodule:: pypeit.scripts.tellfit
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.trace_edges.rst b/doc/api/pypeit.scripts.trace_edges.rst
index fa294855de..0eb5a94368 100644
--- a/doc/api/pypeit.scripts.trace_edges.rst
+++ b/doc/api/pypeit.scripts.trace_edges.rst
@@ -4,5 +4,5 @@ pypeit.scripts.trace\_edges module
 .. automodule:: pypeit.scripts.trace_edges
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.version.rst b/doc/api/pypeit.scripts.version.rst
index bacaeea955..c95dd6d560 100644
--- a/doc/api/pypeit.scripts.version.rst
+++ b/doc/api/pypeit.scripts.version.rst
@@ -4,5 +4,5 @@ pypeit.scripts.version module
 .. automodule:: pypeit.scripts.version
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.scripts.view_fits.rst b/doc/api/pypeit.scripts.view_fits.rst
index 83254de9c8..1c7891f15d 100644
--- a/doc/api/pypeit.scripts.view_fits.rst
+++ b/doc/api/pypeit.scripts.view_fits.rst
@@ -4,5 +4,5 @@ pypeit.scripts.view\_fits module
 .. automodule:: pypeit.scripts.view_fits
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.sensfilearchive.rst b/doc/api/pypeit.sensfilearchive.rst
index a07e96643e..f24eb2edaa 100644
--- a/doc/api/pypeit.sensfilearchive.rst
+++ b/doc/api/pypeit.sensfilearchive.rst
@@ -4,5 +4,5 @@ pypeit.sensfilearchive module
 .. automodule:: pypeit.sensfilearchive
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.sensfunc.rst b/doc/api/pypeit.sensfunc.rst
index 75daf69e4c..ee84e70122 100644
--- a/doc/api/pypeit.sensfunc.rst
+++ b/doc/api/pypeit.sensfunc.rst
@@ -4,5 +4,5 @@ pypeit.sensfunc module
 .. automodule:: pypeit.sensfunc
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.setup_gui.controller.rst b/doc/api/pypeit.setup_gui.controller.rst
index 10a7b29399..db4869ae60 100644
--- a/doc/api/pypeit.setup_gui.controller.rst
+++ b/doc/api/pypeit.setup_gui.controller.rst
@@ -4,5 +4,5 @@ pypeit.setup\_gui.controller module
 .. automodule:: pypeit.setup_gui.controller
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.setup_gui.dialog_helpers.rst b/doc/api/pypeit.setup_gui.dialog_helpers.rst
index 619df10f59..5eec963f90 100644
--- a/doc/api/pypeit.setup_gui.dialog_helpers.rst
+++ b/doc/api/pypeit.setup_gui.dialog_helpers.rst
@@ -4,5 +4,5 @@ pypeit.setup\_gui.dialog\_helpers module
 .. automodule:: pypeit.setup_gui.dialog_helpers
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.setup_gui.model.rst b/doc/api/pypeit.setup_gui.model.rst
index 2895051dc3..a43f428914 100644
--- a/doc/api/pypeit.setup_gui.model.rst
+++ b/doc/api/pypeit.setup_gui.model.rst
@@ -4,5 +4,5 @@ pypeit.setup\_gui.model module
 .. automodule:: pypeit.setup_gui.model
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.setup_gui.rst b/doc/api/pypeit.setup_gui.rst
index 7bfbe99cc0..e329bf7d04 100644
--- a/doc/api/pypeit.setup_gui.rst
+++ b/doc/api/pypeit.setup_gui.rst
@@ -19,5 +19,5 @@ Module contents
 .. automodule:: pypeit.setup_gui
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.setup_gui.text_viewer.rst b/doc/api/pypeit.setup_gui.text_viewer.rst
index fa2e4eada2..aef7f6d522 100644
--- a/doc/api/pypeit.setup_gui.text_viewer.rst
+++ b/doc/api/pypeit.setup_gui.text_viewer.rst
@@ -4,5 +4,5 @@ pypeit.setup\_gui.text\_viewer module
 .. automodule:: pypeit.setup_gui.text_viewer
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.setup_gui.view.rst b/doc/api/pypeit.setup_gui.view.rst
index 7411f4dcc6..f807e00e2e 100644
--- a/doc/api/pypeit.setup_gui.view.rst
+++ b/doc/api/pypeit.setup_gui.view.rst
@@ -4,5 +4,5 @@ pypeit.setup\_gui.view module
 .. automodule:: pypeit.setup_gui.view
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.slittrace.rst b/doc/api/pypeit.slittrace.rst
index 104e97dff8..097489b480 100644
--- a/doc/api/pypeit.slittrace.rst
+++ b/doc/api/pypeit.slittrace.rst
@@ -4,5 +4,5 @@ pypeit.slittrace module
 .. automodule:: pypeit.slittrace
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spec2dobj.rst b/doc/api/pypeit.spec2dobj.rst
index d283bcfc79..5dd8cbd874 100644
--- a/doc/api/pypeit.spec2dobj.rst
+++ b/doc/api/pypeit.spec2dobj.rst
@@ -4,5 +4,5 @@ pypeit.spec2dobj module
 .. automodule:: pypeit.spec2dobj
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.specobj.rst b/doc/api/pypeit.specobj.rst
index 55ed3e3a80..bbe382d261 100644
--- a/doc/api/pypeit.specobj.rst
+++ b/doc/api/pypeit.specobj.rst
@@ -4,5 +4,5 @@ pypeit.specobj module
 .. automodule:: pypeit.specobj
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.specobjs.rst b/doc/api/pypeit.specobjs.rst
index 03d81d1072..d8b95cec0b 100644
--- a/doc/api/pypeit.specobjs.rst
+++ b/doc/api/pypeit.specobjs.rst
@@ -4,5 +4,5 @@ pypeit.specobjs module
 .. automodule:: pypeit.specobjs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.aat_uhrf.rst b/doc/api/pypeit.spectrographs.aat_uhrf.rst
index 2f4e657404..f34ebbcb91 100644
--- a/doc/api/pypeit.spectrographs.aat_uhrf.rst
+++ b/doc/api/pypeit.spectrographs.aat_uhrf.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.aat\_uhrf module
 .. automodule:: pypeit.spectrographs.aat_uhrf
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.apf_levy.rst b/doc/api/pypeit.spectrographs.apf_levy.rst
index 93b2ac050c..205d05b973 100644
--- a/doc/api/pypeit.spectrographs.apf_levy.rst
+++ b/doc/api/pypeit.spectrographs.apf_levy.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.apf\_levy module
 .. automodule:: pypeit.spectrographs.apf_levy
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.bok_bc.rst b/doc/api/pypeit.spectrographs.bok_bc.rst
index 9e277e5459..9290c6a5ae 100644
--- a/doc/api/pypeit.spectrographs.bok_bc.rst
+++ b/doc/api/pypeit.spectrographs.bok_bc.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.bok\_bc module
 .. automodule:: pypeit.spectrographs.bok_bc
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.gemini_flamingos.rst b/doc/api/pypeit.spectrographs.gemini_flamingos.rst
index c32fb046fb..0d99f81d00 100644
--- a/doc/api/pypeit.spectrographs.gemini_flamingos.rst
+++ b/doc/api/pypeit.spectrographs.gemini_flamingos.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.gemini\_flamingos module
 .. automodule:: pypeit.spectrographs.gemini_flamingos
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.gemini_gmos.rst b/doc/api/pypeit.spectrographs.gemini_gmos.rst
index cfbcb8e216..9c43af4468 100644
--- a/doc/api/pypeit.spectrographs.gemini_gmos.rst
+++ b/doc/api/pypeit.spectrographs.gemini_gmos.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.gemini\_gmos module
 .. automodule:: pypeit.spectrographs.gemini_gmos
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.gemini_gnirs.rst b/doc/api/pypeit.spectrographs.gemini_gnirs.rst
index c4f4337ee4..5b7756bc07 100644
--- a/doc/api/pypeit.spectrographs.gemini_gnirs.rst
+++ b/doc/api/pypeit.spectrographs.gemini_gnirs.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.gemini\_gnirs module
 .. automodule:: pypeit.spectrographs.gemini_gnirs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.gtc_osiris.rst b/doc/api/pypeit.spectrographs.gtc_osiris.rst
index 649568bc94..00a36f8951 100644
--- a/doc/api/pypeit.spectrographs.gtc_osiris.rst
+++ b/doc/api/pypeit.spectrographs.gtc_osiris.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.gtc\_osiris module
 .. automodule:: pypeit.spectrographs.gtc_osiris
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.jwst_nircam.rst b/doc/api/pypeit.spectrographs.jwst_nircam.rst
index 2586066cd3..26d13542c3 100644
--- a/doc/api/pypeit.spectrographs.jwst_nircam.rst
+++ b/doc/api/pypeit.spectrographs.jwst_nircam.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.jwst\_nircam module
 .. automodule:: pypeit.spectrographs.jwst_nircam
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.jwst_nirspec.rst b/doc/api/pypeit.spectrographs.jwst_nirspec.rst
index aacb241203..dd1ff849d8 100644
--- a/doc/api/pypeit.spectrographs.jwst_nirspec.rst
+++ b/doc/api/pypeit.spectrographs.jwst_nirspec.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.jwst\_nirspec module
 .. automodule:: pypeit.spectrographs.jwst_nirspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_deimos.rst b/doc/api/pypeit.spectrographs.keck_deimos.rst
index d2468c4a8d..195a2748b6 100644
--- a/doc/api/pypeit.spectrographs.keck_deimos.rst
+++ b/doc/api/pypeit.spectrographs.keck_deimos.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_deimos module
 .. automodule:: pypeit.spectrographs.keck_deimos
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_esi.rst b/doc/api/pypeit.spectrographs.keck_esi.rst
index b87b500d22..810488e07a 100644
--- a/doc/api/pypeit.spectrographs.keck_esi.rst
+++ b/doc/api/pypeit.spectrographs.keck_esi.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_esi module
 .. automodule:: pypeit.spectrographs.keck_esi
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_hires.rst b/doc/api/pypeit.spectrographs.keck_hires.rst
index c109b7ecbe..6936885e4b 100644
--- a/doc/api/pypeit.spectrographs.keck_hires.rst
+++ b/doc/api/pypeit.spectrographs.keck_hires.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_hires module
 .. automodule:: pypeit.spectrographs.keck_hires
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_kcwi.rst b/doc/api/pypeit.spectrographs.keck_kcwi.rst
index d9a2553071..cc14d51436 100644
--- a/doc/api/pypeit.spectrographs.keck_kcwi.rst
+++ b/doc/api/pypeit.spectrographs.keck_kcwi.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_kcwi module
 .. automodule:: pypeit.spectrographs.keck_kcwi
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_lris.rst b/doc/api/pypeit.spectrographs.keck_lris.rst
index 70b960af3a..b193db1528 100644
--- a/doc/api/pypeit.spectrographs.keck_lris.rst
+++ b/doc/api/pypeit.spectrographs.keck_lris.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_lris module
 .. automodule:: pypeit.spectrographs.keck_lris
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_mosfire.rst b/doc/api/pypeit.spectrographs.keck_mosfire.rst
index 7393aec196..5b7d38c20c 100644
--- a/doc/api/pypeit.spectrographs.keck_mosfire.rst
+++ b/doc/api/pypeit.spectrographs.keck_mosfire.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_mosfire module
 .. automodule:: pypeit.spectrographs.keck_mosfire
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_nires.rst b/doc/api/pypeit.spectrographs.keck_nires.rst
index aedecf22d6..798e86eda5 100644
--- a/doc/api/pypeit.spectrographs.keck_nires.rst
+++ b/doc/api/pypeit.spectrographs.keck_nires.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_nires module
 .. automodule:: pypeit.spectrographs.keck_nires
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.keck_nirspec.rst b/doc/api/pypeit.spectrographs.keck_nirspec.rst
index a912c73f18..37bb6e19eb 100644
--- a/doc/api/pypeit.spectrographs.keck_nirspec.rst
+++ b/doc/api/pypeit.spectrographs.keck_nirspec.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.keck\_nirspec module
 .. automodule:: pypeit.spectrographs.keck_nirspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.lbt_luci.rst b/doc/api/pypeit.spectrographs.lbt_luci.rst
index 59f138eed3..3119381f1f 100644
--- a/doc/api/pypeit.spectrographs.lbt_luci.rst
+++ b/doc/api/pypeit.spectrographs.lbt_luci.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.lbt\_luci module
 .. automodule:: pypeit.spectrographs.lbt_luci
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.lbt_mods.rst b/doc/api/pypeit.spectrographs.lbt_mods.rst
index 1baf2dc95d..e969150ba6 100644
--- a/doc/api/pypeit.spectrographs.lbt_mods.rst
+++ b/doc/api/pypeit.spectrographs.lbt_mods.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.lbt\_mods module
 .. automodule:: pypeit.spectrographs.lbt_mods
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.ldt_deveny.rst b/doc/api/pypeit.spectrographs.ldt_deveny.rst
index b05d3111aa..00fb9a926b 100644
--- a/doc/api/pypeit.spectrographs.ldt_deveny.rst
+++ b/doc/api/pypeit.spectrographs.ldt_deveny.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.ldt\_deveny module
 .. automodule:: pypeit.spectrographs.ldt_deveny
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.magellan_fire.rst b/doc/api/pypeit.spectrographs.magellan_fire.rst
index 390851329a..3c6cc33a01 100644
--- a/doc/api/pypeit.spectrographs.magellan_fire.rst
+++ b/doc/api/pypeit.spectrographs.magellan_fire.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.magellan\_fire module
 .. automodule:: pypeit.spectrographs.magellan_fire
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.magellan_mage.rst b/doc/api/pypeit.spectrographs.magellan_mage.rst
index 8a2e67f7a5..da69a11f04 100644
--- a/doc/api/pypeit.spectrographs.magellan_mage.rst
+++ b/doc/api/pypeit.spectrographs.magellan_mage.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.magellan\_mage module
 .. automodule:: pypeit.spectrographs.magellan_mage
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.mdm_modspec.rst b/doc/api/pypeit.spectrographs.mdm_modspec.rst
index 5e669a872b..7e6cbf668e 100644
--- a/doc/api/pypeit.spectrographs.mdm_modspec.rst
+++ b/doc/api/pypeit.spectrographs.mdm_modspec.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.mdm\_modspec module
 .. automodule:: pypeit.spectrographs.mdm_modspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.mdm_osmos.rst b/doc/api/pypeit.spectrographs.mdm_osmos.rst
index bd9ca5a9dd..2b7e40a506 100644
--- a/doc/api/pypeit.spectrographs.mdm_osmos.rst
+++ b/doc/api/pypeit.spectrographs.mdm_osmos.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.mdm\_osmos module
 .. automodule:: pypeit.spectrographs.mdm_osmos
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.mmt_binospec.rst b/doc/api/pypeit.spectrographs.mmt_binospec.rst
index 221cca405a..07cf05d086 100644
--- a/doc/api/pypeit.spectrographs.mmt_binospec.rst
+++ b/doc/api/pypeit.spectrographs.mmt_binospec.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.mmt\_binospec module
 .. automodule:: pypeit.spectrographs.mmt_binospec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.mmt_bluechannel.rst b/doc/api/pypeit.spectrographs.mmt_bluechannel.rst
index 28728e0bea..c364bcbe45 100644
--- a/doc/api/pypeit.spectrographs.mmt_bluechannel.rst
+++ b/doc/api/pypeit.spectrographs.mmt_bluechannel.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.mmt\_bluechannel module
 .. automodule:: pypeit.spectrographs.mmt_bluechannel
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.mmt_mmirs.rst b/doc/api/pypeit.spectrographs.mmt_mmirs.rst
index 443dc48211..5df5f2d01c 100644
--- a/doc/api/pypeit.spectrographs.mmt_mmirs.rst
+++ b/doc/api/pypeit.spectrographs.mmt_mmirs.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.mmt\_mmirs module
 .. automodule:: pypeit.spectrographs.mmt_mmirs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.not_alfosc.rst b/doc/api/pypeit.spectrographs.not_alfosc.rst
index 7abd8d7919..3df83d1e40 100644
--- a/doc/api/pypeit.spectrographs.not_alfosc.rst
+++ b/doc/api/pypeit.spectrographs.not_alfosc.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.not\_alfosc module
 .. automodule:: pypeit.spectrographs.not_alfosc
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.ntt_efosc2.rst b/doc/api/pypeit.spectrographs.ntt_efosc2.rst
index d7162bcfc6..1363bc06ea 100644
--- a/doc/api/pypeit.spectrographs.ntt_efosc2.rst
+++ b/doc/api/pypeit.spectrographs.ntt_efosc2.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.ntt\_efosc2 module
 .. automodule:: pypeit.spectrographs.ntt_efosc2
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.opticalmodel.rst b/doc/api/pypeit.spectrographs.opticalmodel.rst
index 06e73a576e..2cd7f357b2 100644
--- a/doc/api/pypeit.spectrographs.opticalmodel.rst
+++ b/doc/api/pypeit.spectrographs.opticalmodel.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.opticalmodel module
 .. automodule:: pypeit.spectrographs.opticalmodel
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.p200_dbsp.rst b/doc/api/pypeit.spectrographs.p200_dbsp.rst
index 96e1d05150..2a757a3b2b 100644
--- a/doc/api/pypeit.spectrographs.p200_dbsp.rst
+++ b/doc/api/pypeit.spectrographs.p200_dbsp.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.p200\_dbsp module
 .. automodule:: pypeit.spectrographs.p200_dbsp
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.p200_ngps.rst b/doc/api/pypeit.spectrographs.p200_ngps.rst
index 7e16b030d6..2ce00db8ae 100644
--- a/doc/api/pypeit.spectrographs.p200_ngps.rst
+++ b/doc/api/pypeit.spectrographs.p200_ngps.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.p200\_ngps module
 .. automodule:: pypeit.spectrographs.p200_ngps
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.p200_tspec.rst b/doc/api/pypeit.spectrographs.p200_tspec.rst
index 8330e3a70e..ce9072d770 100644
--- a/doc/api/pypeit.spectrographs.p200_tspec.rst
+++ b/doc/api/pypeit.spectrographs.p200_tspec.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.p200\_tspec module
 .. automodule:: pypeit.spectrographs.p200_tspec
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.rst b/doc/api/pypeit.spectrographs.rst
index be51c00fdf..b0ad2afd4b 100644
--- a/doc/api/pypeit.spectrographs.rst
+++ b/doc/api/pypeit.spectrographs.rst
@@ -58,5 +58,5 @@ Module contents
 .. automodule:: pypeit.spectrographs
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.shane_kast.rst b/doc/api/pypeit.spectrographs.shane_kast.rst
index f72ec77c84..fed74241a8 100644
--- a/doc/api/pypeit.spectrographs.shane_kast.rst
+++ b/doc/api/pypeit.spectrographs.shane_kast.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.shane\_kast module
 .. automodule:: pypeit.spectrographs.shane_kast
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.slitmask.rst b/doc/api/pypeit.spectrographs.slitmask.rst
index 4142e34c82..862a82193a 100644
--- a/doc/api/pypeit.spectrographs.slitmask.rst
+++ b/doc/api/pypeit.spectrographs.slitmask.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.slitmask module
 .. automodule:: pypeit.spectrographs.slitmask
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.soar_goodman.rst b/doc/api/pypeit.spectrographs.soar_goodman.rst
index ee0e21cf01..de65052f88 100644
--- a/doc/api/pypeit.spectrographs.soar_goodman.rst
+++ b/doc/api/pypeit.spectrographs.soar_goodman.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.soar\_goodman module
 .. automodule:: pypeit.spectrographs.soar_goodman
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.spectrograph.rst b/doc/api/pypeit.spectrographs.spectrograph.rst
index 6184f30efc..75f69edd8f 100644
--- a/doc/api/pypeit.spectrographs.spectrograph.rst
+++ b/doc/api/pypeit.spectrographs.spectrograph.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.spectrograph module
 .. automodule:: pypeit.spectrographs.spectrograph
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.subaru_focas.rst b/doc/api/pypeit.spectrographs.subaru_focas.rst
index 876c1d499f..e06437e0b7 100644
--- a/doc/api/pypeit.spectrographs.subaru_focas.rst
+++ b/doc/api/pypeit.spectrographs.subaru_focas.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.subaru\_focas module
 .. automodule:: pypeit.spectrographs.subaru_focas
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.tng_dolores.rst b/doc/api/pypeit.spectrographs.tng_dolores.rst
index d38be493a1..c273c07702 100644
--- a/doc/api/pypeit.spectrographs.tng_dolores.rst
+++ b/doc/api/pypeit.spectrographs.tng_dolores.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.tng\_dolores module
 .. automodule:: pypeit.spectrographs.tng_dolores
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.util.rst b/doc/api/pypeit.spectrographs.util.rst
index 6f39b4cdc9..42f772458d 100644
--- a/doc/api/pypeit.spectrographs.util.rst
+++ b/doc/api/pypeit.spectrographs.util.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.util module
 .. automodule:: pypeit.spectrographs.util
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.vlt_fors.rst b/doc/api/pypeit.spectrographs.vlt_fors.rst
index 0d009c8541..8782be7f9d 100644
--- a/doc/api/pypeit.spectrographs.vlt_fors.rst
+++ b/doc/api/pypeit.spectrographs.vlt_fors.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.vlt\_fors module
 .. automodule:: pypeit.spectrographs.vlt_fors
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.vlt_sinfoni.rst b/doc/api/pypeit.spectrographs.vlt_sinfoni.rst
index 1283ad7c5a..8596ec0586 100644
--- a/doc/api/pypeit.spectrographs.vlt_sinfoni.rst
+++ b/doc/api/pypeit.spectrographs.vlt_sinfoni.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.vlt\_sinfoni module
 .. automodule:: pypeit.spectrographs.vlt_sinfoni
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.vlt_xshooter.rst b/doc/api/pypeit.spectrographs.vlt_xshooter.rst
index 1efa16a196..b8eb23aa25 100644
--- a/doc/api/pypeit.spectrographs.vlt_xshooter.rst
+++ b/doc/api/pypeit.spectrographs.vlt_xshooter.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.vlt\_xshooter module
 .. automodule:: pypeit.spectrographs.vlt_xshooter
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.spectrographs.wht_isis.rst b/doc/api/pypeit.spectrographs.wht_isis.rst
index 145016c807..1f5dfc5e67 100644
--- a/doc/api/pypeit.spectrographs.wht_isis.rst
+++ b/doc/api/pypeit.spectrographs.wht_isis.rst
@@ -4,5 +4,5 @@ pypeit.spectrographs.wht\_isis module
 .. automodule:: pypeit.spectrographs.wht_isis
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.specutils.pypeit_loaders.rst b/doc/api/pypeit.specutils.pypeit_loaders.rst
index 9c5df47ad2..0337beaa3b 100644
--- a/doc/api/pypeit.specutils.pypeit_loaders.rst
+++ b/doc/api/pypeit.specutils.pypeit_loaders.rst
@@ -4,5 +4,5 @@ pypeit.specutils.pypeit\_loaders module
 .. automodule:: pypeit.specutils.pypeit_loaders
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.specutils.rst b/doc/api/pypeit.specutils.rst
index 29edc09fe8..2a0736c52d 100644
--- a/doc/api/pypeit.specutils.rst
+++ b/doc/api/pypeit.specutils.rst
@@ -15,5 +15,5 @@ Module contents
 .. automodule:: pypeit.specutils
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.telescopes.rst b/doc/api/pypeit.telescopes.rst
index 453365f669..97b4fa4827 100644
--- a/doc/api/pypeit.telescopes.rst
+++ b/doc/api/pypeit.telescopes.rst
@@ -4,5 +4,5 @@ pypeit.telescopes module
 .. automodule:: pypeit.telescopes
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.tracepca.rst b/doc/api/pypeit.tracepca.rst
index 63c13704f9..946d25b9dc 100644
--- a/doc/api/pypeit.tracepca.rst
+++ b/doc/api/pypeit.tracepca.rst
@@ -4,5 +4,5 @@ pypeit.tracepca module
 .. automodule:: pypeit.tracepca
    :members:
    :private-members:
-   :undoc-members:
    :show-inheritance:
+   :undoc-members:
diff --git a/doc/api/pypeit.utils.rst b/doc/api/pypeit.utils.rst
index c38c747556..233eaf5d21 100644
--- a/doc/api/pypeit.utils.rst
+++ b/doc/api/pypeit.utils.rst
@@ -4,5 +4,5 @@ pypeit.utils module
 ..
automodule:: pypeit.utils :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.wavecalib.rst b/doc/api/pypeit.wavecalib.rst index 3f244e5a5e..931242c8d4 100644 --- a/doc/api/pypeit.wavecalib.rst +++ b/doc/api/pypeit.wavecalib.rst @@ -4,5 +4,5 @@ pypeit.wavecalib module .. automodule:: pypeit.wavecalib :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.wavemodel.rst b/doc/api/pypeit.wavemodel.rst index 478fe9dfb9..7f8daeb026 100644 --- a/doc/api/pypeit.wavemodel.rst +++ b/doc/api/pypeit.wavemodel.rst @@ -4,5 +4,5 @@ pypeit.wavemodel module .. automodule:: pypeit.wavemodel :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.wavetilts.rst b/doc/api/pypeit.wavetilts.rst index 01a83f4302..8bcbc03c72 100644 --- a/doc/api/pypeit.wavetilts.rst +++ b/doc/api/pypeit.wavetilts.rst @@ -4,5 +4,5 @@ pypeit.wavetilts module .. automodule:: pypeit.wavetilts :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/conf.py b/doc/conf.py index b21b5eb06f..3cfb12c8ee 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -40,8 +40,8 @@ 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', + 'sphinx_autodoc_typehints', 'sphinx_design', -# 'sphinx.ext.autosectionlabel', ] # Nicer math rendering than sphinx default? @@ -65,6 +65,9 @@ autosectionlabel_prefix_document = True +# Type hints formatting +autodoc_typehints = "description" + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -351,5 +354,10 @@ def get_package_version(): # Example configuration for intersphinx: refer to the Python standard library. 
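# With the sphinx_autodoc_typehints extension and autodoc_typehints =
# "description" enabled above, Sphinx pulls annotated types out of the
# rendered signature and into each parameter description, and the expanded
# intersphinx mapping just below lets those types resolve to the external
# numpy/scipy/astropy docs. A rough sketch of the pattern this enables in a
# docstring (hypothetical function, not part of this patch):
#
#     from pathlib import Path
#
#     def save_table(data: dict, ofile: str | Path, overwrite: bool = False):
#         """
#         Args:
#             ofile: Output file. The rendered docs show ``str | Path`` from
#                 the annotation, with ``Path`` cross-linked via intersphinx,
#                 so the docstring need not repeat the type.
#         """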
-intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable/', None), + 'scipy': ('https://docs.scipy.org/doc/scipy/', None), + 'astropy': ('https://docs.astropy.org/en/stable/', None), +} diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index eef555289f..cec78da828 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -6,7 +6,7 @@ pypeit_file PypeIt: The Python Spectroscopic Data Reduction Pipeline - Version 1.18.2.dev171+g808a323c9 + Version 1.18.2.dev174+g3d63da230.d20251017 Available spectrographs include: aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, diff --git a/doc/include/dependencies_table.rst b/doc/include/dependencies_table.rst index 70ac57cbe2..1d4caf0ac9 100644 --- a/doc/include/dependencies_table.rst +++ b/doc/include/dependencies_table.rst @@ -1,5 +1,5 @@ ======================= ============================================================================================================================================================================================================================================================================================================================================== Python Version ``>=3.11,<3.14`` Required for users ``IPython>=8.0.0``, ``PyERFA>=2.0.0``, ``PyYAML>=6.0``, ``astropy>=7.0``, ``bottleneck``, ``configobj>=5.0.6``, ``fast-histogram>=0.11``, ``ginga>=5.4.0``, ``linetools>=0.3.2``, ``matplotlib>=3.7``, ``numpy>=2.0``, ``packaging>=22.0``, ``pygithub``, ``pyqt6``, ``qtpy>=2.2.0``, ``scikit-learn>=1.2``, ``scipy>=1.9``, ``setuptools<81`` -Required for developers ``coverage``, ``docutils<0.22``, ``psutil``, ``pygit2``, ``pytest-astropy``, ``pytest-cov``, ``pytest-qt``, ``pytest>=7.0.0``, ``scikit-image>=0.23``, ``specutils>=2.0``, ``sphinx-automodapi``, ``sphinx-design``, ``sphinx>=1.6,<8``, ``sphinx_rtd_theme==2.0.0``, ``tox`` +Required for developers ``coverage``, ``docutils<0.22``, ``psutil``, ``pygit2``, ``pytest-astropy``, ``pytest-cov``, ``pytest-qt``, ``pytest>=7.0.0``, ``scikit-image>=0.23``, ``specutils>=2.0``, ``sphinx-autodoc-typehints>3.2``, ``sphinx-automodapi``, ``sphinx-design``, ``sphinx>6``, ``sphinx_rtd_theme==3.0.0``, ``tox`` ======================= ============================================================================================================================================================================================================================================================================================================================================== diff --git a/pypeit/calibframe.py b/pypeit/calibframe.py index 316673f139..ab0b51cbb4 100644 --- a/pypeit/calibframe.py +++ b/pypeit/calibframe.py @@ -93,7 +93,7 @@ def set_paths(self, odir, setup, calib_id, detname): :attr:`calib_id`, and :attr:`calib_key`. 
Args: - odir (:obj:`str`, `Path`_): + odir (:obj:`str`, :class:`~pathlib.Path`): Output directory for the processed calibration frames setup (:obj:`str`): The string identifier for the instrument setup/configuration; diff --git a/pypeit/logger.py b/pypeit/logger.py index 984383c203..ea3a52614e 100644 --- a/pypeit/logger.py +++ b/pypeit/logger.py @@ -12,7 +12,6 @@ import re import sys import traceback -from typing import Optional, List import warnings from IPython import embed @@ -39,9 +38,9 @@ def short_warning(message, category, filename, lineno, file=None, line=None): def color_text( text:str, - color:List[int], - bold:Optional[bool] = False, - nchar:Optional[int] = None + color:list[int], + bold:bool = False, + nchar:int | None = None ) -> str: """ Return an input string with escape characters to colorize text written to @@ -154,9 +153,9 @@ class PypeItLogger(logging.Logger): def init(self, level: int = logging.INFO, - stream: Optional[io.TextIOBase] = None, - log_file: Optional[str | Path] = None, - log_file_level: Optional[int] = None, + stream: io.TextIOBase | None = None, + log_file: str | Path | None = None, + log_file_level: int | None = None, ): """ Initialise the logger. @@ -332,9 +331,9 @@ def close_file(self): # add them as parameters here as well. def get_logger( level: int = logging.INFO, - stream: Optional[io.TextIOBase] = None, - log_file: Optional[str | Path] = None, - log_file_level: Optional[int] = None, + stream: io.TextIOBase | None = None, + log_file: str | Path | None = None, + log_file_level: int | None = None, ) -> PypeItLogger: """ Instantiate a new logger. diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 7f80037ef4..541493b8fd 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -6,8 +6,6 @@ """ import os from pathlib import Path -import re -from typing import List from IPython import embed @@ -45,14 +43,11 @@ class SpecObjs: specobjs. Args: - specobjs (`numpy.ndarray`_, list, optional): + specobjs (:class:`numpy.ndarray`, :class:`list`, optional): One or more :class:`~pypeit.specobj.SpecObj` objects - header (`astropy.io.fits.Header`_, optional): + header (:class:`~astropy.io.fits.Header`, optional): Baseline header to use - Attributes: - summary (`astropy.table.Table`_): - Summary table (?) """ version = '1.0.0' @@ -1023,11 +1018,13 @@ def write_info(self, outfile, pypeline): # Write obj_tbl.write(outfile,format='ascii.fixed_width', overwrite=True) - def get_extraction_groups(self, model_full_slit=False) -> List[List[int]]: + def get_extraction_groups(self, model_full_slit=False) -> list[list[int]]: """ - Returns: - List[List[int]]: A list of extraction groups, each of which is a list of integer - object indices that should be extracted together by core.skysub.local_skysub_extract + Returns + ------- + A list of extraction groups, each of which is a list of integer + object indices that should be extracted together by + core.skysub.local_skysub_extract """ nobj = len(self.specobjs) diff --git a/pypeit/spectrographs/p200_dbsp.py b/pypeit/spectrographs/p200_dbsp.py index 7f21fd5dfd..18b7b981f6 100644 --- a/pypeit/spectrographs/p200_dbsp.py +++ b/pypeit/spectrographs/p200_dbsp.py @@ -3,8 +3,6 @@ .. 
include:: ../include/links.rst """ -from typing import List, Optional - import numpy as np from astropy.io import fits @@ -61,7 +59,7 @@ def init_meta(self): # Lamps self.meta['lampstat01'] = dict(ext=0, card='LAMPS') - def compound_meta(self, headarr: List[fits.Header], meta_key: str): + def compound_meta(self, headarr: list[fits.Header], meta_key: str): """ Methods to generate metadata requiring interpretation of the header data, instead of simply reading the value of a header card. @@ -192,7 +190,7 @@ class P200DBSPBlueSpectrograph(P200DBSPSpectrograph): supported = True comment = 'Blue camera' - def compound_meta(self, headarr: List[fits.Header], meta_key: str): + def compound_meta(self, headarr: list[fits.Header], meta_key: str): """ Methods to generate metadata requiring interpretation of the header data, instead of simply reading the value of a header card. @@ -217,7 +215,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): return parse.binning2string(binspec, binspatial) raise PypeItError(f"Not ready for this compound meta: {meta_key}") - def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): + def get_detector_par(self, det: int, hdu: fits.HDUList | None = None): """ Return metadata for the selected detector. @@ -415,7 +413,7 @@ class P200DBSPRedSpectrograph(P200DBSPSpectrograph): supported = True comment = 'Red camera' - def compound_meta(self, headarr: List[fits.Header], meta_key: str): + def compound_meta(self, headarr: list[fits.Header], meta_key: str): """ Methods to generate metadata requiring interpretation of the header data, instead of simply reading the value of a header card. @@ -441,7 +439,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): else: raise PypeItError(f"Not ready for this compound meta: {meta_key}") - def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): + def get_detector_par(self, det: int, hdu: fits.HDUList | None = None): """ Return metadata for the selected detector. diff --git a/pypeit/spectrographs/p200_ngps.py b/pypeit/spectrographs/p200_ngps.py index fe6a2ebe90..12cebdd84f 100644 --- a/pypeit/spectrographs/p200_ngps.py +++ b/pypeit/spectrographs/p200_ngps.py @@ -3,8 +3,6 @@ .. include:: ../include/links.rst """ -from typing import List, Optional - import numpy as np from astropy.io import fits @@ -178,7 +176,7 @@ def get_rawimage(self, raw_file, det): - def compound_meta(self, headarr: List[fits.Header], meta_key: str): + def compound_meta(self, headarr: list[fits.Header], meta_key: str): """ Methods to generate metadata requiring interpretation of the header data, instead of simply reading the value of a header card. @@ -222,7 +220,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): raise PypeItError(f"Not ready for this compound meta: {meta_key}") - def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): + def get_detector_par(self, det: int, hdu: fits.HDUList | None = None): """ Return metadata for the selected detector. @@ -347,7 +345,7 @@ def get_rawimage(self, raw_file, det): # Pull image from detector 2 return super().get_rawimage(raw_file, det=2, sec_includes_binning=True) - def compound_meta(self, headarr: List[fits.Header], meta_key: str): + def compound_meta(self, headarr: list[fits.Header], meta_key: str): """ Methods to generate metadata requiring interpretation of the header data, instead of simply reading the value of a header card. 
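# The annotation edits in the surrounding hunks all follow one pattern: with
# the package floor at Python 3.11, the deprecated typing.List and
# typing.Optional aliases are dropped in favor of builtin generics and
# PEP 604 unions. A minimal sketch of the equivalence (hypothetical function
# name; the real edits are to compound_meta and get_detector_par):
#
#     from astropy.io import fits
#
#     # before: headarr: List[fits.Header], hdu: Optional[fits.HDUList] = None
#     def example(headarr: list[fits.Header],
#                 hdu: fits.HDUList | None = None) -> str | None:
#         ...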
@@ -391,7 +389,7 @@ def compound_meta(self, headarr: List[fits.Header], meta_key: str): raise PypeItError("Not ready for this compound meta: ", meta_key) - def get_detector_par(self, det: int, hdu: Optional[fits.HDUList] = None): + def get_detector_par(self, det: int, hdu: fits.HDUList | None = None): """ Return metadata for the selected detector. diff --git a/pyproject.toml b/pyproject.toml index 56eb5fdfb1..cf2e3b1be4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -68,11 +68,12 @@ test = [ "coverage", ] docs = [ - "sphinx>=1.6,<8", + "sphinx>6", "docutils<0.22", "sphinx-automodapi", "sphinx-design", - "sphinx_rtd_theme==2.0.0", + "sphinx_rtd_theme==3.0.0", + "sphinx-autodoc-typehints>3.2", ] devsuite = [ "psutil", @@ -87,11 +88,12 @@ dev = [ "tox", "pytest-cov", "coverage", - "sphinx>=1.6,<8", + "sphinx>6", "docutils<0.22", "sphinx-automodapi", "sphinx-design", - "sphinx_rtd_theme==2.0.0", + "sphinx_rtd_theme==3.0.0", + "sphinx-autodoc-typehints>3.2", "psutil", "pytest-qt", ] From f8c00f48259cf8767a7ad8ec7f69bed52f57d136 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Wed, 22 Oct 2025 15:56:12 -0700 Subject: [PATCH 21/33] short warning doc string and doc update --- doc/pypeit_par.rst | 62 +++++++++++++++++++++++----------------------- pypeit/logger.py | 9 ++++--- 2 files changed, 37 insertions(+), 34 deletions(-) diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index 62c16a4548..36407566ec 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -594,21 +594,21 @@ Collate1DPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.Collate1DPar` -========================= =============== ======= ======================================= ================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -========================= =============== ======= ======================================= ================================================================================================================================================================================================================================================================================================================================================================================================================== -``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. -``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. -``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. -``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. -``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. -``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. -``outdir`` str .. ``/Users/dpelliccia/PypeIt/PypeIt/doc`` The path where all coadded output files and report files will be placed. -``refframe`` str .. .. 
Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric -``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. -``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. -``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. -========================= =============== ======= ======================================= ================================================================================================================================================================================================================================================================================================================================================================================================================== +========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== +``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. +``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. +``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. +``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. +``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. +``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. +``outdir`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` The path where all coadded output files and report files will be placed. +``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric +``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. 
These are only created if flux calibration or refframe correction are asked for. +``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. +``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. +========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== ---- @@ -661,22 +661,22 @@ ReduxPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ReduxPar` -====================== ============== ======= ======================================= ========================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -====================== ============== ======= ======================================= ========================================================================================================================================================================================================================================================================================================================================================================================================== -``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame -``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! -``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` -``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). -``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). -``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. 
-``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. -``redux_path`` str .. ``/Users/dpelliccia/PypeIt/PypeIt/doc`` Path to folder for performing reductions. Default is the current working directory. -``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. -``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. -``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. -``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. -====================== ============== ======= ======================================= ========================================================================================================================================================================================================================================================================================================================================================================================================== +====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== +``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame +``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! +``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` +``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). 
+``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). +``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. +``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. +``redux_path`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` Path to folder for performing reductions. Default is the current working directory. +``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. +``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. +``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. +``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. +====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== ---- diff --git a/pypeit/logger.py b/pypeit/logger.py index ea3a52614e..6ad47b1cb9 100644 --- a/pypeit/logger.py +++ b/pypeit/logger.py @@ -16,11 +16,14 @@ from IPython import embed +# NOTE: BEWARE of importing anything from pypeit into this module. It is likely +# to cause a circular import. + # TODO: Can we put this *inside* the logger? -def short_warning(message, category, filename, lineno, file=None, line=None): +def short_warning(message, category, filename, lineno, line=None): """ - Formatter for warning messages. Shortens default output to just the warning - type and warning message. + Overrides default formatting of warning messages. The only arguments used + are ``message`` and ``category``. See :func:`warnings.formatwarning`. 
""" return f'{category.__name__}: {message}' warnings.formatwarning = short_warning From 5df5a9da165dd603ccac931fd1ec9f7c9a3f268b Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Tue, 18 Nov 2025 15:47:39 -0800 Subject: [PATCH 22/33] test fixes --- pypeit/metadata.py | 2 +- pypeit/spectrographs/ldt_deveny.py | 4 ++-- pypeit/spectrographs/spectrograph.py | 4 ++-- pypeit/tests/test_metadata.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pypeit/metadata.py b/pypeit/metadata.py index ac391abf42..689db715f7 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -1989,5 +1989,5 @@ def get_row_for_filename(self, filename:str) -> table.Table: """ idx = self.table['filename'] == Path(filename).name if not any(idx): - msgs.error(f"Requested file {filename} not in the metadata table.") + raise PypeItError(f"Requested file {filename} not in the metadata table.") return self.table[idx].copy() diff --git a/pypeit/spectrographs/ldt_deveny.py b/pypeit/spectrographs/ldt_deveny.py index 5adbe68e78..2bf2edcf2f 100644 --- a/pypeit/spectrographs/ldt_deveny.py +++ b/pypeit/spectrographs/ldt_deveny.py @@ -564,10 +564,10 @@ def config_specific_par( case 'DV10 (2160/5000)': # Presently unsupported; no parameter changes - msgs.warn("The DV10 grating is not present supported; no config-specific pars set!") + log.warning("The DV10 grating is not present supported; no config-specific pars set!") case _: - msgs.warn("No recognized grating passed; no config-specific pars set!") + log.warning("No recognized grating passed; no config-specific pars set!") # Adjust parameters based on CCD binning bin_spec, bin_spat = parse.parse_binning(binning) diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index 5c0d6759d2..48fd4c378f 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -234,7 +234,7 @@ def config_specific_par( adjusted for configuration specific parameter values. """ if inp is None: - msgs.error("You have not included a standard or science file in your PypeIt file to determine the configuration") + raise PypeItError("You have not included a standard or science file in your PypeIt file to determine the configuration") return self.__class__.default_pypeit_par() if inp_par is None else inp_par def update_edgetracepar(self, par): @@ -493,7 +493,7 @@ def subheader_for_spec(self, row_fitstbl, raw_header, extra_header_cards=None, try: subheader[key] = row_fitstbl[key] except KeyError: - msgs.error( + raise PypeItError( f"Required SlicerIFU keyword {key} not present in your fitstbl/Header" ) diff --git a/pypeit/tests/test_metadata.py b/pypeit/tests/test_metadata.py index 3c1f89e774..c8d6a940a4 100644 --- a/pypeit/tests/test_metadata.py +++ b/pypeit/tests/test_metadata.py @@ -14,7 +14,7 @@ from pypeit.scripts.setup import Setup from pypeit.inputfiles import PypeItFile from astropy.table import Table -from pypeit.pypmsgs import PypeItError +from pypeit import PypeItError def test_read_combid(): From 08b8038cd74ffc411cfbd7d4d7778dc74633f4db Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Fri, 21 Nov 2025 13:09:06 -0800 Subject: [PATCH 23/33] rm pypmsgs --- pypeit/pypmsgs.py | 411 ---------------------------------------------- 1 file changed, 411 deletions(-) delete mode 100644 pypeit/pypmsgs.py diff --git a/pypeit/pypmsgs.py b/pypeit/pypmsgs.py deleted file mode 100644 index 4693b41fa0..0000000000 --- a/pypeit/pypmsgs.py +++ /dev/null @@ -1,411 +0,0 @@ -""" -Module for terminal and file logging. - -.. 
todo:: - Why not use pythons native logging package? - -""" -import datetime -import inspect -import io -import os -import sys - -# Imported for versioning -import astropy -import numpy -import scipy - -import pypeit -from pypeit.core.qa import close_qa -from pypeit import pypeit_user - -#pypeit_logger = None - -# Alphabetical list of developers -developers = ['ema', 'joe', 'milvang', 'rcooke', 'thsyu', 'xavier'] - - -class PypeItError(Exception): - pass - -class PypeItBitMaskError(PypeItError): - pass - -class PypeItDataModelError(PypeItError): - pass - -class PypeItPathError(PypeItError): - pass - - -class Messages: - """ - Create coloured text for messages printed to screen. - - For further details on colours see the following example: - http://ascii-table.com/ansi-escape-sequences.php - - Parameters - ---------- - log : str or file-like object,optional - Name of saved log file (no log will be saved if log==""). If None, no - log is saved. - verbosity : int - Level of verbosity. Options are - - 0 = No output - - 1 = Minimal output - - 2 = All output (default) - colors : bool - If true, the screen output will have colors, otherwise normal screen - output will be displayed - """ - def __init__(self, log=None, verbosity=None, colors=True): - - # Initialize other variables - self._defverb = 1 - - if pypeit_user in developers: - self._defverb = 2 - self._verbosity = self._defverb if verbosity is None else verbosity - - # TODO: Why are these two necessary? It would seem better to - # provide Messages with member functions that can operate on - # sciexp and pypeit_file instead of having them kept within the - # object itself... - self.sciexp = None - self.pypeit_file = None - self.qa_path = None - - # Initialize the log - self._log_to_stderr = self._verbosity != 0 - self._log = None - self._initialize_log_file(log=log) - - # Use colors? - self._start = None - self._end = None - self._black_CL = None - self._yellow_CL = None - self._blue_CL = None - self._green_CL = None - self._red_CL = None - self._white_RD = None - self._white_GR = None - self._white_BK = None - self._white_BL = None - self._black_YL = None - self._yellow_BK = None - - self.disablecolors() - if colors: - self.enablecolors() - - def _cleancolors(self, msg): - cols = [self._end, self._start, - self._black_CL, self._yellow_CL, self._blue_CL, self._green_CL, self._red_CL, - self._white_RD, self._white_GR, self._white_BK, self._white_BL, - self._black_YL, self._yellow_BK] - for i in cols: - msg = msg.replace(i, '') - return msg - - def _devmsg(self): - if self._verbosity == 2: - info = inspect.getouterframes(inspect.currentframe())[3] - devmsg = self._start + self._blue_CL + info[1].split('/')[-1] + ' ' + str(info[2]) \ - + ' ' + info[3] + '()' + self._end + ' - ' - else: - devmsg = '' - return devmsg - - def _print(self, premsg, msg, last=True, printDevMsg=True): - """ - Print to standard error and the log file - """ - devmsg = self._devmsg() if printDevMsg else '' - _msg = premsg+devmsg+msg - if self._log_to_stderr != 0: - print(_msg, file=sys.stderr) - if self._log: - clean_msg = self._cleancolors(_msg) - self._log.write(clean_msg+'\n' if last else clean_msg) - - def _initialize_log_file(self, log=None): - """ - Expects self._log is already None. 
- """ - - if log is None: - return - - self._log = log if isinstance(log, io.IOBase) else open(log, 'w') - - - self._log.write("------------------------------------------------------\n\n") - self._log.write("This log was generated with version {0:s} of PypeIt\n\n".format( - pypeit.__version__)) - self._log.write("You are using scipy version={:s}\n".format(scipy.__version__)) - self._log.write("You are using numpy version={:s}\n".format(numpy.__version__)) - self._log.write("You are using astropy version={:s}\n\n".format(astropy.__version__)) - self._log.write("------------------------------------------------------\n\n") - - def reset(self, log=None, verbosity=None, colors=True, log_to_stderr=None): - """ - Reinitialize the object. - - Needed so that there can be a default object for all modules, - but also a dynamically defined log file. - """ - # Initialize other variables - self._verbosity = self._defverb if verbosity is None else verbosity - if log_to_stderr is None: - self._log_to_stderr = self._verbosity != 0 - else: - self._log_to_stderr = log_to_stderr - - self.reset_log_file(log) - self.disablecolors() - if colors: - self.enablecolors() - - def reset_log_file(self, log): - if self._log: - self._log.close() - self._log = None - self._initialize_log_file(log=log) - - def close(self): - ''' - Close the log file before the code exits - ''' - close_qa(self.pypeit_file, self.qa_path) - return self.reset_log_file(None) - - def error(self, msg, cls='PypeItError'): - """ - Print an error message - """ - premsg = '\n'+self._start + self._white_RD + '[ERROR] ::' + self._end + ' ' - self._print(premsg, msg) - - # Close QA plots - close_qa(self.pypeit_file, self.qa_path) - - # TODO: This will go away when we merge in the logging PR - raise eval(cls)(msg) - - - def info(self, msg): - """ - Print an information message - """ - premsg = self._start + self._green_CL + '[INFO] ::' + self._end + ' ' - self._print(premsg, msg) - - def info_update(self, msg, last=False): - """ - Print an information message that needs to be updated - """ - premsg = '\r' + self._start + self._green_CL + '[INFO] ::' + self._end + ' ' - self._print(premsg, msg, last=last) - - def test(self, msg): - """ - Print a test message - """ - if self._verbosity == 2: - premsg = self._start + self._white_BL + '[TEST] ::' + self._end + ' ' - self._print(premsg, msg) - - def warn(self, msg): - """ - Print a warning message - """ - premsg = self._start + self._red_CL + '[WARNING] ::' + self._end + ' ' - self._print(premsg, msg) - - def bug(self, msg): - """ - Print a bug message - """ - premsg = self._start + self._white_BK + '[BUG] ::' + self._end + ' ' - self._print(premsg, msg) - - def work(self, msg): - """ - Print a work in progress message - """ - if self._verbosity == 2: - premsgp = self._start + self._black_CL + '[WORK IN ]::' + self._end + '\n' - premsgs = self._start + self._yellow_CL + '[PROGRESS]::' + self._end + ' ' - self._print(premsgp+premsgs, msg) - - def pypeitpar_text(self, msglist): - """ - Prepare a text string with the pypeit par formatting. - - Parameters - ---------- - msglist: list - A list containing the pypeit parameter strings. The last element of - the list must be the argument and the variable. For example, to - print: - - .. code-block:: ini - - [sensfunc] - [[UVIS]] - polycorrect = False - - you should set ``msglist = ['sensfunc', 'UVIS', 'polycorrect = False']``. 
- - Returns - ------- - parstring : str - The parameter string - """ - parstring = '\n' - premsg = ' ' - for ll, lin in enumerate(msglist): - thismsg = ll*' ' - if ll == len(msglist)-1: - thismsg += lin - else: - thismsg += (ll+1) * '[' + lin + (ll+1) * ']' - parstring += premsg + thismsg + '\n' - return parstring - - def pypeitpar(self, msglist): - """ - Print a message with the pypeit par formatting. - - Parameters - ---------- - msglist: list - A list containing the pypeit parameter strings. The last element of - the list must be the argument and the variable. For example, to - print: - - .. code-block:: ini - - [sensfunc] - [[UVIS]] - polycorrect = False - - you should set ``msglist = ['sensfunc', 'UVIS', 'polycorrect = False']``. - - """ - premsg = ' ' - for ll, lin in enumerate(msglist): - thismsg = ll*' ' - if ll == len(msglist)-1: - thismsg += lin - else: - thismsg += (ll+1) * '[' + lin + (ll+1) * ']' - self._print(premsg, thismsg, printDevMsg=False) - - def prindent(self, msg): - """ - Print an indent - """ - premsg = ' ' - self._print(premsg, msg) - - def input(self): - """ - Return a text string to be used to display input required from the user - """ - premsg = self._start + self._blue_CL + '[INPUT] ::' + self._end + ' ' - return premsg - - @staticmethod - def newline(): - """ - Return a text string containing a newline to be used with messages - """ - return '\n ' - - @staticmethod - def indent(): - """ - Return a text string containing an indent to be used with messages - """ - return ' ' - - # Set the colors - def enablecolors(self): - """ - Enable colored output text - """ - - # Start and end coloured text - self._start = '\x1B[' - self._end = '\x1B[' + '0m' - - # Clear Backgrounds - self._black_CL = '1;30m' - self._yellow_CL = '1;33m' - self._blue_CL = '1;34m' - self._green_CL = '1;32m' - self._red_CL = '1;31m' - - # Coloured Backgrounds - self._white_RD = '1;37;41m' - self._white_GR = '1;37;42m' - self._white_BK = '1;37;40m' - self._white_BL = '1;37;44m' - self._black_YL = '1;37;43m' - self._yellow_BK = '1;33;40m' - - def disablecolors(self): - """ - Disable colored output text - """ - - # Start and end coloured text - self._start = '' - self._end = '' - - # Clear Backgrounds - self._black_CL = '' - self._yellow_CL = '' - self._blue_CL = '' - self._green_CL = '' - self._red_CL = '' - - # Coloured Backgrounds - self._white_RD = '' - self._white_GR = '' - self._white_BK = '' - self._white_BL = '' - self._black_YL = '' - self._yellow_BK = '' - - def set_logfile_and_verbosity(self, scriptname, verbosity): - """ - Set the logfile name and verbosity level for a script run. - - PypeIt scripts (with the exception of run_pypeit) default to verbosity - level = 1. For certain scripts, having a more verbose output (with an - accompanying log file) would be helpful for debugging purposes. This - function provides the ability to set the ``msgs`` verbosity and create - a log file for those certain scripts. - - Log filenames have the form scriptname_YYYYMMDD_HHMM.log to differentiate - between different runs of the script. Timestamp is UT. - - Args: - scriptname (:obj:`str`, optional): - The name of the calling script for use in the logfile - verbosity (:obj:`int`, optional): - The requested verbosity, passed in from the argument parser. 
- Verbosity level between 0 [none] and 2 [all] - """ - # Create a UT timestamp (to the minute) for the log filename - timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y%m%d-%H%M") - # Create a logfile only if verbosity == 2 - logname = f"{scriptname}_{timestamp}.log" if verbosity == 2 else None - # Set the verbosity in msgs - self.reset(log=logname, verbosity=verbosity) - From 349e6c28193b6c702796765e8ccb5f797bf543c3 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Fri, 21 Nov 2025 13:09:42 -0800 Subject: [PATCH 24/33] clean up msgs --- pypeit/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/utils.py b/pypeit/utils.py index 8f790752aa..5941e05834 100644 --- a/pypeit/utils.py +++ b/pypeit/utils.py @@ -1083,7 +1083,7 @@ def smooth(x, window_len, window='flat'): case 'blackman': w = np.blackman(window_len) case _: - msgs.error(f'Unknown window type passed to smooth(): {window}') + raise PypeItError(f'Unknown window type passed to smooth(): {window}') y = np.convolve(w / w.sum(), s, mode='same') From a7b9fff265bac5686ad5aec5fb259286f872b35a Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Tue, 9 Dec 2025 09:48:39 -0800 Subject: [PATCH 25/33] msgs -> log --- pypeit/scripts/rectify_2dspec.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pypeit/scripts/rectify_2dspec.py b/pypeit/scripts/rectify_2dspec.py index c9b52504bf..305a925b5f 100644 --- a/pypeit/scripts/rectify_2dspec.py +++ b/pypeit/scripts/rectify_2dspec.py @@ -11,7 +11,9 @@ import matplotlib.pyplot as plt from pypeit.scripts import scriptbase -from pypeit import spec2dobj, specobjs, msgs +from pypeit import spec2dobj +from pypeit import specobjs +from pypeit import log from pypeit.core import coadd from pypeit.core.wavecal import wvutils from pypeit.core.moment import moment1d @@ -43,7 +45,7 @@ def main(args): chk_version = not args.try_old for spec2file in args.files: - msgs.info(f'Processing file: {spec2file}') + log.info(f'Processing file: {spec2file}') # Get list of detectors hdr = fits.getheader(spec2file) detnames = hdr['HIERARCH ALLSPEC2D_DETS'].split(',') @@ -52,7 +54,7 @@ def main(args): hdu_list = [fits.PrimaryHDU()] for detname in detnames: - msgs.info(f'DETECTOR: {detname}') + log.info(f'DETECTOR: {detname}') spec2d = spec2dobj.Spec2DObj.from_file(spec2file, detname, chk_version=chk_version) pad = 10 # pixels to pad on each side slitmask = spec2d.slits.slit_img(pad=pad, flexure=spec2d.sci_spat_flexure) @@ -94,8 +96,8 @@ def main(args): if len(waves) == 0: - msgs.warn(f'There is a problem with the wavelengths on det {detname}. ' - f'The RECTIFIED 2D spectral image will not be created.') + log.warning(f'There is a problem with the wavelengths on det {detname}. ' + 'The RECTIFIED 2D spectral image will not be created.') continue wmax = np.ceil(spec2d.waveimg[spec2d.waveimg>0].max()) wmin = np.floor(spec2d.waveimg[spec2d.waveimg>0].min()) @@ -109,9 +111,9 @@ def main(args): # check if this slit was masked. If so, skip it. slitord_id = slitord_ids[slitidx] if not np.any(this_mask): - msgs.warn(f'Slit/order {slitord_id} on {detname} is fully masked. Skipping it.') + log.warning(f'Slit/order {slitord_id} on {detname} is fully masked. 
Skipping it.') continue - msgs.info(f'Rectifying slit/order {slitord_id}') + log.info(f'Rectifying slit/order {slitord_id}') slit_cen = spec2d.slits.center[:,slitidx] mask = spec2d.bpmmask.mask == 0 @@ -211,4 +213,4 @@ def main(args): out_file = spec2file.replace('spec2d', 'rectified_spec2d') hdulist = fits.HDUList(hdu_list) hdulist.writeto(out_file, overwrite=True) - msgs.info(f'Rectified images saved to {out_file}') \ No newline at end of file + log.info(f'Rectified images saved to {out_file}') \ No newline at end of file From f03390c9ce5f3f889bbc902503afd8255f659126 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Tue, 9 Dec 2025 13:07:38 -0800 Subject: [PATCH 26/33] PR comments --- doc/conf.py | 1 + doc/pypeit_par.rst | 62 ++++++++++++++--------------- pypeit/coadd2d.py | 14 +++---- pypeit/core/flat.py | 1 + pypeit/core/wave.py | 3 -- pypeit/exceptions.py | 2 +- pypeit/images/detector_container.py | 1 - pypeit/par/parset.py | 12 ++++-- pypeit/par/util.py | 2 - pypeit/scripts/scriptbase.py | 8 ---- 10 files changed, 49 insertions(+), 57 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 3cfb12c8ee..090ae77a67 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -357,6 +357,7 @@ def get_package_version(): intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'numpy': ('https://numpy.org/doc/stable/', None), + 'matplotlib': ('https://matplotlib.org/stable/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/', None), 'astropy': ('https://docs.astropy.org/en/stable/', None), } diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index e356987760..c1a71a66be 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -594,21 +594,21 @@ Collate1DPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.Collate1DPar` -========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== -``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. -``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. -``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. -``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. -``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. -``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. 
Must be either 'pixel' or 'ra/dec'. -``outdir`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` The path where all coadded output files and report files will be placed. -``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric -``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. -``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. -``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. -========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== +========================= =============== ======= ========== ================================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +========================= =============== ======= ========== ================================================================================================================================================================================================================================================================================================================================================================================================================== +``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. +``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. +``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. +``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. +``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. +``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. +``outdir`` str .. ``$PWD`` The path where all coadded output files and report files will be placed. +``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric +``spec1d_outdir`` str .. .. 
The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. +``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. +``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. +========================= =============== ======= ========== ================================================================================================================================================================================================================================================================================================================================================================================================================== ---- @@ -661,22 +661,22 @@ ReduxPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ReduxPar` -====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== -``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame -``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! -``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` -``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). -``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). -``qadir`` str .. 
``QA`` Directory relative to calling directory to write quality assessment files. -``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. -``redux_path`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` Path to folder for performing reductions. Default is the current working directory. -``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. -``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. -``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. -``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. -====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== +====================== ============== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +====================== ============== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================== +``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame +``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! +``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` +``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). 
+``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). +``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. +``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. +``redux_path`` str .. ``$PWD`` Path to folder for performing reductions. Default is the current working directory. +``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. +``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. +``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. +``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. +====================== ============== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================== ---- diff --git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index 3c26149113..1a59fdd048 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -1611,14 +1611,14 @@ def snr_report(self, slitid, spat_pixpos, snr_bar): """ # Print out a report on the SNR - msg_string = '-------------------------------------' - msg_string += ' Summary for highest S/N object' - msg_string += ' found on slitid = {:d} '.format(slitid) - msg_string += '-------------------------------------' - msg_string += ' exp# spat_pixpos S/N' - msg_string += '-------------------------------------' + msg_string = '\n-------------------------------------\n' + msg_string += ' Summary for highest S/N object\n' + msg_string += f' found on slitid = {slitid} \n' + msg_string += '-------------------------------------\n' + msg_string += ' exp# spat_pixpos S/N\n' + msg_string += '-------------------------------------\n' for iexp, (spat,snr) in enumerate(zip(spat_pixpos, snr_bar)): - msg_string += ' {:2d} {:7.1f} {:5.2f}'.format(iexp, spat, snr) + msg_string += f' {iexp:2d} {spat:7.1f} {snr:5.2f}\n' msg_string += '-------------------------------------' log.info(msg_string) diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index cbfa72c6d2..7016be282c 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -16,6 +16,7 @@ from IPython import embed from pypeit import log +from pypeit import PypeItError from pypeit.core import coadd from pypeit import utils diff --git a/pypeit/core/wave.py b/pypeit/core/wave.py index ad74657779..d4ca34f7ee 100644 --- a/pypeit/core/wave.py +++ b/pypeit/core/wave.py @@ -16,9 +16,6 @@ from astropy.utils.iers import conf conf.auto_max_age = None - - -from pypeit import log from pypeit import PypeItError from IPython import embed diff --git a/pypeit/exceptions.py b/pypeit/exceptions.py index 05af3c3515..5625aebfc5 100644 --- 
a/pypeit/exceptions.py +++ b/pypeit/exceptions.py @@ -19,4 +19,4 @@ class PypeItDataModelError(PypeItError): pass class PypeItPathError(PypeItError): - pass \ No newline at end of file + pass diff --git a/pypeit/images/detector_container.py b/pypeit/images/detector_container.py index 5e2a9fc015..41ec7f99f4 100644 --- a/pypeit/images/detector_container.py +++ b/pypeit/images/detector_container.py @@ -11,7 +11,6 @@ import numpy as np from pypeit import datamodel -from pypeit import log from pypeit.core import procimg diff --git a/pypeit/par/parset.py b/pypeit/par/parset.py index 85568b4d2c..0edf3cb363 100644 --- a/pypeit/par/parset.py +++ b/pypeit/par/parset.py @@ -345,7 +345,7 @@ def _data_table_string(data_table, delimeter='print'): return '\n'.join(row_string)+'\n' @staticmethod - def _data_string(data, use_repr=False, verbatim=False): + def _data_string(data, use_repr=False, verbatim=False, check_dir=False): """ Convert a single datum into a string @@ -363,14 +363,18 @@ def _data_string(data, use_repr=False, verbatim=False): Use quotes around the provided string to indicate that the string should be representated in a verbatim (fixed width) font. + check_dir (:obj:`bool`, optional): + If ``data`` is a string, check if it matches the current working + directory and replace it with a generic string if it does. Returns: str: A string representation of the provided ``data``. """ if isinstance(data, str): + _data = '$PWD' if check_dir and data == os.getcwd() else data if verbatim: - return '..' if len(data) == 0 else '``' + data + '``' - return data + return '..' if len(_data) == 0 else '``' + _data + '``' + return _data if isinstance(data, list): # When the list is empty, return an empty string, which config_lines will append a "," to. # This allows ConfigObj to interpret it as an empty list, instead of string, when re-reading the @@ -727,7 +731,7 @@ def to_rst_table(self, parsets_listed=[]): data_table[i+1,1] = ', '.join([t.__name__ for t in self.dtype[k]]) data_table[i+1,3] = '..' if self.default[k] is None \ else ParSet._data_string(self.default[k], use_repr=False, - verbatim=True) + verbatim=True, check_dir=True) data_table[i+1,2] = '..' if self.options[k] is None \ else ParSet._data_string(self.options[k], use_repr=False, diff --git a/pypeit/par/util.py b/pypeit/par/util.py index 0f7067e0e9..5fe8118c16 100644 --- a/pypeit/par/util.py +++ b/pypeit/par/util.py @@ -10,9 +10,7 @@ from configobj import ConfigObj from IPython import embed -import numpy as np -from pypeit import log, __version__ from pypeit import PypeItError diff --git a/pypeit/scripts/scriptbase.py b/pypeit/scripts/scriptbase.py index b3aa6ba2c9..cb940734ff 100644 --- a/pypeit/scripts/scriptbase.py +++ b/pypeit/scripts/scriptbase.py @@ -117,14 +117,6 @@ def entry_point(cls): """ cls.main(cls.parse_args()) - # TODO: This could also be combined with the property decorator; see - # https://docs.python.org/3.9/library/functions.html#classmethod - # Order matters. - # - # @classmethod - # @property - # - # Leave as is for now. 
@classmethod def name(cls): """ From fac0cb0b968b16857522a856e8a91a42355673ff Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Tue, 9 Dec 2025 13:11:48 -0800 Subject: [PATCH 27/33] doc update --- doc/api/pypeit.scripts.rectify_2dspec.rst | 2 +- doc/help/pypeit_rectify_2dspec.rst | 33 ++++++++--- doc/include/gemini_gnirs_echelle_A.pypeit.rst | 58 +++++++++---------- 3 files changed, 54 insertions(+), 39 deletions(-) diff --git a/doc/api/pypeit.scripts.rectify_2dspec.rst b/doc/api/pypeit.scripts.rectify_2dspec.rst index f7d95d6bf9..b6e0ed4de3 100644 --- a/doc/api/pypeit.scripts.rectify_2dspec.rst +++ b/doc/api/pypeit.scripts.rectify_2dspec.rst @@ -4,5 +4,5 @@ pypeit.scripts.rectify\_2dspec module .. automodule:: pypeit.scripts.rectify_2dspec :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/help/pypeit_rectify_2dspec.rst b/doc/help/pypeit_rectify_2dspec.rst index 219eb20757..04a90a7603 100644 --- a/doc/help/pypeit_rectify_2dspec.rst +++ b/doc/help/pypeit_rectify_2dspec.rst @@ -1,20 +1,35 @@ .. code-block:: console $ pypeit_rectify_2dspec -h - usage: pypeit_rectify_2dspec [-h] [--no_rot] [--embed] [--try_old] [files ...] + usage: pypeit_rectify_2dspec [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--no_rot] [--embed] + [--try_old] + [files ...] Create an FITS file with rectified 2D sky-subtracted spectra for all slits/orders. positional arguments: - files PypeIt spec2d file(s) (default: None) + files PypeIt spec2d file(s) (default: None) options: - -h, --help show this help message and exit - --no_rot Do not rotate the rectified image to have wavelength on the - x-axis. (default: False) - --embed Embed in IPython shell in each detector loop, i.e., before saving - to disk. (default: False) - --try_old Attempt to load old datamodel versions. A crash may ensue.. - (default: False) + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. (default: 2) + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + (default: None) + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. (default: None) + --no_rot Do not rotate the rectified image to have wavelength on + the x-axis. (default: False) + --embed Embed in IPython shell in each detector loop, i.e., + before saving to disk. (default: False) + --try_old Attempt to load old datamodel versions. A crash may + ensue.. 
(default: False) \ No newline at end of file diff --git a/doc/include/gemini_gnirs_echelle_A.pypeit.rst b/doc/include/gemini_gnirs_echelle_A.pypeit.rst index d5582cd623..7060c8f25c 100644 --- a/doc/include/gemini_gnirs_echelle_A.pypeit.rst +++ b/doc/include/gemini_gnirs_echelle_A.pypeit.rst @@ -18,35 +18,35 @@ # Data block data read path /path/to/PypeIt-development-suite/RAW_DATA/gemini_gnirs_echelle/32_SB_SXD - filename | frametype | ra | dec | target | dispname | decker | binning | mjd | airmass | exptime | dispangle | dithoff | calib | comb_id | bkg_id - cN20170331S0216.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3709743134 | 1.077 | 300.0 | 6.1887 | -0.34225501721318 | 0 | 5 | -1 - cN20170331S0217.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3746886267 | 1.068 | 300.0 | 6.1887 | 2.65774498278682 | 0 | 6 | -1 - cN20170331S0218.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3784029399 | 1.06 | 300.0 | 6.1887 | 2.65774498278682 | 0 | 7 | -1 - cN20170331S0219.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3821513967 | 1.053 | 300.0 | 6.1887 | -0.34225501721318 | 0 | 8 | -1 - cN20170331S0220.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3858649384 | 1.047 | 300.0 | 6.1887 | -0.34225501721318 | 0 | 9 | -1 - cN20170331S0221.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.389578673 | 1.041 | 300.0 | 6.1887 | 2.65774498278682 | 0 | 10 | -1 - cN20170331S0222.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.393291443 | 1.036 | 300.0 | 6.1887 | 2.65774498278682 | 0 | 11 | -1 - cN20170331S0223.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3970400927 | 1.032 | 300.0 | 6.1887 | -0.34225501721318 | 0 | 12 | -1 - cN20170331S0206.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.356848156 | 1.029 | 10.0 | 6.1887 | 0.771149555867309 | 0 | 1 | -1 - cN20170331S0207.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.357060926 | 1.028 | 10.0 | 6.1887 | -2.22885044413268 | 0 | 2 | -1 - cN20170331S0208.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3572769754 | 1.028 | 10.0 | 6.1887 | -2.22885044413268 | 0 | 3 | -1 - cN20170331S0209.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3575292903 | 1.028 | 10.0 | 6.1887 | 0.771149555867309 | 0 | 4 | -1 - cN20170331S0246.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4632337656 | 1.052 | 5.0 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0247.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4633936807 | 1.052 | 5.0 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0248.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4635534029 | 1.052 | 5.0 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0249.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 
32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4637127393 | 1.053 | 5.0 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0250.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4638740048 | 1.053 | 5.0 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0251.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.464033727 | 1.053 | 5.0 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0252.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4641730017 | 1.053 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0253.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4642846915 | 1.054 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0254.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4643977316 | 1.054 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0255.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.464510193 | 1.054 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0256.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4646238119 | 1.054 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0257.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4647383952 | 1.054 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0258.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4648516282 | 1.055 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0259.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4649642825 | 1.055 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0260.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4650775156 | 1.055 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 - cN20170331S0261.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4651915202 | 1.055 | 0.84 | 6.1887 | 0.0 | 0 | -1 | -1 + filename | frametype | ra | dec | target | dispname | decker | binning | mjd | airmass | exptime | dispangle | camera_pos | dithoff | calib | comb_id | bkg_id + cN20170331S0216.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3709743134 | 1.077 | 300.0 | 6.1887 | ShortBlue_G5540 | -0.34225501721318 | 0 | 5 | -1 + cN20170331S0217.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3746886267 | 1.068 | 300.0 | 6.1887 | ShortBlue_G5540 | 2.65774498278682 | 0 | 6 | -1 + cN20170331S0218.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3784029399 | 1.06 | 300.0 | 6.1887 | ShortBlue_G5540 | 2.65774498278682 | 0 | 7 | -1 + cN20170331S0219.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3821513967 | 1.053 | 300.0 | 6.1887 | ShortBlue_G5540 | -0.34225501721318 | 0 | 8 | -1 + cN20170331S0220.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3858649384 | 1.047 | 300.0 | 6.1887 | ShortBlue_G5540 | -0.34225501721318 | 0 | 9 | -1 + 
cN20170331S0221.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.389578673 | 1.041 | 300.0 | 6.1887 | ShortBlue_G5540 | 2.65774498278682 | 0 | 10 | -1 + cN20170331S0222.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.393291443 | 1.036 | 300.0 | 6.1887 | ShortBlue_G5540 | 2.65774498278682 | 0 | 11 | -1 + cN20170331S0223.fits | arc,science,tilt | 205.53380833 | 9.47733611 | pisco | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3970400927 | 1.032 | 300.0 | 6.1887 | ShortBlue_G5540 | -0.34225501721318 | 0 | 12 | -1 + cN20170331S0206.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.356848156 | 1.029 | 10.0 | 6.1887 | ShortBlue_G5540 | 0.771149555867309 | 0 | 1 | -1 + cN20170331S0207.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.357060926 | 1.028 | 10.0 | 6.1887 | ShortBlue_G5540 | -2.22885044413268 | 0 | 2 | -1 + cN20170331S0208.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3572769754 | 1.028 | 10.0 | 6.1887 | ShortBlue_G5540 | -2.22885044413268 | 0 | 3 | -1 + cN20170331S0209.fits | arc,standard,tilt | 192.84719583 | 12.37277778 | HIP62745 | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.3575292903 | 1.028 | 10.0 | 6.1887 | ShortBlue_G5540 | 0.771149555867309 | 0 | 4 | -1 + cN20170331S0246.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4632337656 | 1.052 | 5.0 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0247.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4633936807 | 1.052 | 5.0 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0248.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4635534029 | 1.052 | 5.0 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0249.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4637127393 | 1.053 | 5.0 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0250.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4638740048 | 1.053 | 5.0 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0251.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.464033727 | 1.053 | 5.0 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0252.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4641730017 | 1.053 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0253.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4642846915 | 1.054 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0254.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4643977316 | 1.054 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0255.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.464510193 | 1.054 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0256.fits | 
pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4646238119 | 1.054 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0257.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4647383952 | 1.054 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0258.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4648516282 | 1.055 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0259.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4649642825 | 1.055 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0260.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4650775156 | 1.055 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 + cN20170331S0261.fits | pixelflat,trace | 205.53380833 | 9.47733611 | GCALflat | 32/mmSB_G5533 | 0.68arcsec_G5530 | 1,1 | 57843.4651915202 | 1.055 | 0.84 | 6.1887 | ShortBlue_G5540 | 0.0 | 0 | -1 | -1 data end From 223866f251e0fd4d1b4257afd178b1a7393c312f Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Thu, 18 Dec 2025 12:51:00 -0800 Subject: [PATCH 28/33] rm more msgs --- pypeit/sensfunc.py | 31 ++++++++++++++++++++-------- pypeit/spectrographs/gemini_gmos.py | 13 +++++++----- pypeit/spectrographs/spectrograph.py | 7 ++++--- 3 files changed, 34 insertions(+), 17 deletions(-) diff --git a/pypeit/sensfunc.py b/pypeit/sensfunc.py index b8fe112dfe..a49f3d6b31 100644 --- a/pypeit/sensfunc.py +++ b/pypeit/sensfunc.py @@ -324,17 +324,28 @@ def unpack_std(self): elif hdul[1].header.get('DMODCLS') == 'OneSpec': spec = OneSpec.from_file(spec1d, chk_version=self.chk_version) if spec.head0['PYPELINE'] == 'Echelle': - msgs.error('Standard star 1D spectrum from OneSpec class cannot be used for Echelle data.') + raise PypeItError( + 'Standard star 1D spectrum from OneSpec class cannot be used for ' + 'Echelle data.' + ) if spec.fluxed: - msgs.error('Standard star 1D spectrum from OneSpec class is already fluxed ' - 'and cannot be used to generate the sensitivity function.') + raise PypeItError( + 'Standard star 1D spectrum from OneSpec class is already fluxed and ' + 'cannot be used to generate the sensitivity function.' + ) if self.par['use_flat']: - msgs.error('"use_flat" set to True, but standard star 1D spectrum from OneSpec class ' - 'does not contain the flat spectrum. The blaze function cannot be estimated.') + raise PypeItError( + '"use_flat" set to True, but standard star 1D spectrum from OneSpec ' + 'class does not contain the flat spectrum. The blaze function cannot ' + 'be estimated.' + ) if spec.ext_mode != self.par['extr']: - msgs.warn(f'Standard star 1D spectrum from OneSpec class was obtained using the' - f' {spec.ext_mode} extraction, while the requested extraction is {self.par["extr"]}. ' - f'The available {spec.ext_mode} extraction will be used instead.') + log.warning( + 'Standard star 1D spectrum from OneSpec class was obtained using the' + f' {spec.ext_mode} extraction, while the requested extraction is ' + f'{self.par["extr"]}. The available {spec.ext_mode} extraction will ' + 'be used instead.' 
+ ) self.extr = spec.ext_mode # create sobjs_std @@ -344,7 +355,9 @@ def unpack_std(self): _sobj[f'{self.extr}_MASK'] |= spec.mask.astype(bool) _std_obj = specobjs.SpecObjs(specobjs=np.array([_sobj]), header=spec.head0) else: - msgs.error('Unrecognized class for the 1D spectrum file. Cannot read in the standard') + raise PypeItError( + 'Unrecognized class for the 1D spectrum file. Cannot read in the standard.' + ) # fill sobjs_std if sobjs_std is None: sobjs_std = _std_obj.copy() diff --git a/pypeit/spectrographs/gemini_gmos.py b/pypeit/spectrographs/gemini_gmos.py index f1307f9ee7..8809039c17 100644 --- a/pypeit/spectrographs/gemini_gmos.py +++ b/pypeit/spectrographs/gemini_gmos.py @@ -10,7 +10,6 @@ from astropy.coordinates import SkyCoord from astropy import units from astropy.time import Time -from astropy.wcs import wcs from astropy.io import fits from pypeit import log @@ -736,7 +735,7 @@ def get_maskdef_slitedges(self, ccdnum=None, filename=None, debug=None, # check if the mask design file exists if not Path(_maskfile).exists(): - msgs.error(f'The mask design file {_maskfile} does not exist.') + raise PypeItError(f'The mask design file {_maskfile} does not exist.') # read the mask design file mask_tbl = Table.read(_maskfile, format='fits') @@ -796,13 +795,17 @@ def maskdef_spec_minmax(self, maskfile=None, maskdef_ids=None, nspec=None, binni if maskfile is None or maskdef_ids is None or nspec is None: # If any of these are not provided, we cannot get the maskdef spec minmax # and we will use the whole spectral length instead. - msgs.warn('maskfile, maskdef_id, and nspec must be provided to get the maskdef spec minmax. ' - 'The whole spectral length will be used instead.') + log.warning( + 'maskfile, maskdef_id, and nspec must be provided to get the maskdef spec minmax. ' + 'The whole spectral length will be used instead.' + ) return None, None # check if the binning is provided, even if optional, it's needed for this spectrograph if binning is None: - msgs.error('Binning must be provided to get the slit edges from the mask definition file.') + raise PypeItError( + 'Binning must be provided to get the slit edges from the mask definition file.' + ) # Parse the binning bin_spec, _ = parse.parse_binning(binning) diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index a4c5433342..65f935427f 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -837,9 +837,10 @@ def maskdef_spec_minmax(maskfile=None, maskdef_ids=None, nspec=None, shift=150): :obj:`tuple`: A tuple of two `numpy.ndarray`_ with the spec min and max values. If the maskfile, maskdef_ids, or nspec are not provided, None is returned for both min and max. """ - - msgs.error('This spectrograph does not support the use of mask design to get the ' - 'maskdef spec minmax. Set `maskdef_spec_minmax=False`') + raise PypeItError( + 'This spectrograph does not support the use of mask design to get the maskdef spec ' + 'minmax. 
Set `maskdef_spec_minmax=False`' + ) def configuration_keys(self): """ From c7a01c5989b8457f508b279b416776af19234357 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Mon, 5 Jan 2026 13:54:18 -0800 Subject: [PATCH 29/33] replace msgs --- pypeit/exposure.py | 33 +++++++-------- pypeit/metadata.py | 2 +- pypeit/outputfiles.py | 12 ++++-- pypeit/pypeit.py | 16 +++++--- pypeit/pypeit_steps.py | 70 ++++++++++++++++++++------------ pypeit/scripts/reduce_by_step.py | 50 +++++++++++++---------- 6 files changed, 107 insertions(+), 76 deletions(-) diff --git a/pypeit/exposure.py b/pypeit/exposure.py index 7143460d84..89191791f5 100644 --- a/pypeit/exposure.py +++ b/pypeit/exposure.py @@ -2,8 +2,8 @@ from astropy.io import fits -from pypeit import msgs - +from pypeit import log +from pypeit import PypeItError from pypeit import outputfiles from pypeit.core import parse from pypeit.display import display @@ -123,7 +123,7 @@ def process_exposure(spectrograph, fitstbl, par, frames:list, # Loop on the detectors for det in detectors: - msgs.info(f'Reducing detector {det}') + log.info(f'Reducing detector {det}') # Process sciImg, bkg_redux_sciimg = pypeit_steps.process_one_det( @@ -397,36 +397,37 @@ def reduce_exposure(spectrograph, fitstbl, par, frames, calib_ID, fitstbl, par, bg_frames) # Print status message - msgs_string = 'Reducing target {:s}'.format(fitstbl['target'][frames[0]]) + msgs.newline() + lstr = f'Reducing target {fitstbl["target"][frames[0]]}\n' # TODO: Print these when the frames are actually combined, # backgrounds are used, etc? - msgs_string += 'Combining frames:' + msgs.newline() + lstr += 'Combining frames:\n' for iframe in frames: - msgs_string += '{0:s}'.format(fitstbl['filename'][iframe]) + msgs.newline() - msgs.info(msgs_string) + lstr += f'{fitstbl["filename"][iframe]}\n' + log.info(lstr) if has_bg: - bg_msgs_string = '' + bg_lstr = '' for iframe in bg_frames: - bg_msgs_string += '{0:s}'.format(fitstbl['filename'][iframe]) + msgs.newline() - bg_msgs_string = msgs.newline() + 'Using background from frames:' + msgs.newline() + bg_msgs_string - msgs.info(bg_msgs_string) + bg_lstr += f'{fitstbl["filename"][iframe]}\n' + bg_lstr = '\nUsing background from frames:\n' + bg_lstr + log.info(bg_lstr) # Find the detectors to reduce detectors = spectrograph.select_detectors(subset=par['rdx']['detnum'] if par['rdx']['slitspatnum'] is None else par['rdx']['slitspatnum']) - msgs.info(f'Detectors to work on: {detectors}') + log.info(f'Detectors to work on: {detectors}') # ##################################### # Calibrations for det in detectors: - msgs.info(f'Calibrating detector {det}') + log.info(f'Calibrating detector {det}') # run/load calibration caliBrate = pypeit_steps.calib_one(spectrograph, fitstbl, par, det, calib_ID, calibrations_path, show=show, run_state=run_state, reuse_calibs=reuse_calibs) if not caliBrate.success: - msgs.warn(f'Calibrations for detector {det} were unsuccessful! The step ' - f'that failed was {caliBrate.failed_step}. Continuing by ' - f'skipping this detector.') + log.warning( + f'Calibrations for detector {det} were unsuccessful! The step that failed was ' + f'{caliBrate.failed_step}. Continuing by skipping this detector.' + ) # Remove from list of detectors detectors.remove(det) continue diff --git a/pypeit/metadata.py b/pypeit/metadata.py index 0ebf3233e3..cf453bda09 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -582,7 +582,7 @@ def get_frames_from_combid(self, comb_id): Raised if the 'comb_id' column has not been defined. 
""" if 'comb_id' not in self.keys(): - msgs.error('Cannot get frames from comb_id; run set_combination_groups.') + raise PypeItError('Cannot get frames from comb_id; run set_combination_groups.') # Frames frames = np.where(self['comb_id'] == comb_id)[0] diff --git a/pypeit/outputfiles.py b/pypeit/outputfiles.py index 3d53963e7a..89e1646b9c 100644 --- a/pypeit/outputfiles.py +++ b/pypeit/outputfiles.py @@ -7,7 +7,8 @@ import numpy as np from pathlib import Path -from pypeit import msgs +from pypeit import log +from pypeit import PypeItError def get_std_outfile(fitstbl, par, standard_frames:list): """ @@ -35,9 +36,12 @@ def get_std_outfile(fitstbl, par, standard_frames:list): std_outfile = par['reduce']['findobj']['std_spec1d'] if std_outfile is not None: if not par['reduce']['findobj']['use_std_trace']: - msgs.error('If you provide a standard star spectrum for tracing, you must set use_std_trace=True') + raise PypeItError( + 'If you provide a standard star spectrum for tracing, you must set ' + 'use_std_trace=True' + ) elif not Path(std_outfile).absolute().exists(): - msgs.error(f'Provided standard spec1d file does not exist: {std_outfile}') + raise PypeItError(f'Provided standard spec1d file does not exist: {std_outfile}') return std_outfile # TODO: Need to decide how to associate standards with @@ -52,7 +56,7 @@ def get_std_outfile(fitstbl, par, standard_frames:list): std_outfile = spec_output_file(fitstbl, par, std_frame) \ if isinstance(std_frame, (int,np.integer)) else None if std_outfile is not None and not std_outfile.is_file(): - msgs.error(f'Could not find standard file: {std_outfile}') + raise PypeItError(f'Could not find standard file: {std_outfile}') return std_outfile def intermediate_filename(itype:str, basename:str, det_name:str, diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 0f91be792b..5ac30f4112 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -308,7 +308,7 @@ def reduce_calibID(spectrograph, par, fitstbl, calib_ID:str, # Find the indices of the science frames in this calibration group: grp_this = frame_indx[is_this & in_grp] - msgs.info(f'Found {len(grp_this)} {rtype} frames in calibration group {calib_ID}.') + log.info(f'Found {len(grp_this)} {rtype} frames in calibration group {calib_ID}.') # Associate standards (previously reduced above) for this setup if not reduce_standard: @@ -328,7 +328,7 @@ def reduce_calibID(spectrograph, par, fitstbl, calib_ID:str, # for now... # # Quicklook mode? # if self.par['rdx']['quicklook'] and j > 0: -# msgs.warn('PypeIt executed in quicklook mode. Only reducing science frames ' +# log.warning('PypeIt executed in quicklook mode. Only reducing science frames ' # 'in the first combination group!') # break # @@ -373,8 +373,12 @@ def reduce_calibID(spectrograph, par, fitstbl, calib_ID:str, history=history, skip_write_2d=par['scienceframe']['process']['skip_write_2d']) else: - msgs.warn('No spec2d and spec1d saved to file because the ' - 'calibration/reduction was not successful for all the detectors') + log.warning( + 'No spec2d and spec1d saved to file because the calibration/reduction was ' + 'not successful for all the detectors' + ) else: - msgs.warn(f'Output file: {fitstbl.construct_basename(frames[0])} already ' - 'exists. Set overwrite=True to recreate and overwrite.') + log.warning( + f'Output file: {fitstbl.construct_basename(frames[0])} already exists. Set ' + 'overwrite=True to recreate and overwrite.' 
+ ) diff --git a/pypeit/pypeit_steps.py b/pypeit/pypeit_steps.py index f2cb1e07a8..43e188b5fd 100644 --- a/pypeit/pypeit_steps.py +++ b/pypeit/pypeit_steps.py @@ -10,7 +10,8 @@ from astropy.table import Table -from pypeit import msgs +from pypeit import log +from pypeit import PypeItError from pypeit.calibframe import CalibFrame from pypeit.images import buildimage from pypeit import specobjs @@ -65,8 +66,10 @@ def get_sci_metadata(spectrograph, fitstbl, frame:int, det): elif 'standard' in types: objtype_out = 'standard' else: - msgs.error('get_sci_metadata() should only be run on standard or science frames. ' - f'Types of this frame are: {types}') + raise PypeItError( + 'get_sci_metadata() should only be run on standard or science frames. Types of this ' + f'frame are: {types}' + ) calib_key = CalibFrame.construct_calib_key(fitstbl['setup'][frame], fitstbl['calib'][frame], spectrograph.get_det_name(det)) @@ -167,7 +170,7 @@ def calib_one(spectrograph, fitstbl, par, det, calib_ID, calibrations_path:str, # Instantiate Calibrations class user_slits = slittrace.merge_user_slit(par['rdx']['slitspatnum'], par['rdx']['maskIDs']) - msgs.info(f'Building/loading calibrations for detector {det}') + log.info(f'Building/loading calibrations for detector {det}') caliBrate = calibrations.Calibrations.get_instance( fitstbl, par['calibrations'], spectrograph, calibrations_path, calib_ID, grp_frames[0], det, @@ -178,16 +181,20 @@ def calib_one(spectrograph, fitstbl, par, det, calib_ID, calibrations_path:str, # Check if stop_at_step is not None and stop_at_step not in caliBrate.steps: - msgs.error(f"Requested stop_at_step={stop_at_step} is not a valid calibration step.\n Allowed steps are: {caliBrate.steps}") + raise PypeItError( + f"Requested stop_at_step={stop_at_step} is not a valid calibration step.\n Allowed " + f"steps are: {caliBrate.steps}" + ) # Run caliBrate.run_the_steps(stop_at_step=stop_at_step) # Success? if not caliBrate.success: - msgs.warn(f'Calibrations for detector {det} were unsuccessful! The step ' - f'that failed was {caliBrate.failed_step}. Continuing to next ' - f'detector.') + log.warning( + f'Calibrations for detector {det} were unsuccessful! The step that failed was ' + f'{caliBrate.failed_step}. Continuing to next detector.' + ) return caliBrate @@ -250,7 +257,7 @@ def process_one_det(spectrograph, fitstbl, par, frames:list, caliBrate = load_calibrations_for_frame( spectrograph, fitstbl, par, frames[0], det, calib_ID, calibrations_path) - msgs.info("Image processing begins for {} on det={}".format(basename, det)) + log.info(f"Image processing begins for {basename} on det={det}") # Is this a standard star? std_redux = objtype == 'standard' @@ -301,12 +308,12 @@ def process_one_det(spectrograph, fitstbl, par, frames:list, if not sci_outfile.parent.is_dir(): sci_outfile.parent.mkdir() sciImg.to_file(sci_outfile, overwrite=True) - msgs.info(f'Wrote intermediate science image to {sci_outfile}') + log.info(f'Wrote intermediate science image to {sci_outfile}') # Write out the background image? 
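# Both conditions must hold: a background output path was constructed and the
# bkg_redux processing above actually produced an image to write.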
if bkg_outfile is not None and bkg_redux_sciimg is not None: bkg_redux_sciimg.to_file(bkg_outfile, overwrite=True) - msgs.info(f'Wrote intermediate background image to {bkg_outfile}') + log.info(f'Wrote intermediate background image to {bkg_outfile}') # Return return sciImg, bkg_redux_sciimg @@ -371,13 +378,13 @@ def findobj_on_det(sciImg, spectrograph, fitstbl, par, frames:list, calib_ID:str std_outfile) else: std_trace = None - msgs.info("Object finding begins for {} on det={}".format(basename, det)) + log.info("Object finding begins for {} on det={}".format(basename, det)) # Grab the calibrations caliBrate = load_calibrations_for_frame( spectrograph, fitstbl, par, frames[0], det, calib_ID, calibrations_path) - msgs.info(f'Reducing detector {det}') + log.info(f'Reducing detector {det}') # Instantiate Reduce object # Required for pypeline specific object @@ -519,8 +526,10 @@ def load_calibrations_for_frame(spectrograph, fitstbl, par, frame, det, caliBrate.run_the_steps(reload_only=True) if not caliBrate.success: - msgs.error(f'Calibrations for detector {det} were unsuccessful! The step ' - f'that failed was {caliBrate.failed_step}.') + raise PypeItError( + f'Calibrations for detector {det} were unsuccessful! The step that failed was ' + f'{caliBrate.failed_step}.' + ) return caliBrate @@ -583,16 +592,18 @@ def load_skyregions(spectrograph, fitstbl, par, frame, det, caliBrate, basename=io.remove_suffix(scifile)) regfile = Path(regfile).absolute() if not regfile.exists(): - msgs.error(f'Unable to find SkyRegions file: {regfile} . Create a SkyRegions ' - 'frame using pypeit_skysub_regions, or change the user_regions to ' - 'the percentage format. See documentation.') - msgs.info(f'Loading SkyRegions file: {regfile}') + raise PypeItError( + f'Unable to find SkyRegions file: {regfile} . Create a SkyRegions frame using ' + 'pypeit_skysub_regions, or change the user_regions to the percentage format. ' + 'See documentation.' + ) + log.info(f'Loading SkyRegions file: {regfile}') return buildimage.SkyRegions.from_file(regfile).image.astype(bool) skyregtxt = par['reduce']['skysub']['user_regions'] if isinstance(skyregtxt, list): skyregtxt = ",".join(skyregtxt) - msgs.info(f'Generating skysub mask based on the user defined regions: {skyregtxt}') + log.info(f'Generating skysub mask based on the user defined regions: {skyregtxt}') # NOTE : Do not include spatial flexure here! # It is included when generating the mask in the return statement below slits_left, slits_right, _ \ @@ -602,9 +613,14 @@ def load_skyregions(spectrograph, fitstbl, par, frame, det, caliBrate, # Get the regions status, regions = skysub.read_userregions(skyregtxt, caliBrate.slits.nslits, maxslitlength) if status == 1: - msgs.error("Unknown error in sky regions definition. Please check the value:" + msgs.newline() + skyregtxt) + raise PypeItError( + "Unknown error in sky regions definition. 
Please check the value:\n" + skyregtxt + ) elif status == 2: - msgs.error("Sky regions definition must contain a percentage range, and therefore must contain a ':'") + raise PypeItError( + "Sky regions definition must contain a percentage range, and therefore must " + "contain a ':'" + ) # Generate and return image return skysub.generate_mask(spectrograph.pypeline, regions, caliBrate.slits, slits_left, slits_right, spat_flexure=spat_flexure) @@ -693,7 +709,7 @@ def extract_det(spectrograph, fitstbl, par, scaleImg = sciImg.rel_scaleImg if not par['reduce']['extraction']['skip_extraction']: - msgs.info(f"Extraction begins for {basename} on det={det}") + log.info(f"Extraction begins for {basename} on det={det}") # Instantiate Reduce object # Required for pipeline specific object # At instantiation, the fullmask in self.sciImg is modified @@ -713,7 +729,7 @@ def extract_det(spectrograph, fitstbl, par, slitgpm = np.logical_not(exTract.extract_bpm) slitshift = exTract.slitshift else: - msgs.info(f"Extraction skipped for {basename} on det={det}") + log.info(f"Extraction skipped for {basename} on det={det}") # Since the extraction was not performed, fill the arrays with the best available information skymodel, bkg_redux_skymodel, objmodel, ivarmodel, outmask, sobjs = \ final_sky, \ @@ -887,7 +903,7 @@ def refframe_correct(spectrograph, par, slits, ra, dec, obstime, slitgpm=None, vel_corr = 0.0 if refframe in ['heliocentric', 'barycentric'] \ and par['calibrations']['wavelengths']['reference'] != 'pixel': - msgs.info("Performing a {0} correction".format(par['calibrations']['wavelengths']['refframe'])) + log.info(f"Performing a {par['calibrations']['wavelengths']['refframe']} correction") # Calculate correction radec = ltu.radec_to_coord((ra, dec)) vel, vel_corr = wave.geomotion_correct(radec, obstime, @@ -896,7 +912,7 @@ def refframe_correct(spectrograph, par, slits, ra, dec, obstime, slitgpm=None, spectrograph.telescope['elevation'], refframe) # Apply correction to objects - msgs.info('Applying {0} correction = {1:0.5f} km/s'.format(refframe, vel)) + log.info(f'Applying {refframe} correction = {vel:0.5f} km/s') if (sobjs is not None) and (sobjs.nobj != 0): # Loop on slits to apply gd_slitord = slits.slitord_id[slitgpm] @@ -913,7 +929,7 @@ def refframe_correct(spectrograph, par, slits, ra, dec, obstime, slitgpm=None, if waveimg is not None: waveimg *= vel_corr else: - msgs.info('A wavelength reference frame correction will not be performed.') + log.info('A wavelength reference frame correction will not be performed.') # Return the value of the correction and the corrected wavelength image return vel_corr, waveimg diff --git a/pypeit/scripts/reduce_by_step.py b/pypeit/scripts/reduce_by_step.py index 7fb66ae7b7..bd87398390 100644 --- a/pypeit/scripts/reduce_by_step.py +++ b/pypeit/scripts/reduce_by_step.py @@ -52,8 +52,8 @@ def main(args): from pypeit.core import parse from pypeit import pypeit from pypeit import pypeit_steps - from pypeit import msgs - from pypeit import pypmsgs + from pypeit import log + from pypeit import PypeItError from pypeit import outputfiles from pypeit.images import pypeitimage from pypeit import specobjs @@ -90,7 +90,7 @@ def main(args): else: det = pypeIt.spectrograph.select_detectors(subset=parse.eval_detectors(args.det)) if len(det) > 1: - msgs.error("The input --det must be a single detector or mosaic.") + raise PypeItError("The input --det must be a single detector or mosaic.") det = det[0] # detector name det_name = pypeIt.spectrograph.get_det_name(det) @@ -98,11 
+98,14 @@ def main(args): # Find the frame mt_row = pypeIt.fitstbl['filename'] == args.frame if np.sum(mt_row) != 1: - msgs.error(f"Frame {args.frame} not found or not unique") + raise PypeItError(f"Frame {args.frame} not found or not unique") frame = int(np.where(mt_row)[0][0]) calib_IDs = pypeIt.fitstbl.find_frame_calib_groups(frame) if len(calib_IDs) != 1: - msgs.error(f"Frame {args.frame} is a calibration frame. This script is for science/standard frames only") + raise PypeItError( + f"Frame {args.frame} is a calibration frame. This script is for science/standard " + "frames only" + ) calib_ID = calib_IDs[0] # Sci metadata @@ -140,7 +143,7 @@ def main(args): if args.step == 'findobj': # Load intermediate frames needed for finding objects - msgs.info(f'Loading images for detector {det}') + log.info(f'Loading images for detector {det}') sciImg = pypeitimage.PypeItImage.from_file(sci_filename) if bg_frames is not None and len(bg_frames) > 0: bkg_redux_sciimg = pypeitimage.PypeItImage.from_file(bkg_filename) @@ -154,9 +157,12 @@ def main(args): try: std_outfile = outputfiles.get_std_outfile(pypeIt.fitstbl, pypeIt.par, frame_indx[is_standard]) - except pypmsgs.PypeItError: - msgs.warn('No reduced standard star spec1d file found for this science frame, but one was expected because it is in your PypeIt file.\n'+\ - 'Continuing without standard star information.') + except PypeItError: + log.warning( + 'No reduced standard star spec1d file found for this science frame, but ' + 'one was expected because it is in your PypeIt file.\n Continuing ' + 'without standard star information.' + ) std_outfile = None else: std_outfile = None @@ -202,53 +208,53 @@ def main(args): # Write # sobjs object found sobjs_obj_find.write_to_fits({}, spec1d_filename) - msgs.info(f'Wrote intermediate spec1d file with objects found to {spec1d_filename}') + log.info(f'Wrote intermediate spec1d file with objects found to {spec1d_filename}') # final sky image skyimg = pypeitimage.PypeItImage(final_global_sky) if not sky_filename.parent.is_dir(): sky_filename.parent.mkdir() skyimg.to_file(sky_filename, overwrite=True) - msgs.info(f'Wrote final sky image to {sky_filename}') + log.info(f'Wrote final sky image to {sky_filename}') # bkg_redux sky image if bkg_redux_global_sky is not None: bkgredux_skyimg = pypeitimage.PypeItImage(bkg_redux_global_sky) bkgredux_skyimg.to_file(bkgredux_sky_filename, overwrite=True) - msgs.info(f'Wrote bkg_redux final sky image to {bkgredux_sky_filename}') + log.info(f'Wrote bkg_redux final sky image to {bkgredux_sky_filename}') # slits _slits.to_file(slits_filename, overwrite=True) - msgs.info(f'Wrote intermediate slits to {slits_filename}') + log.info(f'Wrote intermediate slits to {slits_filename}') # updated sciImg sciImg.to_file(sci_filename, overwrite=True) - msgs.info(f'Wrote updated science image to {sci_filename}') + log.info(f'Wrote updated science image to {sci_filename}') # Extract? 
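# The extract step is restartable: it reloads the intermediate products written
# by the earlier steps (the processed science image, the final sky model and
# optional bkg_redux sky model, the spec1d objects from findobj, and the slit
# traces), raising PypeItError when a required file is missing.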
if args.step == 'extract': # Load intermediate frames needed for the extraction - msgs.info(f'Loading images for detector {det}') + log.info(f'Loading images for detector {det}') sciImg = pypeitimage.PypeItImage.from_file(sci_filename) # sky images - msgs.info(f'Loading sky image for detector {det}') + log.info(f'Loading sky image for detector {det}') if not sky_filename.is_file(): - msgs.error(f'Sky image {sky_filename} not found!') + raise PypeItError(f'Sky image {sky_filename} not found!') skyimg = pypeitimage.PypeItImage.from_file(sky_filename) skyimg = skyimg.image if bkgredux_sky_filename.is_file(): - msgs.info(f'Loading bkg_redux sky image for detector {det}') + log.info(f'Loading bkg_redux sky image for detector {det}') bkg_redux_skyimg = pypeitimage.PypeItImage.from_file(bkgredux_sky_filename) bkg_redux_skyimg = bkg_redux_skyimg.image else: bkg_redux_skyimg = None # specobjs from findobj - msgs.info(f'Loading spec1d file for detector {det}') + log.info(f'Loading spec1d file for detector {det}') if not spec1d_filename.is_file(): - msgs.error(f'spec1d file {spec1d_filename} not found!') + raise PypeItError(f'spec1d file {spec1d_filename} not found!') specobjs_objfind = specobjs.SpecObjs.from_fitsfile(spec1d_filename) # slits - msgs.info(f'Loading slits for detector {det}') + log.info(f'Loading slits for detector {det}') if not slits_filename.is_file(): - msgs.error(f'Slits file {slits_filename} not found!') + raise PypeItError(f'Slits file {slits_filename} not found!') calib_slits = slittrace.SlitTraceSet.from_file(slits_filename) # Container for Spec2DObj From 5979a52192374cf124625fde723c605de47d2115 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Mon, 5 Jan 2026 14:05:28 -0800 Subject: [PATCH 30/33] doc update --- doc/api/pypeit.display.pypeit_modes.rst | 8 ++ doc/api/pypeit.display.rst | 1 + doc/api/pypeit.exposure.rst | 2 +- doc/api/pypeit.outputfiles.rst | 2 +- doc/api/pypeit.pypeit_steps.rst | 2 +- doc/api/pypeit.rst | 1 + doc/api/pypeit.scripts.reduce_by_step.rst | 2 +- doc/help/pypeit_reduce_by_step.rst | 47 +++++--- doc/help/pypeit_sensfunc.rst | 12 ++- doc/help/pypeit_view_fits.rst | 112 +++++++++++--------- doc/help/run_pypeit.rst | 43 ++++---- doc/include/class_datamodel_pypeitimage.rst | 46 ++++---- doc/include/class_datamodel_sensfunc.rst | 2 +- doc/include/class_datamodel_specobj.rst | 2 + doc/include/datamodel_arcimage.rst | 2 +- doc/include/datamodel_biasimage.rst | 2 +- doc/include/datamodel_darkimage.rst | 2 +- doc/include/datamodel_specobj.rst | 2 + doc/include/datamodel_tiltimage.rst | 2 +- doc/include/dependencies_table.rst | 2 +- doc/include/inst_detector_table.rst | 18 ++-- doc/pypeit_par.rst | 83 +++++++++------ 22 files changed, 233 insertions(+), 162 deletions(-) create mode 100644 doc/api/pypeit.display.pypeit_modes.rst diff --git a/doc/api/pypeit.display.pypeit_modes.rst b/doc/api/pypeit.display.pypeit_modes.rst new file mode 100644 index 0000000000..e4dfb7cc60 --- /dev/null +++ b/doc/api/pypeit.display.pypeit_modes.rst @@ -0,0 +1,8 @@ +pypeit.display.pypeit\_modes module +=================================== + +.. 
automodule:: pypeit.display.pypeit_modes + :members: + :private-members: + :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.display.rst b/doc/api/pypeit.display.rst index 32d358d331..63c2a04e31 100644 --- a/doc/api/pypeit.display.rst +++ b/doc/api/pypeit.display.rst @@ -8,6 +8,7 @@ Submodules :maxdepth: 4 pypeit.display.display + pypeit.display.pypeit_modes pypeit.display.slitwavelength pypeit.display.spec1dview diff --git a/doc/api/pypeit.exposure.rst b/doc/api/pypeit.exposure.rst index e4ae1ea186..2a01e16b9d 100644 --- a/doc/api/pypeit.exposure.rst +++ b/doc/api/pypeit.exposure.rst @@ -4,5 +4,5 @@ pypeit.exposure module .. automodule:: pypeit.exposure :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.outputfiles.rst b/doc/api/pypeit.outputfiles.rst index 09550cc98b..1530b347ad 100644 --- a/doc/api/pypeit.outputfiles.rst +++ b/doc/api/pypeit.outputfiles.rst @@ -4,5 +4,5 @@ pypeit.outputfiles module .. automodule:: pypeit.outputfiles :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.pypeit_steps.rst b/doc/api/pypeit.pypeit_steps.rst index 240904554a..ff773965d9 100644 --- a/doc/api/pypeit.pypeit_steps.rst +++ b/doc/api/pypeit.pypeit_steps.rst @@ -4,5 +4,5 @@ pypeit.pypeit\_steps module .. automodule:: pypeit.pypeit_steps :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/api/pypeit.rst b/doc/api/pypeit.rst index c73816107a..22b8864f3e 100644 --- a/doc/api/pypeit.rst +++ b/doc/api/pypeit.rst @@ -35,6 +35,7 @@ Submodules pypeit.coadd3d pypeit.datamodel pypeit.edgetrace + pypeit.exceptions pypeit.exposure pypeit.extraction pypeit.find_objects diff --git a/doc/api/pypeit.scripts.reduce_by_step.rst b/doc/api/pypeit.scripts.reduce_by_step.rst index 34a40ade75..cce51132da 100644 --- a/doc/api/pypeit.scripts.reduce_by_step.rst +++ b/doc/api/pypeit.scripts.reduce_by_step.rst @@ -4,5 +4,5 @@ pypeit.scripts.reduce\_by\_step module .. automodule:: pypeit.scripts.reduce_by_step :members: :private-members: - :undoc-members: :show-inheritance: + :undoc-members: diff --git a/doc/help/pypeit_reduce_by_step.rst b/doc/help/pypeit_reduce_by_step.rst index 7656e0e249..770c4a9f1b 100644 --- a/doc/help/pypeit_reduce_by_step.rst +++ b/doc/help/pypeit_reduce_by_step.rst @@ -1,26 +1,41 @@ .. code-block:: console $ pypeit_reduce_by_step -h - usage: pypeit_reduce_by_step [-h] [--det DET] [--show] pypeit_file frame step + usage: pypeit_reduce_by_step [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--det DET] [--show] + pypeit_file frame {process,findobj,extract} Run one of the PypeIt reduction steps on a single frame (and detector) positional arguments: - pypeit_file PypeIt reduction file (must have .pypeit extension) - frame Raw science/standard frame to reduce as listed in your PypeIt - file, e.g. b28.fits.gz. - step Reduction step to perform. Must be "process" to perform basic - image processing (bias subtraction, field flattening, etc), - "findobj" to perform object detection and initial sky - subtraction, or "extract" to extract 1D spectra. + pypeit_file PypeIt reduction file (must have .pypeit extension) + frame Raw science/standard frame to reduce as listed in your + PypeIt file, e.g. b28.fits.gz. + {process,findobj,extract} + Reduction step to perform. 
Must be "process" to perform + basic image processing (bias subtraction, field + flattening, etc), "findobj" to perform object detection + and sky subtraction, or "extract" to extract 1D spectra. options: - -h, --help show this help message and exit - --det DET Single detector number or Mosaic tuple. The Mosaic tuple must - include the parentheses and be provided as a string, e.g. - "(1,2)". Required, but the list of options is provided if nothing - is provided. - --show Show reduction steps via plots (which will block further - execution until clicked on) and outputs to ginga. Requires remote - control ginga session via "ginga --modules=RC,SlitWavelength &" + -h, --help show this help message and exit + -v, --verbosity VERBOSITY + Verbosity level, which must be 0, 1, or 2. Level 0 + includes warning and error messages, level 1 adds + informational messages, and level 2 adds debugging + messages and the calling sequence. + --log_file LOG_FILE Name for the log file. If set to "default", a default + name is used. If None, a log file is not produced. + --log_level LOG_LEVEL + Verbosity level for the log file. If a log file is + produce and this is None, the file log will match the + console stream log. + --det DET Single detector number or Mosaic tuple. The Mosaic tuple + must include the parentheses and be provided as a + string, e.g. "(1,2)". Required, but the list of options + is provided if nothing is provided. + --show Show reduction steps via plots (which will block further + execution until clicked on) and outputs to ginga. + Requires remote control ginga session via "ginga + --modules=RC,SlitWavelength &" \ No newline at end of file diff --git a/doc/help/pypeit_sensfunc.rst b/doc/help/pypeit_sensfunc.rst index ea1679880c..e3ee92d384 100644 --- a/doc/help/pypeit_sensfunc.rst +++ b/doc/help/pypeit_sensfunc.rst @@ -5,14 +5,18 @@ [--log_level LOG_LEVEL] [--extr {OPT,BOX}] [--algorithm {UVIS,IR}] [--multi MULTI] [-o OUTFILE] [-s SENS_FILE] [-f] [--debug] [--par_outfile PAR_OUTFILE] - spec1dfile + spec1dfiles [spec1dfiles ...] Compute a sensitivity function positional arguments: - spec1dfile spec1d file for the standard that will be used to - compute the sensitivity function. This can be the output - file of `pypeit_coadd_1dspec` for non Echelle data. + spec1dfiles file(s) of the reduced standard star spectrum. These can + be either spec1d*.fits files or the output of + `pypeit_coadd_1dspec` (except for cross-dispersed + echelle data). Multiple files can be provided, but they + are helpful onlyif they cover different wavelength + ranges, since thisscript will splice (not combine) them + together. options: -h, --help show this help message and exit diff --git a/doc/help/pypeit_view_fits.rst b/doc/help/pypeit_view_fits.rst index 080b4bc695..c6e6ee0a32 100644 --- a/doc/help/pypeit_view_fits.rst +++ b/doc/help/pypeit_view_fits.rst @@ -1,60 +1,74 @@ .. 
code-block:: console $ pypeit_view_fits -h - usage: pypeit_view_fits [-h] [--list] [--proc] [--bkg_file BKG_FILE] [--inter] - [--exten EXTEN] [--det [DET ...]] [--chname CHNAME] - [--showmask] [--embed] + usage: pypeit_view_fits [-h] [-v VERBOSITY] [--log_file LOG_FILE] + [--log_level LOG_LEVEL] [--list] [--proc] + [--bkg_file BKG_FILE] [--inter] [--exten EXTEN] + [--det [DET ...]] [--chname CHNAME] [--showmask] + [--embed] spectrograph file View FITS files with ginga positional arguments: - spectrograph A valid spectrograph identifier: aat_uhrf, apf_levy, - bok_bc, gemini_flamingos1, gemini_flamingos2, - gemini_gmos_north_e2v, gemini_gmos_north_ham, - gemini_gmos_north_ham_ns, gemini_gmos_south_ham, - gemini_gnirs_echelle, gemini_gnirs_ifu, gtc_maat, - gtc_osiris, gtc_osiris_plus, jwst_nircam, jwst_nirspec, - keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, - keck_lris_blue, keck_lris_blue_orig, keck_lris_red, - keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, - keck_nires, keck_nirspec_high, keck_nirspec_high_old, - keck_nirspec_low, lbt_luci1, lbt_luci2, lbt_mods1b, - lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, lbt_mods2b, - lbt_mods2b_proc, lbt_mods2r, lbt_mods2r_proc, ldt_deveny, - magellan_fire, magellan_fire_long, magellan_mage, - mdm_modspec, mdm_osmos_mdm4k, mdm_osmos_r4k, - mmt_binospec, mmt_bluechannel, mmt_mmirs, not_alfosc, - not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, - p200_dbsp_red, p200_ngps_i, p200_ngps_r, p200_tspec, - shane_kast_blue, shane_kast_red, shane_kast_red_ret, - soar_goodman_blue, soar_goodman_red, subaru_focas, - tng_dolores, vlt_fors2, vlt_sinfoni, vlt_xshooter_nir, - vlt_xshooter_uvb, vlt_xshooter_vis, wht_isis_blue, - wht_isis_red - file FITS file. Either a Raw file or an Intermediate PypeIt - file + spectrograph A valid spectrograph identifier: aat_uhrf, apf_levy, + bok_bc, gemini_flamingos1, gemini_flamingos2, + gemini_gmos_north_e2v, gemini_gmos_north_ham, + gemini_gmos_north_ham_ns, gemini_gmos_south_ham, + gemini_gnirs_echelle, gemini_gnirs_ifu, gtc_maat, + gtc_osiris, gtc_osiris_plus, jwst_nircam, jwst_nirspec, + keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, + keck_lris_blue, keck_lris_blue_orig, keck_lris_red, + keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, + keck_nires, keck_nirspec_high, keck_nirspec_high_old, + keck_nirspec_low, lbt_luci1, lbt_luci2, lbt_mods1b, + lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, + lbt_mods2b, lbt_mods2b_proc, lbt_mods2r, + lbt_mods2r_proc, ldt_deveny, magellan_fire, + magellan_fire_long, magellan_mage, mdm_modspec, + mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec, + mmt_bluechannel, mmt_mmirs, not_alfosc, not_alfosc_vert, + ntt_efosc2, p200_dbsp_blue, p200_dbsp_red, p200_ngps_i, + p200_ngps_r, p200_tspec, shane_kast_blue, + shane_kast_red, shane_kast_red_ret, soar_goodman_blue, + soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2, + vlt_sinfoni, vlt_xshooter_nir, vlt_xshooter_uvb, + vlt_xshooter_vis, wht_isis_blue, wht_isis_red + file FITS file. Either a Raw file or an Intermediate PypeIt + file options: - -h, --help show this help message and exit - --list List the extensions only? (default: False) - --proc Process the image (i.e. orient, overscan subtract, - multiply by gain) using pypeit.images.buildimage. - (default: False) - --bkg_file BKG_FILE FITS file to be subtracted from the image in file.--proc - must be set in order for this option to work. 
(default:
-                         None)
-    --inter              Input file is an Intermediate SciImage file (default:
-                         False)
-    --exten EXTEN        Show a FITS extension in the raw file. Note --proc and
-                         --mosaic will not work with this option. (default: None)
-    --det [DET ...]      Detector(s) to show. If more than one, the list of
-                         detectors, i.e. --det 4 8 to show detectors 4 and 8. This
-                         combination must be one of the allowed mosaics hard-coded
-                         for the selected spectrograph. Using "mosaic" for
-                         gemini_gmos, keck_deimos, or keck_lris will show the
-                         mosaic of all detectors. (default: 1)
-    --chname CHNAME      Name of Ginga tab (default: Image)
-    --showmask           Overplot masked pixels (default: False)
-    --embed              Upon completion embed in ipython shell (default: False)
+    spectrograph         A valid spectrograph identifier: aat_uhrf, apf_levy,
+                         bok_bc, gemini_flamingos1, gemini_flamingos2,
+                         gemini_gmos_north_e2v, gemini_gmos_north_ham,
+                         gemini_gmos_north_ham_ns, gemini_gmos_south_ham,
+                         gemini_gnirs_echelle, gemini_gnirs_ifu, gtc_maat,
+                         gtc_osiris, gtc_osiris_plus, jwst_nircam, jwst_nirspec,
+                         keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi,
+                         keck_lris_blue, keck_lris_blue_orig, keck_lris_red,
+                         keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire,
+                         keck_nires, keck_nirspec_high, keck_nirspec_high_old,
+                         keck_nirspec_low, lbt_luci1, lbt_luci2, lbt_mods1b,
+                         lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc,
+                         lbt_mods2b, lbt_mods2b_proc, lbt_mods2r,
+                         lbt_mods2r_proc, ldt_deveny, magellan_fire,
+                         magellan_fire_long, magellan_mage, mdm_modspec,
+                         mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec,
+                         mmt_bluechannel, mmt_mmirs, not_alfosc, not_alfosc_vert,
+                         ntt_efosc2, p200_dbsp_blue, p200_dbsp_red, p200_ngps_i,
+                         p200_ngps_r, p200_tspec, shane_kast_blue,
+                         shane_kast_red, shane_kast_red_ret, soar_goodman_blue,
+                         soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2,
+                         vlt_sinfoni, vlt_xshooter_nir, vlt_xshooter_uvb,
+                         vlt_xshooter_vis, wht_isis_blue, wht_isis_red
+    file                 FITS file. Either a Raw file or an Intermediate PypeIt
+                         file
 
    options:
+    -h, --help           show this help message and exit
+    -v, --verbosity VERBOSITY
+                         Verbosity level, which must be 0, 1, or 2. Level 0
+                         includes warning and error messages, level 1 adds
+                         informational messages, and level 2 adds debugging
+                         messages and the calling sequence. (default: 2)
+    --log_file LOG_FILE  Name for the log file. If set to "default", a default
+                         name is used. If None, a log file is not produced.
+                         (default: None)
+    --log_level LOG_LEVEL
+                         Verbosity level for the log file. If a log file is
+                         produced and this is None, the file log will match the
+                         console stream log. (default: None)
+    --list               List the extensions only? (default: False)
+    --proc               Process the image (i.e. orient, overscan subtract,
+                         multiply by gain) using pypeit.images.buildimage.
+                         (default: False)
+    --bkg_file BKG_FILE  FITS file to be subtracted from the image in file.
+                         --proc must be set in order for this option to work.
+                         (default: None)
+    --inter              Input file is an Intermediate SciImage file (default:
+                         False)
+    --exten EXTEN        Show a FITS extension in the raw file. Note --proc and
+                         --mosaic will not work with this option. (default: None)
+    --det [DET ...]      Detector(s) to show. If more than one, the list of
+                         detectors, i.e. --det 4 8 to show detectors 4 and 8.
+                         This combination must be one of the allowed mosaics
+                         hard-coded for the selected spectrograph. Using "mosaic"
+                         for gemini_gmos, keck_deimos, or keck_lris will show the
+                         mosaic of all detectors.
(default: 1) + --chname CHNAME Name of Ginga tab (default: Image) + --showmask Overplot masked pixels (default: False) + --embed Upon completion embed in ipython shell (default: False) \ No newline at end of file diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index f4f969c247..38238381f9 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -5,27 +5,28 @@ [--log_level LOG_LEVEL] [-r REDUX_PATH] [-m] [-s] [-o] [-c] pypeit_file - ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.18.2.dev202+g3b3e96589 - ## - ## Available spectrographs include: - ## aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, - ## gemini_gmos_north_e2v, gemini_gmos_north_ham, - ## gemini_gmos_north_ham_ns, gemini_gmos_south_ham, gemini_gnirs_echelle, - ## gemini_gnirs_ifu, gtc_maat, gtc_osiris, gtc_osiris_plus, jwst_nircam, - ## jwst_nirspec, keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, - ## keck_lris_blue, keck_lris_blue_orig, keck_lris_red, - ## keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, keck_nires, - ## keck_nirspec_high, keck_nirspec_high_old, keck_nirspec_low, lbt_luci1, - ## lbt_luci2, lbt_mods1b, lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, - ## lbt_mods2b, lbt_mods2b_proc, lbt_mods2r, lbt_mods2r_proc, ldt_deveny, - ## magellan_fire, magellan_fire_long, magellan_mage, mdm_modspec, - ## mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec, mmt_bluechannel, - ## mmt_mmirs, not_alfosc, not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, - ## p200_dbsp_red, p200_ngps_i, p200_ngps_r, p200_tspec, shane_kast_blue, - ## shane_kast_red, shane_kast_red_ret, soar_goodman_blue, - ## soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2, vlt_sinfoni, - ## vlt_xshooter_nir, vlt_xshooter_uvb, vlt_xshooter_vis, wht_isis_blue, - ## wht_isis_red + PypeIt: The Python Spectroscopic Data Reduction Pipeline + Version 1.18.2.dev571+gc7a01c598.d20260105 + + Available spectrographs include: + aat_uhrf, apf_levy, bok_bc, gemini_flamingos1, gemini_flamingos2, + gemini_gmos_north_e2v, gemini_gmos_north_ham, + gemini_gmos_north_ham_ns, gemini_gmos_south_ham, gemini_gnirs_echelle, + gemini_gnirs_ifu, gtc_maat, gtc_osiris, gtc_osiris_plus, jwst_nircam, + jwst_nirspec, keck_deimos, keck_esi, keck_hires, keck_kcrm, keck_kcwi, + keck_lris_blue, keck_lris_blue_orig, keck_lris_red, + keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, keck_nires, + keck_nirspec_high, keck_nirspec_high_old, keck_nirspec_low, lbt_luci1, + lbt_luci2, lbt_mods1b, lbt_mods1b_proc, lbt_mods1r, lbt_mods1r_proc, + lbt_mods2b, lbt_mods2b_proc, lbt_mods2r, lbt_mods2r_proc, ldt_deveny, + magellan_fire, magellan_fire_long, magellan_mage, mdm_modspec, + mdm_osmos_mdm4k, mdm_osmos_r4k, mmt_binospec, mmt_bluechannel, + mmt_mmirs, not_alfosc, not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, + p200_dbsp_red, p200_ngps_i, p200_ngps_r, p200_tspec, shane_kast_blue, + shane_kast_red, shane_kast_red_ret, soar_goodman_blue, + soar_goodman_red, subaru_focas, tng_dolores, vlt_fors2, vlt_sinfoni, + vlt_xshooter_nir, vlt_xshooter_uvb, vlt_xshooter_vis, wht_isis_blue, + wht_isis_red positional arguments: pypeit_file PypeIt reduction file (must have .pypeit extension) diff --git a/doc/include/class_datamodel_pypeitimage.rst b/doc/include/class_datamodel_pypeitimage.rst index 9008ab7bc3..35ec692792 100644 --- a/doc/include/class_datamodel_pypeitimage.rst +++ b/doc/include/class_datamodel_pypeitimage.rst @@ -1,24 +1,26 @@ -**Version**: 1.3.0 +**Version**: 1.3.1 -================ 
=================================================================================================== ================= ======================================================================================================================================================================================== -Attribute Type Array Type Description -================ =================================================================================================== ================= ======================================================================================================================================================================================== -``PYP_SPEC`` str PypeIt spectrograph name -``amp_img`` `numpy.ndarray`_ `numpy.integer`_ Provides the amplifier that contributed to each pixel. If this is a detector mosaic, this must be used in combination with ``det_img`` to select pixels for a given detector amplifier. -``base_var`` `numpy.ndarray`_ `numpy.floating`_ Base-level image variance, excluding count shot-noise -``det_img`` `numpy.ndarray`_ `numpy.integer`_ If a detector mosaic, this image provides the detector that contributed to each pixel. -``detector`` :class:`~pypeit.images.detector_container.DetectorContainer`, :class:`~pypeit.images.mosaic.Mosaic` The detector (see :class:`~pypeit.images.detector_container.DetectorContainer`) or mosaic (see :class:`~pypeit.images.mosaic.Mosaic`) parameters -``exptime`` int, float Effective exposure time (s) -``filename`` str Filename for the image -``fullmask`` :class:`~pypeit.images.imagebitmask.ImageBitMaskArray` Image mask -``image`` `numpy.ndarray`_ `numpy.floating`_ Primary image data -``img_scale`` `numpy.ndarray`_ `numpy.floating`_ Image count scaling applied (e.g., 1/flat-field) -``ivar`` `numpy.ndarray`_ `numpy.floating`_ Inverse variance image -``nimg`` `numpy.ndarray`_ `numpy.integer`_ If a combination of multiple images, this is the number of images that contributed to each pixel -``noise_floor`` float Noise floor included in variance -``rn2img`` `numpy.ndarray`_ `numpy.floating`_ Read noise squared image -``shot_noise`` bool Shot-noise included in variance -``spat_flexure`` float Shift, in spatial pixels, between this image and SlitTrace -``units`` str (Unscaled) Pixel units (e- or ADU) -================ =================================================================================================== ================= ======================================================================================================================================================================================== +================ =================================================================================================== ================= ============================================================================================================================================================================================================================ +Attribute Type Array Type Description +================ =================================================================================================== ================= ============================================================================================================================================================================================================================ +``PYP_SPEC`` str PypeIt spectrograph name +``amp_img`` `numpy.ndarray`_ `numpy.integer`_ Provides the amplifier that contributed to each pixel. 
If this is a detector mosaic, this must be used in combination with ``det_img`` to select pixels for a given detector amplifier.
+``base_var`` `numpy.ndarray`_ `numpy.floating`_ Base-level image variance, excluding count shot-noise
+``det_img`` `numpy.ndarray`_ `numpy.integer`_ If a detector mosaic, this image provides the detector that contributed to each pixel.
+``detector`` :class:`~pypeit.images.detector_container.DetectorContainer`, :class:`~pypeit.images.mosaic.Mosaic` The detector (see :class:`~pypeit.images.detector_container.DetectorContainer`) or mosaic (see :class:`~pypeit.images.mosaic.Mosaic`) parameters
+``exptime`` int, float Effective exposure time (s)
+``filename`` str Filename for the image
+``flex_shift`` `numpy.ndarray`_ `numpy.floating`_ Array of global spectral shifts (pixels) of the wavelength array at the center of each slit to correct for spectral flexure. This is calculated using the sky spectrum and is therefore updated during object finding/extraction.
+``fullmask`` :class:`~pypeit.images.imagebitmask.ImageBitMaskArray` Image mask
+``image`` `numpy.ndarray`_ `numpy.floating`_ Primary image data
+``img_scale`` `numpy.ndarray`_ `numpy.floating`_ Image count scaling applied (e.g., 1/flat-field)
+``ivar`` `numpy.ndarray`_ `numpy.floating`_ Inverse variance image
+``nimg`` `numpy.ndarray`_ `numpy.integer`_ If a combination of multiple images, this is the number of images that contributed to each pixel
+``noise_floor`` float Noise floor included in variance
+``rel_scaleImg`` `numpy.ndarray`_ `numpy.floating`_ Image used to apply a relative scaling to the science image to correct its spectral illumination. Currently only used for IFU reductions. This is calculated and updated during object finding.
+``rn2img`` `numpy.ndarray`_ `numpy.floating`_ Read noise squared image
+``shot_noise`` bool Shot-noise included in variance
+``spat_flexure`` float Shift, in spatial pixels, between this image and SlitTrace
+``units`` str (Unscaled) Pixel units (e- or ADU)
+================ =================================================================================================== ================= ============================================================================================================================================================================================================================
diff --git a/doc/include/class_datamodel_sensfunc.rst b/doc/include/class_datamodel_sensfunc.rst
index e57591681c..54f113a963 100644
--- a/doc/include/class_datamodel_sensfunc.rst
+++ b/doc/include/class_datamodel_sensfunc.rst
@@ -11,7 +11,7 @@ Attribute Type Array Type Description
 ``extr`` str Extraction method used for the standard star (OPT or BOX)
 ``pypeline`` str PypeIt pipeline reduction path
 ``sens`` `astropy.table.table.Table`_ Table with the sensitivity function
-``spec1df`` str PypeIt spec1D file used to for sensitivity function
+``spec1df`` str PypeIt spec1D file(s) used for the sensitivity function
 ``std_cal`` str File name (or shorthand) with the standard flux data
 ``std_dec`` float DEC of the standard source
 ``std_name`` str Type of standard source
diff --git a/doc/include/class_datamodel_specobj.rst b/doc/include/class_datamodel_specobj.rst
index a315d9c0ff..3854ba4f23 100644
--- a/doc/include/class_datamodel_specobj.rst
+++ b/doc/include/class_datamodel_specobj.rst
@@ -68,10 +68,12 @@ Attribute Type
 ``SPAT_FWHM`` float Spatial FWHM of the object (arcsec)
 ``SPAT_PIXPOS`` float, `numpy.floating`_ Spatial location of the trace on detector (pixel) at half-way
``SPAT_PIXPOS_ID`` int, `numpy.integer`_ Nearest integer spatial location of the trace on detector (pixel) at half-way used as a unique identifier for the naming model
+``SPEC_DET`` `numpy.ndarray`_ `numpy.integer`_ Array of detector indices for each pixel in the spectral direction. This is only available for mosaic reductions.
 ``TRACE_SPAT`` `numpy.ndarray`_ float Object trace along the spec (spatial pixel)
 ``VEL_CORR`` float Relativistic velocity correction for wavelengths
 ``VEL_TYPE`` str Type of heliocentric correction (if any)
 ``WAVE_RMS`` float, `numpy.floating`_ RMS (pix) for the wavelength solution for this slit.
+``ech_snr`` float, `numpy.floating`_ Median S/N of the echelle spectrum
 ``hand_extract_flag`` bool Boolean indicating if this is a forced extraction at the location provided by the user.
 ``maskwidth`` float, `numpy.floating`_ Size (in units of spatial fwhm) of the region used for local sky subtraction
 ``sign`` float Sign of the object profile (+1 or -1). + is a positive profile above the sky background.
diff --git a/doc/include/datamodel_arcimage.rst b/doc/include/datamodel_arcimage.rst
index 513d279776..221c8887a8 100644
--- a/doc/include/datamodel_arcimage.rst
+++ b/doc/include/datamodel_arcimage.rst
@@ -1,5 +1,5 @@
 
-Version 1.3.0
+Version 1.3.1
 
 ================ ============================== ========= ================================================================================================================================================
 HDU Name HDU Type Data Type Description
diff --git a/doc/include/datamodel_biasimage.rst b/doc/include/datamodel_biasimage.rst
index fcc54ad6c9..a9710f5d4b 100644
--- a/doc/include/datamodel_biasimage.rst
+++ b/doc/include/datamodel_biasimage.rst
@@ -1,5 +1,5 @@
 
-Version 1.3.0
+Version 1.3.1
 
 ================= ============================== ========= ================================================================================================================================================
 HDU Name HDU Type Data Type Description
diff --git a/doc/include/datamodel_darkimage.rst b/doc/include/datamodel_darkimage.rst
index 787919fc75..e56057a0fb 100644
--- a/doc/include/datamodel_darkimage.rst
+++ b/doc/include/datamodel_darkimage.rst
@@ -1,5 +1,5 @@
 
-Version 1.3.0
+Version 1.3.1
 
 ================= ============================== ========= ================================================================================================================================================
 HDU Name HDU Type Data Type Description
diff --git a/doc/include/datamodel_specobj.rst b/doc/include/datamodel_specobj.rst
index 975c7f805d..0a26c28753 100644
--- a/doc/include/datamodel_specobj.rst
+++ b/doc/include/datamodel_specobj.rst
@@ -69,10 +69,12 @@ Obj Key Obj Type Array Type Descripti
 ``SPAT_FWHM`` float Spatial FWHM of the object (arcsec)
 ``SPAT_PIXPOS`` float, floating Spatial location of the trace on detector (pixel) at half-way
 ``SPAT_PIXPOS_ID`` int, integer Nearest integer spatial location of the trace on detector (pixel) at half-way used as a unique identifier for the naming model
+``SPEC_DET`` ndarray integer Array of detector indices for each pixel in the spectral direction. This is only available for mosaic reductions.
 ``TRACE_SPAT`` ndarray float Object trace along the spec (spatial pixel)
 ``VEL_CORR`` float Relativistic velocity correction for wavelengths
 ``VEL_TYPE`` str Type of heliocentric correction (if any)
 ``WAVE_RMS`` float, floating RMS (pix) for the wavelength solution for this slit.
+``ech_snr`` float, floating Median S/N of the echelle spectrum
 ``hand_extract_flag`` bool Boolean indicating if this is a forced extraction at the location provided by the user.
 ``maskwidth`` float, floating Size (in units of spatial fwhm) of the region used for local sky subtraction
 ``sign`` float Sign of the object profile (+1 or -1). + is a positive profile above the sky background.
diff --git a/doc/include/datamodel_tiltimage.rst b/doc/include/datamodel_tiltimage.rst
index 55189a8428..abf90e427f 100644
--- a/doc/include/datamodel_tiltimage.rst
+++ b/doc/include/datamodel_tiltimage.rst
@@ -1,5 +1,5 @@
 
-Version 1.3.0
+Version 1.3.1
 
 ================= ============================== ========= ================================================================================================================================================
 HDU Name HDU Type Data Type Description
diff --git a/doc/include/dependencies_table.rst b/doc/include/dependencies_table.rst
index 1d4caf0ac9..15c968609d 100644
--- a/doc/include/dependencies_table.rst
+++ b/doc/include/dependencies_table.rst
@@ -1,5 +1,5 @@
 ======================= ==============================================================================================================================================================================================================================================================================================================================================
 Python Version ``>=3.11,<3.14``
-Required for users ``IPython>=8.0.0``, ``PyERFA>=2.0.0``, ``PyYAML>=6.0``, ``astropy>=7.0``, ``bottleneck``, ``configobj>=5.0.6``, ``fast-histogram>=0.11``, ``ginga>=5.4.0``, ``linetools>=0.3.2``, ``matplotlib>=3.7``, ``numpy>=2.0``, ``packaging>=22.0``, ``pygithub``, ``pyqt6``, ``qtpy>=2.2.0``, ``scikit-learn>=1.2``, ``scipy>=1.9``, ``setuptools<81``
+Required for users ``IPython>=8.0.0``, ``PyERFA>=2.0.0``, ``PyYAML>=6.0``, ``astropy>=7.0``, ``bottleneck``, ``configobj>=5.0.6``, ``fast-histogram>=0.11``, ``ginga>=5.5.1``, ``linetools>=0.3.2``, ``matplotlib>=3.7``, ``numpy>=2.0``, ``packaging>=22.0``, ``pygithub``, ``pyqt6``, ``qtpy>=2.2.0``, ``scikit-learn>=1.2``, ``scipy>=1.9``, ``setuptools<81``
 Required for developers ``coverage``, ``docutils<0.22``, ``psutil``, ``pygit2``, ``pytest-astropy``, ``pytest-cov``, ``pytest-qt``, ``pytest>=7.0.0``, ``scikit-image>=0.23``, ``specutils>=2.0``, ``sphinx-autodoc-typehints>3.2``, ``sphinx-automodapi``, ``sphinx-design``, ``sphinx>6``, ``sphinx_rtd_theme==3.0.0``, ``tox``
 ======================= ==============================================================================================================================================================================================================================================================================================================================================
diff --git a/doc/include/inst_detector_table.rst b/doc/include/inst_detector_table.rst
index 3b49504c6f..58b60efe42 100644
--- a/doc/include/inst_detector_table.rst
+++ b/doc/include/inst_detector_table.rst
@@ -9,15 +9,15 @@ Instrument Det specaxis specflip spatflip namp gain
 ``gemini_gmos_north_e2v`` 1 1 False False 2 2.27, 2.27 3.32, 3.32 0.0 -1.0e+10 110900.0 0.9500 0.0728
 ... 2 1 False False 2 2.27, 2.27 3.32, 3.32 0.0 -1.0e+10 115500.0 0.9500 0.0728
 ...
3 1 False False 2 2.27, 2.27 3.32, 3.32 0.0 -1.0e+10 116700.0 0.9500 0.0728 -``gemini_gmos_north_ham`` 1 1 False False 4 1.63, 1.63, 1.63, 1.63 4.14, 4.14, 4.14, 4.14 0.0 -1.0e+10 129000.0 0.9500 0.0807 -... 2 1 False False 4 1.63, 1.63, 1.63, 1.63 4.14, 4.14, 4.14, 4.14 0.0 -1.0e+10 123000.0 0.9500 0.0807 -... 3 1 False False 4 1.63, 1.63, 1.63, 1.63 4.14, 4.14, 4.14, 4.14 0.0 -1.0e+10 125000.0 0.9500 0.0807 -``gemini_gmos_north_ham_ns`` 1 1 False False 4 1.63, 1.63, 1.63, 1.63 4.14, 4.14, 4.14, 4.14 0.0 -1.0e+10 129000.0 0.9500 0.0807 -... 2 1 False False 4 1.63, 1.63, 1.63, 1.63 4.14, 4.14, 4.14, 4.14 0.0 -1.0e+10 123000.0 0.9500 0.0807 -... 3 1 False False 4 1.63, 1.63, 1.63, 1.63 4.14, 4.14, 4.14, 4.14 0.0 -1.0e+10 125000.0 0.9500 0.0807 -``gemini_gmos_south_ham`` 1 1 False False 4 1.86, 1.86, 1.86, 1.86 4.19, 4.19, 4.19, 4.19 0.0 -1.0e+10 129000.0 0.9500 0.0800 -... 2 1 False False 4 1.89, 1.89, 1.89, 1.89 4.13, 4.13, 4.13, 4.13 0.0 -1.0e+10 123000.0 0.9500 0.0800 -... 3 1 False False 4 1.74, 1.74, 1.74, 1.74 3.75, 3.75, 3.75, 3.75 0.0 -1.0e+10 125000.0 0.9500 0.0800 +``gemini_gmos_north_ham`` 1 1 False False 4 1.568, 1.62, 1.618, 1.675 3.99, 4.12, 4.12, 4.06 0.0 -1.0e+10 129000.0 0.9500 0.0807 +... 2 1 False False 4 1.664, 1.633, 1.65, 1.69 4.2, 3.88, 3.98, 4.2 0.0 -1.0e+10 123000.0 0.9500 0.0807 +... 3 1 False False 4 1.654, 1.587, 1.63, 1.604 4.55, 4.02, 4.35, 4.04 0.0 -1.0e+10 125000.0 0.9500 0.0807 +``gemini_gmos_north_ham_ns`` 1 1 False False 4 1.568, 1.62, 1.618, 1.675 3.99, 4.12, 4.12, 4.06 0.0 -1.0e+10 129000.0 0.9500 0.0807 +... 2 1 False False 4 1.664, 1.633, 1.65, 1.69 4.2, 3.88, 3.98, 4.2 0.0 -1.0e+10 123000.0 0.9500 0.0807 +... 3 1 False False 4 1.654, 1.587, 1.63, 1.604 4.55, 4.02, 4.35, 4.04 0.0 -1.0e+10 125000.0 0.9500 0.0807 +``gemini_gmos_south_ham`` 1 1 False False 4 1.852, 1.878, 1.874, 1.834 4.24, 4.0, 4.25, 4.03 0.0 -1.0e+10 129000.0 0.9500 0.0800 +... 2 1 False False 4 1.878, 1.84, 1.933, 1.908 4.12, 3.83, 3.98, 3.8 0.0 -1.0e+10 123000.0 0.9500 0.0800 +... 3 1 False False 4 1.652, 1.761, 1.724, 1.813 3.46, 3.35, 3.25, 3.5 0.0 -1.0e+10 125000.0 0.9500 0.0800 ``gemini_gnirs_echelle`` 1 0 True True 1 13.5 7.0 540.0 -1.0e+10 150000.0 0.7100 0.1500 ``gemini_gnirs_ifu`` 1 0 True True 1 13.5 7.0 540.0 -1.0e+10 150000.0 0.7100 0.1500 ``gtc_maat`` 1 1 True False 1 1.9 4.3 5.0 0.0e+00 65535.0 0.9500 0.1250 diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index f2e83fcbbe..dc65240ae3 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -383,6 +383,8 @@ Key Type Options ``maskdesign_maxsep`` int, float .. 50 Maximum allowed offset in pixels between the slit edges defined by the slit-mask design and the traced edges. ``maskdesign_sigrej`` int, float .. 3 Number of sigma for sigma-clipping rejection during slit-mask design matching. ``maskdesign_step`` int, float .. 1 Step in pixels used to generate a list of possible offsets (within +/- `maskdesign_maxsep`) between the slit edges defined by the mask design and the traced edges. +``maskdesign_trim`` bool .. False If True, the mask design information is used to trim each slit in the spectral direction. This functionality is only used for spectrographs with slit-mask designs that have information on the spectral extent of each slit (currently, only Gemini GMOS N/S). +``maskdesign_trim_shift`` int, float .. 0 Shift in pixels to apply to the mask design information when trimming the slits in the spectral direction. This is useful for cases where the mask design information is not perfectly aligned with the detector. 
This functionality is only used for spectrographs with slit-mask designs that have information on the spectral extent of each slit (currently, only Gemini GMOS N/S). ``match_tol`` int, float .. 3.0 Same-side slit edges below this separation in pixels are considered part of the same edge. ``max_nudge`` int, float .. .. If parts of any (predicted) trace fall off the detector edge, allow them to be nudged away from the detector edge up to and including this maximum number of pixels. If None, no limit is set; otherwise should be 0 or larger. ``max_overlap`` float .. .. When adding missing echelle orders based on where existing orders are found, the prediction can yield overlapping orders. The edges of these orders are adjusted to eliminate the overlap, and orders can be added up over the spatial range of the detector set by ``order_spate_range``. If this value is None, orders are added regardless of how much they overlap. If not None, this defines the maximum fraction of an order spatial width that can overlap with other orders. For example, if ``max_overlap=0.5``, any order that overlaps its neighboring orders by more than 50% will not be added as a missing order. @@ -594,21 +596,21 @@ Collate1DPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.Collate1DPar` -========================= =============== ======= ======================================================== ================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -========================= =============== ======= ======================================================== ================================================================================================================================================================================================================================================================================================================================================================================================================== -``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. -``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. -``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. -``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. -``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. -``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. -``outdir`` str .. ``/Users/westfall/Work/packages/pypeit-main/pypeit/doc`` The path where all coadded output files and report files will be placed. -``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric -``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. 
-``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. -``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. -========================= =============== ======= ======================================================== ================================================================================================================================================================================================================================================================================================================================================================================================================== +========================= =============== ======= ========== ================================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +========================= =============== ======= ========== ================================================================================================================================================================================================================================================================================================================================================================================================================== +``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. +``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. +``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. +``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. +``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. +``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. +``outdir`` str .. ``$PWD`` The path where all coadded output files and report files will be placed. +``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric +``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. +``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. 
The default units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float.
+``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted.
+========================= =============== ======= ========== ==================================================================================================================================================================================================================================================================================================================================================================================================================
 
 ----
 
@@ -661,22 +663,22 @@ ReduxPar Keywords
 
 Class Instantiation: :class:`~pypeit.par.pypeitpar.ReduxPar`
 
-====================== ============== ======= ======================================================== ==========================================================================================================================================================================================================================================================================================================================================================================================================
-Key Type Options Default Description
-====================== ============== ======= ======================================================== ==========================================================================================================================================================================================================================================================================================================================================================================================================
-``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame
-``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False!
-``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]``
-``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe).
-``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now).
-``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files.
-``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality.
-``redux_path`` str .. ``/Users/westfall/Work/packages/pypeit-main/pypeit/doc`` Path to folder for performing reductions.
Default is the current working directory. -``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. -``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. -``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. -``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. -====================== ============== ======= ======================================================== ========================================================================================================================================================================================================================================================================================================================================================================================================== +====================== ============== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +====================== ============== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================== +``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame +``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! +``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` +``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). +``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). +``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. +``quicklook`` bool .. False Run a quick look reduction? 
This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. +``redux_path`` str .. ``$PWD`` Path to folder for performing reductions. Default is the current working directory. +``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. +``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. +``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. +``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. +====================== ============== ======= =========== ========================================================================================================================================================================================================================================================================================================================================================================================================== ---- @@ -1803,6 +1805,7 @@ Alterations to the default parameters are: edge_thresh = 100.0 follow_span = 80 fit_order = 3 + minimum_slit_length = 1.8 [[tilts]] tracethresh = 10.0 [scienceframe] @@ -1811,6 +1814,10 @@ Alterations to the default parameters are: noise_floor = 0.01 [flexure] spec_method = boxcar + [sensfunc] + extrap_blu = 0.05 + extrap_red = 0.05 + trim_std_pixs = 20, 20, .. _instr_par-gemini_gmos_north_ham: @@ -1898,6 +1905,7 @@ Alterations to the default parameters are: edge_thresh = 100.0 follow_span = 80 fit_order = 3 + minimum_slit_length = 1.8 [[tilts]] tracethresh = 10.0 [scienceframe] @@ -1906,6 +1914,10 @@ Alterations to the default parameters are: noise_floor = 0.01 [flexure] spec_method = boxcar + [sensfunc] + extrap_blu = 0.05 + extrap_red = 0.05 + trim_std_pixs = 20, 20, .. _instr_par-gemini_gmos_north_ham_ns: @@ -1993,6 +2005,7 @@ Alterations to the default parameters are: edge_thresh = 100.0 follow_span = 80 fit_order = 3 + minimum_slit_length = 1.8 [[tilts]] tracethresh = 10.0 [scienceframe] @@ -2001,6 +2014,10 @@ Alterations to the default parameters are: noise_floor = 0.01 [flexure] spec_method = boxcar + [sensfunc] + extrap_blu = 0.05 + extrap_red = 0.05 + trim_std_pixs = 20, 20, .. 
_instr_par-gemini_gmos_south_ham: @@ -2089,6 +2106,7 @@ Alterations to the default parameters are: follow_span = 80 fit_order = 3 bound_detector = True + minimum_slit_length = 1.8 [[tilts]] tracethresh = 10.0 [scienceframe] @@ -2098,6 +2116,9 @@ Alterations to the default parameters are: [flexure] spec_method = boxcar [sensfunc] + extrap_blu = 0.05 + extrap_red = 0.05 + trim_std_pixs = 20, 20, algorithm = IR [[IR]] telgridfile = TellPCA_3000_26000_R10000.fits From 7a3b9bcb5721c8f829faae16d9010c9b494c30e2 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Mon, 5 Jan 2026 16:07:13 -0800 Subject: [PATCH 31/33] test fixes --- pypeit/scripts/trace_edges.py | 4 ---- pypeit/setup_gui/model.py | 5 +++-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pypeit/scripts/trace_edges.py b/pypeit/scripts/trace_edges.py index dada267b04..d864e34ab7 100644 --- a/pypeit/scripts/trace_edges.py +++ b/pypeit/scripts/trace_edges.py @@ -74,10 +74,6 @@ def main(cls, args): # Initialize the log cls.init_log(args) - if args.show: - log.warning('"show" option is deprecated. Setting debug = 1.') - args.debug = 1 - if args.pypeit_file is not None: pypeit_file = Path(args.pypeit_file).absolute() if not pypeit_file.exists(): diff --git a/pypeit/setup_gui/model.py b/pypeit/setup_gui/model.py index e2aaf6f387..5e2e433052 100644 --- a/pypeit/setup_gui/model.py +++ b/pypeit/setup_gui/model.py @@ -21,7 +21,8 @@ from configobj import ConfigObj from datetime import datetime, timezone -from pypeit import log, spectrographs +from pypeit import log +from pypeit import spectrographs from pypeit.spectrographs import available_spectrographs from pypeit.pypeitsetup import PypeItSetup from pypeit.metadata import PypeItMetaData @@ -1238,7 +1239,7 @@ def setup_logging(self, verbosity): # TODO: Need help from Dusty to update this self.log_buffer = LogBuffer(logfile,verbosity) - log.init(level=log.level, log_file=self.log_buffer) + log.init(level=log.level, stream=self.log_buffer) # log.reset(verbosity=verbosity, log=self.log_buffer, log_to_stderr=False) log.info(f"QT Version: {qtpy.QT_VERSION}") log.info(f"PySide version: {qtpy.PYSIDE_VERSION}") From a5a574a15c189ced00c8984548bc9b2a0dbecc76 Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Tue, 6 Jan 2026 10:01:22 -0800 Subject: [PATCH 32/33] test fix --- pypeit/logger.py | 30 ++++++++++++++++++++++++++++++ pypeit/scripts/scriptbase.py | 19 ++----------------- pypeit/setup_gui/controller.py | 4 +--- pypeit/setup_gui/model.py | 6 ++---- 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/pypeit/logger.py b/pypeit/logger.py index 6ad47b1cb9..5bdd1d2171 100644 --- a/pypeit/logger.py +++ b/pypeit/logger.py @@ -269,6 +269,36 @@ def _excepthook(self, etype, value, trace): # Call the original exception hook self._excepthook_orig(etype, value, trace) + @staticmethod + def convert_verbosity_to_logging_level(v): + """ + Given a PypeIt "verbosity level," return the logging level. + + Parameters + ---------- + v : int + PypeIt verbosity level (0, 1, or 2) + + Returns + ------- + int + Corresponding logging level + + Raises + ------ + ValueError + Raised if the input verbosity level is not 0, 1, or 2. 
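+
+        For example, ``convert_verbosity_to_logging_level(1)`` returns
+        ``logging.INFO``, and any value other than 0, 1, or 2 raises a
+        ``ValueError``.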
+ """ + match v: + case 0: + return logging.WARNING + case 1: + return logging.INFO + case 2: + return logging.DEBUG + case _: + raise ValueError(f'Verbosity level must be 0, 1, or 2, not {v}.') + def makeRecord( self, name, level, pathname, lineno, msg, args, exc_info, func=None, extra=None, sinfo=None diff --git a/pypeit/scripts/scriptbase.py b/pypeit/scripts/scriptbase.py index cb940734ff..687b252729 100644 --- a/pypeit/scripts/scriptbase.py +++ b/pypeit/scripts/scriptbase.py @@ -232,9 +232,9 @@ def init_log(cls, args): """ Initialize the logger provided the command-line arguments. """ - level = cls._convert_verbosity_to_logging_level(args.verbosity) + level = log.convert_verbosity_to_logging_level(args.verbosity) log_file_level = None if args.log_level is None else \ - cls._convert_verbosity_to_logging_level(args.log_level) + log.convert_verbosity_to_logging_level(args.log_level) if args.log_file == 'default': _log_file = cls.default_log_file() elif args.log_file in ['None', None]: @@ -245,21 +245,6 @@ def init_log(cls, args): log_file=_log_file, log_file_level=log_file_level) - @staticmethod - def _convert_verbosity_to_logging_level(v): - """ - Given a PypeIt "verbosity level," return the logging level. - """ - match v: - case 0: - return logging.WARNING - case 1: - return logging.INFO - case 2: - return logging.DEBUG - case _: - raise PypeItError(f'Verbosity level must be 0, 1, or 2, not {v}.') - @classmethod def default_log_file(cls): """ diff --git a/pypeit/setup_gui/controller.py b/pypeit/setup_gui/controller.py index 991d326095..f66a221162 100644 --- a/pypeit/setup_gui/controller.py +++ b/pypeit/setup_gui/controller.py @@ -39,9 +39,7 @@ def lock_qt_mutex(mutex): mutex.unlock() class OpCanceledError(Exception): - """Exception thrown when a background operation has been canceled.""" - def __init__(self): - super().__init__() + pass class OperationThread(QThread): """Thread to run a background operation.""" diff --git a/pypeit/setup_gui/model.py b/pypeit/setup_gui/model.py index 5e2e433052..90d443c591 100644 --- a/pypeit/setup_gui/model.py +++ b/pypeit/setup_gui/model.py @@ -1237,10 +1237,8 @@ def setup_logging(self, verbosity): else: logfile = None - # TODO: Need help from Dusty to update this - self.log_buffer = LogBuffer(logfile,verbosity) - log.init(level=log.level, stream=self.log_buffer) -# log.reset(verbosity=verbosity, log=self.log_buffer, log_to_stderr=False) + self.log_buffer = LogBuffer(logfile, verbosity) + log.init(level=log.convert_verbosity_to_logging_level(verbosity), stream=self.log_buffer) log.info(f"QT Version: {qtpy.QT_VERSION}") log.info(f"PySide version: {qtpy.PYSIDE_VERSION}") log.info(f"PyQt version: {qtpy.PYQT_VERSION}") From d3b0e1f09b9d9d372454793b630630fdce4b7d6b Mon Sep 17 00:00:00 2001 From: Dusty Reichwein Date: Tue, 6 Jan 2026 10:38:32 -0800 Subject: [PATCH 33/33] Remove cancel button from setup gui progress dialog until that feature can be refactored. --- pypeit/setup_gui/view.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pypeit/setup_gui/view.py b/pypeit/setup_gui/view.py index 8f6b7a86ad..595121f7dc 100644 --- a/pypeit/setup_gui/view.py +++ b/pypeit/setup_gui/view.py @@ -1508,7 +1508,7 @@ def create_progress_dialog(self, op_caption, max_progress_value, cancel_func): progress dialog. 
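+        Note that the dialog is currently created without a Cancel button.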
""" log.info(f"Starting operation {op_caption} max progress: {max_progress_value}") - self.current_op_progress_dialog = QProgressDialog(self.tr(op_caption), self.tr("Cancel"), 0, max_progress_value, parent=self) + self.current_op_progress_dialog = QProgressDialog(self.tr(op_caption), None, 0, max_progress_value, parent=self) self.current_op_progress_dialog.setMinimumWidth(380) self.current_op_progress_dialog.setWindowTitle(op_caption) self.current_op_progress_dialog.setMinimumDuration(1000)