import copy
import numpy as np
[docs]def make_default_dict(d):
o = {}
for k, v in d.items():
o[k] = copy.deepcopy(v[0])
return o
# Options for defining input data files
# Each entry maps an option name to (default value, help string, type).
data = {
    'evfile':
        (None, 'Path to FT1 file or list of FT1 files.', str),
    'scfile':
        (None, 'Path to FT2 (spacecraft) file.', str),
    'ltcube':
        (None, 'Path to livetime cube. If none a livetime cube will '
         'be generated with ``gtmktime``.', str),
}
# Options for data selection.
# Each entry maps an option name to (default value, help string, type).
selection = {
    'emin': (None, 'Minimum Energy (MeV)', float),
    'emax': (None, 'Maximum Energy (MeV)', float),
    'logemin': (None, 'Minimum Energy (log10(MeV))', float),
    'logemax': (None, 'Maximum Energy (log10(MeV))', float),
    'tmin': (None, 'Minimum time (MET).', int),
    'tmax': (None, 'Maximum time (MET).', int),
    'zmax': (None, 'Maximum zenith angle.', float),
    'evclass': (None, 'Event class selection.', int),
    'evtype': (None, 'Event type selection.', int),
    'convtype': (None, 'Conversion type selection.', int),
    # Fix: "precendence" -> "precedence" in the help string.
    'target': (None, 'Choose an object on which to center the ROI. '
               'This option takes precedence over ra/dec or glon/glat.', str),
    'ra': (None, '', float),
    'dec': (None, '', float),
    'glat': (None, '', float),
    'glon': (None, '', float),
    'radius': (None, 'Radius of data selection. If none this will be '
               'automatically set from the ROI size.', float),
    'filter': (None, 'Filter string for ``gtmktime`` selection.', str),
    'roicut': ('no', '', str)
}
# Options for ROI model.
# ROI model options: (default value, help string, type) per entry.
model = {
    'src_radius': (
        None,
        'Set the maximum distance for inclusion of sources in the ROI '
        'model. Selects all sources within a circle of this radius '
        'centered on the ROI. If none then no selection is applied. '
        'This selection will be ORed with sources passing the cut on '
        'src_roiwidth.',
        float),
    'src_roiwidth': (
        None,
        'Select sources within a box of RxR centered on the ROI. '
        'If none then no cut is applied.',
        float),
    'src_radius_roi': (
        None,
        'Half-width of the ROI selection. This parameter can be used '
        'in lieu of src_roiwidth.',
        float),
    'isodiff': (None, 'Set the isotropic template.', list),
    'galdiff': (None, 'Set the galactic IEM mapcube.', list),
    'limbdiff': (None, '', list),
    'diffuse': (None, '', list),
    'sources': (None, '', list),
    'extdir': ('Extended_archive_v15', '', str),
    'catalogs': (None, '', list),
    'merge_sources': (
        True,
        'Merge properties of sources that appear in multiple source '
        'catalogs. If merge_sources=false then subsequent sources '
        'with the same name will be ignored.',
        bool),
    'assoc_xmatch_columns': (
        ['3FGL_Name'],
        'Choose a set of association columns on which to cross-match '
        'catalogs.',
        list),
    'extract_diffuse': (
        False,
        'Extract a copy of all mapcube components centered on the ROI.',
        bool),
}
# Options for configuring likelihood analysis
# Likelihood-analysis options: (default value, help string, type).
gtlike = {
    'irfs': (None, 'Set the IRF string.', str),
    'edisp': (True, 'Enable the correction for energy dispersion.', bool),
    'edisp_disable': (
        None,
        'Provide a list of sources for which the edisp correction '
        'should be disabled.',
        list),
    # 'likelihood': ('binned', '', str),
    'minbinsz': (
        0.05,
        'Set the minimum bin size used for resampling diffuse maps.',
        float),
    'rfactor': (2, '', int),
    'convolve': (True, '', bool),
    'resample': (True, '', bool),
    'srcmap': (None, '', str),
    'bexpmap': (None, '', str),
}
# Options for binning.
# Spatial/energy binning options: (default value, help string, type).
binning = {
    'projtype': ('WCS', 'Projection mode (WCS or HPX).', str),
    'proj': ('AIT', 'Spatial projection for WCS mode.', str),
    'coordsys': (
        'CEL',
        'Coordinate system of the spatial projection (CEL or GAL).',
        str),
    'npix': (
        None,
        'Number of pixels. If none then this will be set from '
        '``roiwidth`` and ``binsz``.',
        int),
    'roiwidth': (
        10.0,
        'Width of the ROI in degrees. The number of pixels in each '
        'spatial dimension will be set from ``roiwidth`` / ``binsz`` '
        '(rounded up).',
        float),
    'binsz': (0.1, 'Spatial bin size in degrees.', float),
    'binsperdec': (8, 'Number of energy bins per decade.', float),
    'enumbins': (
        None,
        'Number of energy bins. If none this will be inferred from '
        'energy range and ``binsperdec`` parameter.',
        int),
    'hpx_ordering_scheme': ('RING', 'HEALPix Ordering Scheme', str),
    'hpx_order': (
        10, 'Order of the map (int between 0 and 12, included)', int),
    'hpx_ebin': (True, 'Include energy binning', bool),
}
# Options related to I/O and output file bookkeeping
# I/O and bookkeeping options: (default value, help string, type).
fileio = {
    'outdir': (
        None,
        'Path of the output directory. If none this will default to '
        'the directory containing the configuration file.',
        str),
    'scratchdir': ('/scratch', 'Path to the scratch directory.', str),
    'workdir': (None, 'Override the working directory.', str),
    'logfile': (
        None,
        'Path to log file. If None then log will be written to '
        'fermipy.log.',
        str),
    'savefits': (True, 'Save intermediate FITS files.', bool),
    'usescratch': (
        False,
        'Run analysis in a temporary directory under ``scratchdir``.',
        bool),
}
# Logging options: (default value, help string, type).
# NOTE(review): this name shadows the stdlib ``logging`` module within
# this file; keep that in mind if stdlib logging is ever needed here.
logging = {
    'chatter': (3, 'Set the chatter parameter of the STs.', int),
    'verbosity': (3, '', int),
}
# Options related to likelihood optimizer
# Optimizer options: (default value, help string, type).
optimizer = {
    'optimizer': (
        'MINUIT',
        'Set the optimization algorithm to use when maximizing the '
        'likelihood function.',
        str),
    'tol': (1E-4, 'Set the optimizer tolerance.', float),
    'retries': (
        3,
        'Set the number of times to retry the fit when the fit quality '
        'is less than ``min_fit_quality``.',
        int),
    'min_fit_quality': (3, 'Set the minimum fit quality.', int),
    'verbosity': (0, '', int),
}
# MC options
# Monte-Carlo options: (default value, help string, type).
mc = {'seed': (None, '', int)}
# ROI Optimization
# ROI-optimization options: (default value, help string, type).
roiopt = {
    'npred_threshold':    (1.0, '', float),
    'npred_frac':         (0.95, '', float),
    'shape_ts_threshold': (100.0, '', float),
}
# Residual Maps
# Residual-map options: (default value, help string, type).
residmap = {
    # Fix: "specturm" -> "spectrum" in the help string.
    'model': (
        None,
        'Dictionary defining the properties of the test source. By '
        'default the test source will be a PointSource with an Index 2 '
        'power-law spectrum.',
        dict),
    'erange': (
        None,
        'Lower and upper energy bounds in log10(E/MeV). By default the '
        'calculation will be performed over the full analysis energy '
        'range.',
        list),
}
# TS Map
# TS-map options: (default value, help string, type).
tsmap = {
    'model': (
        None, 'Dictionary defining the properties of the test source.',
        dict),
    'multithread': (False, '', bool),
    'max_kernel_radius': (3.0, '', float),
    'erange': (
        None,
        'Lower and upper energy bounds in log10(E/MeV). By default the '
        'calculation will be performed over the full analysis energy '
        'range.',
        list),
}
# TS Cube
# TS-cube options: (default value, help string, type).
# Fixes: "specturm" -> "spectrum", "Critetia" -> "Criteria",
# "Absoulte" -> "Absolute", "Newtons" -> "Newton's".
tscube = {
    'model': (None, 'Dictionary defining the properties of the test source. '
              'By default the test source will be a PointSource with an '
              'Index 2 power-law spectrum.', dict),
    'do_sed': (True, 'Compute the energy bin-by-bin fits', bool),
    'nnorm': (10, 'Number of points in the likelihood v. normalization scan', int),
    'norm_sigma': (5.0, 'Number of sigma to use for the scan range ', float),
    'cov_scale_bb': (-1.0, 'Scale factor to apply to global fitting '
                     'cov. matrix in broadband fits. ( < 0 -> no prior ) ', float),
    'cov_scale': (-1.0, 'Scale factor to apply to broadband fitting cov. '
                  'matrix in bin-by-bin fits ( < 0 -> fixed ) ', float),
    'tol': (1E-3, 'Criteria for fit convergence (estimated vertical distance to min < tol )', float),
    'max_iter': (30, "Maximum number of iterations for the Newton's method fitter.", int),
    'tol_type': (0, 'Absolute (0) or relative (1) criteria for convergence.', int),
    'remake_test_source': (False, 'If true, recomputes the test source image (otherwise just shifts it)', bool),
    'st_scan_level': (0, 'Level to which to do ST-based fitting (for testing)', int),
}
# Options for Source Finder
# Source-finder options: (default value, help string, type).
sourcefind = {
    # Fix: "specturm" -> "spectrum" in the help string.
    'model': (None, 'Set the source model dictionary. By default the test '
              'source will be a PointSource with an Index 2 power-law '
              'spectrum.', dict),
    'min_separation': (1.0, 'Set the minimum separation in deg for sources '
                       'added in each iteration.', float),
    'sqrt_ts_threshold': (5.0, 'Set the threshold on sqrt(TS).', float),
    'max_iter': (3, 'Set the number of search iterations.', int),
    'sources_per_iter': (3, '', int),
    'tsmap_fitter': ('tsmap', 'Set the method for generating the TS map.', str)
}
# Options for SED analysis
# SED-analysis options: (default value, help string, type).
sed = {
    # Fix: "will be use when" -> "will be used when" in the help string.
    'bin_index': (2.0, 'Spectral index that will be used when fitting the '
                  'energy distribution within an energy bin.', float),
    'use_local_index': (False, 'Use a power-law approximation to the shape of the global spectrum in '
                        'each bin. If this is false then a constant index set to `bin_index` '
                        'will be used.', bool),
    'fix_background': (True, 'Fix background parameters when fitting the '
                       'source flux in each energy bin.', bool),
    'ul_confidence': (0.95, 'Confidence level for upper limit calculation.',
                      float)
}
# Output for SED analysis
# SED output columns: each entry maps a column name to
# (default, description, python type, docs type string).
sed_output = {
    'emin': (None, 'Lower edges of SED energy bins (log10(E/MeV)).', np.ndarray, '`~numpy.ndarray`'),
    'emax': (None, 'Upper edges of SED energy bins (log10(E/MeV)).', np.ndarray, '`~numpy.ndarray`'),
    'ecenter': (None, 'Centers of SED energy bins (log10(E/MeV)).', np.ndarray, '`~numpy.ndarray`'),
    'flux': (None, 'Flux in each bin (cm^{-2} s^{-1}).', np.ndarray, '`~numpy.ndarray`'),
    'eflux': (None, 'Energy flux in each bin (MeV cm^{-2} s^{-1}).', np.ndarray, '`~numpy.ndarray`'),
    'dfde': (None, 'Differential flux in each bin (MeV^{-1} cm^{-2} s^{-1}).', np.ndarray, '`~numpy.ndarray`'),
    'e2dfde': (None, 'E^2 x the differential flux in each bin (MeV^{-1} cm^{-2} s^{-1}).', np.ndarray, '`~numpy.ndarray`'),
    'dfde_err': (None, '1-sigma error on dfde evaluated from likelihood curvature.', np.ndarray, '`~numpy.ndarray`'),
    'dfde_err_lo': (None, 'Lower 1-sigma error on dfde evaluated from the profile likelihood (MINOS errors).', np.ndarray, '`~numpy.ndarray`'),
    'dfde_err_hi': (None, 'Upper 1-sigma error on dfde evaluated from the profile likelihood (MINOS errors).', np.ndarray, '`~numpy.ndarray`'),
    'dfde_ul95': (None, '95% CL upper limit on dfde evaluated from the profile likelihood (MINOS errors).', np.ndarray, '`~numpy.ndarray`'),
    'dfde_ul': (None, 'Upper limit on dfde evaluated from the profile likelihood using a CL = ``ul_confidence``.', np.ndarray, '`~numpy.ndarray`'),
    'e2dfde_err': (None, '1-sigma error on e2dfde evaluated from likelihood curvature.', np.ndarray, '`~numpy.ndarray`'),
    'e2dfde_err_lo': (None, 'Lower 1-sigma error on e2dfde evaluated from the profile likelihood (MINOS errors).', np.ndarray, '`~numpy.ndarray`'),
    'e2dfde_err_hi': (None, 'Upper 1-sigma error on e2dfde evaluated from the profile likelihood (MINOS errors).', np.ndarray, '`~numpy.ndarray`'),
    'e2dfde_ul95': (None, '95% CL upper limit on e2dfde evaluated from the profile likelihood (MINOS errors).', np.ndarray, '`~numpy.ndarray`'),
    'e2dfde_ul': (None, 'Upper limit on e2dfde evaluated from the profile likelihood using a CL = ``ul_confidence``.', np.ndarray, '`~numpy.ndarray`'),
    'ts': (None, 'Test statistic.', np.ndarray, '`~numpy.ndarray`'),
    'Npred': (None, 'Number of model counts.', np.ndarray, '`~numpy.ndarray`'),
    'fit_quality': (None, 'Fit quality parameter.', np.ndarray, '`~numpy.ndarray`'),
    'index': (None, 'Spectral index of the power-law model used to fit this bin.', np.ndarray, '`~numpy.ndarray`'),
    'lnlprofile': (None, 'Likelihood scan for each energy bin.', dict, 'dict'),
    'config': (None, 'Copy of the input parameters to this method.', dict, 'dict'),
}
# Options for extension analysis
# Extension-analysis options: (default value, help string, type).
extension = {
    # Fix: "model use for" -> "model used for" in the help string.
    'spatial_model': ('GaussianSource', 'Spatial model used for extension test.', str),
    # NOTE(review): declared type is ``str`` although the description
    # suggests a parameter vector -- confirm against the consumer.
    'width': (None, 'Parameter vector for scan over spatial extent. If none then the parameter '
              'vector will be set from ``width_min``, ``width_max``, and ``width_nstep``.', str),
    'width_min': (0.01, 'Minimum value in degrees for the likelihood scan over spatial extent.', float),
    'width_max': (1.0, 'Maximum value in degrees for the likelihood scan over spatial extent.', float),
    'width_nstep': (21, 'Number of steps for the spatial likelihood scan.', int),
    'save_templates': (False, '', bool),
    'fix_background': (False, 'Fix any background parameters that are currently free in the model when '
                       'performing the likelihood scan over extension.', bool),
    'save_model_map': (False, '', bool),
    'update': (False, 'Update the source model with the best-fit spatial extension.', bool)
}
# Extension-analysis output columns: each entry maps a column name to
# (default, description, python type, docs type string).
extension_output = {
    'width': (None, 'List of width parameters.', np.ndarray, '`~numpy.ndarray`'),
    'dlogLike': (None, 'Sequence of delta-log-likelihood values for each point in the profile likelihood scan.', np.ndarray, '`~numpy.ndarray`'),
    'logLike': (None, 'Sequence of likelihood values for each point in the scan over the spatial extension.', np.ndarray, '`~numpy.ndarray`'),
    'logLike_ptsrc': (np.nan, 'Model log-Likelihood value of the best-fit point-source model.', float, 'float'),
    'logLike_ext': (np.nan, 'Model log-Likelihood value of the best-fit extended source model.', float, 'float'),
    'logLike_base': (np.nan, 'Model log-Likelihood value of the baseline model.', float, 'float'),
    'ext': (np.nan, 'Best-fit extension in degrees.', float, 'float'),
    'ext_err_hi': (np.nan, 'Upper (1 sigma) error on the best-fit extension in degrees.', float, 'float'),
    'ext_err_lo': (np.nan, 'Lower (1 sigma) error on the best-fit extension in degrees.', float, 'float'),
    'ext_err': (np.nan, 'Symmetric (1 sigma) error on the best-fit extension in degrees.', float, 'float'),
    'ext_ul95': (np.nan, '95% CL upper limit on the spatial extension in degrees.', float, 'float'),
    'ts_ext': (np.nan, 'Test statistic for the extension hypothesis.', float, 'float'),
    'source_fit': ({}, 'Dictionary with parameters of the best-fit extended source model.', dict, 'dict'),
    'config': ({}, 'Copy of the input configuration to this method.', dict, 'dict'),
}
# Options for localization analysis
# Localization options: (default value, help string, type).
localize = {
    'nstep': (
        5,
        'Number of steps along each spatial dimension in the refined '
        'likelihood scan.',
        int),
    'dtheta_max': (
        0.3,
        'Half-width of the search region in degrees used for the first '
        'pass of the localization search.',
        float),
    'fix_background': (
        True,
        'Fix background parameters when fitting the source flux in '
        'each energy bin.',
        bool),
    'update': (
        False, 'Update the source model with the best-fit position.', bool),
}
# Output for localization analysis
# Localization output columns: each entry maps a column name to
# (default, description, python type, docs type string).
localize_output = {
    'ra': (np.nan, 'Right ascension of best-fit position in deg.', float, 'float'),
    'dec': (np.nan, 'Declination of best-fit position in deg.', float, 'float'),
    'glon': (np.nan, 'Galactic Longitude of best-fit position in deg.', float, 'float'),
    'glat': (np.nan, 'Galactic Latitude of best-fit position in deg.', float, 'float'),
    'offset': (np.nan, 'Angular offset in deg between the current and localized source position.', float, 'float'),
    'r68': (np.nan, '68% positional uncertainty in deg.', float, 'float'),
    'r95': (np.nan, '95% positional uncertainty in deg.', float, 'float'),
    'r99': (np.nan, '99% positional uncertainty in deg.', float, 'float'),
    'sigmax': (np.nan, '1-sigma uncertainty in deg in longitude.', float, 'float'),
    'sigmay': (np.nan, '1-sigma uncertainty in deg in latitude.', float, 'float'),
    'xpix': (np.nan, 'Longitude pixel coordinate of best-fit position.', float, 'float'),
    'ypix': (np.nan, 'Latitude pixel coordinate of best-fit position.', float, 'float'),
    'theta': (np.nan, 'Position angle of uncertainty ellipse.', float, 'float'),
    'config': (None, 'Copy of the input parameters to this method.', dict, 'dict'),
}
# Options for plotting
# Plotting options: (default value, help string, type).
plotting = {
    'erange':   (None, '', list),
    'catalogs': (None, '', list),
    'graticule_radii': (
        None,
        'Define a list of radii at which circular graticules will '
        'be drawn.',
        list),
    'format': ('png', '', str),
    'cmap':   ('ds9_b', 'Set the colormap for 2D plots.', str),
}