From 4db44d4c8e438bb12cfb06cc53a54dc1287d0664 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 11 Jan 2025 07:04:37 -0800 Subject: [PATCH 01/29] start work on new PR --- CHANGELOG.md | 2 ++ desietc/_version.py | 2 +- setup.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eeb6aeb..4c73e89 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.1.20] - Unreleased + ## [0.1.19] - 2024-10-16 ### Changed - Make default SkyCam processing identical to pre 0.1.18, i.e. refit by default and do not apply centroid fitting or temperature corrections. Centroid fitting is not ready to deploy yet since it runs too slowly. Temperature corrections will require new plumbing with ICS. diff --git a/desietc/_version.py b/desietc/_version.py index d38c350..592d452 100644 --- a/desietc/_version.py +++ b/desietc/_version.py @@ -1 +1 @@ -__version__ = "0.1.19" +__version__ = "0.1.20dev" diff --git a/setup.py b/setup.py index 62767a7..3ae6859 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ setup( name="desietc", - version="0.1.19", # MAKE SURE THIS MATCHES desietc/_version.py! There are also ways to automate this. + version="0.1.20dev", # MAKE SURE THIS MATCHES desietc/_version.py! There are also ways to automate this. description="Online exposure-time calculator for DESI", url="http://github.com/desihub/desietc", author="David Kirkby", From 212897fba5978ba8aa47efafd22f53a205bd6124 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 11 Jan 2025 08:09:06 -0800 Subject: [PATCH 02/29] apply black formatting to sky,util modules --- desietc/sky.py | 224 +++++++---- desietc/util.py | 1028 +++++++++++++++++++++++++++++++++++------------ 2 files changed, 929 insertions(+), 323 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index 2f20cce..67d1c0f 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -1,5 +1,6 @@ """Analyze images captured by the DESI Sky Cameras for the online ETC. """ + import collections try: @@ -19,24 +20,23 @@ class BGFitter(object): - """Fit a histogram of pixel values to a single Gaussian noise model. - """ + """Fit a histogram of pixel values to a single Gaussian noise model.""" + def __init__(self, optimize_args={}): # Initialize the args sent to scipy.optimize.minimize() self.kwargs = dict( - method='Nelder-Mead', - options=dict(maxiter=10000, xatol=1e-4, fatol=1e-4, disp=False)) + method="Nelder-Mead", + options=dict(maxiter=10000, xatol=1e-4, fatol=1e-4, disp=False), + ) self.kwargs.update(optimize_args) def predict(self, ntot, mu, std): - """Predict data with specified parameters and bin edges. - """ + """Predict data with specified parameters and bin edges.""" z = scipy.special.erf((self.xedge - mu) / (np.sqrt(2) * std)) return 0.5 * ntot * np.diff(z) def nll(self, theta): - """Objective function for minimization, calculates -logL. - """ + """Objective function for minimization, calculates -logL.""" ypred = self.predict(*theta) # Use the model to predict the Gaussian inverse variances. 
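        # (For a Gaussian noise model this -logL is, up to an additive constant,
        # roughly 0.5 * sum_i (data_i - ypred_i)**2 / yvar_i; the floor of 1 in
        # yvar below keeps empty histogram bins from getting infinite weight.)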
yvar = np.maximum(1, ypred) @@ -68,68 +68,116 @@ def fit(self, data, nbins=30, maxpct=90): return theta -def load_calib_data(name='SKY_calib.fits'): +def load_calib_data(name="SKY_calib.fits"): names = collections.defaultdict(list) slices = collections.defaultdict(list) calibs = collections.defaultdict(list) cameras = [] with fitsio.FITS(str(name)) as hdus: meta = hdus[0].read_header() - nspot = meta['NSPOT'] - spots = hdus['SPOT'].read() + nspot = meta["NSPOT"] + spots = hdus["SPOT"].read() stampsize = spots.shape[1] lo = stampsize // 2 hi = stampsize - lo - for k in range(nspot): - camera = meta['CAMERA_{0}'.format(k)] + for k in range(nspot): + camera = meta["CAMERA_{0}".format(k)] cameras.append(camera) - names[camera].append(meta['NAME_{0}'.format(k)]) - y, x = meta['ROW_{0}'.format(k)], meta['COL_{0}'.format(k)] + names[camera].append(meta["NAME_{0}".format(k)]) + y, x = meta["ROW_{0}".format(k)], meta["COL_{0}".format(k)] slices[camera].append((slice(y - lo, y + hi), slice(x - lo, x + hi))) - calibs[camera].append(meta['CALIB_{0}'.format(k)]) - mask_data = hdus['MASK'].read() - spot_data = hdus['SPOT'].read() + calibs[camera].append(meta["CALIB_{0}".format(k)]) + mask_data = hdus["MASK"].read() + spot_data = hdus["SPOT"].read() masks, spots, calib = {}, {}, {} cameras = np.array(cameras) for camera in np.unique(cameras): sel = cameras == camera masks[camera] = mask_data[sel] spots[camera] = spot_data[sel] - logging.info('Loaded SKY calib data from {0}'.format(name)) + logging.info("Loaded SKY calib data from {0}".format(name)) return names, slices, masks, spots, calibs # Fine tuning coefficients derived from fitting the trends of individual fibers vs the spectroscopic # r-band sky using data from Mar-Apr 2021. The fit is linear in log space, so the adjustment model is: # x --> coef * x ** pow -finetune_coef = np.array([ - [0.62397108, 0. , 0.7552137 , 0.93242162, 0.95308126, - 0.97305782, 0.87329783, 1.09370934, 1.12080438, 0. ], - [0. , 0.70029763, 0.91706983, 0.85841961, 0.91394301, - 1.00810978, 0.91657536, 0. , 0. , 0. ]]) +finetune_coef = np.array( + [ + [ + 0.62397108, + 0.0, + 0.7552137, + 0.93242162, + 0.95308126, + 0.97305782, + 0.87329783, + 1.09370934, + 1.12080438, + 0.0, + ], + [ + 0.0, + 0.70029763, + 0.91706983, + 0.85841961, + 0.91394301, + 1.00810978, + 0.91657536, + 0.0, + 0.0, + 0.0, + ], + ] +) + +finetune_pow = np.array( + [ + [ + 1.08673508, + 1.0, + 1.06715913, + 1.01187676, + 1.0178157, + 1.02464004, + 1.02520429, + 1.01357766, + 1.00232129, + 1.0, + ], + [ + 1.0, + 1.08950204, + 1.00924252, + 1.01807379, + 1.01857049, + 1.04251868, + 1.02630379, + 1.0, + 1.0, + 1.0, + ], + ] +) -finetune_pow = np.array([ - [1.08673508, 1. , 1.06715913, 1.01187676, 1.0178157 , - 1.02464004, 1.02520429, 1.01357766, 1.00232129, 1. ], - [1. , 1.08950204, 1.00924252, 1.01807379, 1.01857049, - 1.04251868, 1.02630379, 1. , 1. , 1. ]]) def finetune_adjust(x, icamera, ifiber): return finetune_coef[icamera, ifiber] * x ** finetune_pow[icamera, ifiber] + ## Mask of fibers to use for each camera. 
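# One row per camera (SKYCAM0, SKYCAM1) and one column per fiber: 1 means the fiber
# is used when masked averages are requested (see the fiber_mask[icamera, :N] > 0
# selections in SkyCamera.setraw below) and 0 means it is ignored.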
-fiber_mask = np.array([ - [1,0,1,1,1,1,1,1,1,0], - [0,1,1,1,1,1,1,0,0,0]]) +fiber_mask = np.array([[1, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) class SkyCamera(object): - """ - """ - sky_names = 'SKYCAM0', 'SKYCAM1' + """ """ + + sky_names = "SKYCAM0", "SKYCAM1" - def __init__(self, calib_name='SKY_calib.fits'): - self.names, self.slices, self.masks, self.spots, self.calibs = load_calib_data(calib_name) + def __init__(self, calib_name="SKY_calib.fits"): + self.names, self.slices, self.masks, self.spots, self.calibs = load_calib_data( + calib_name + ) maxstamps = max([len(N) for N in self.names.values()]) stampsize = self.masks[self.sky_names[0]].shape[1] # Initialize reusable arrays of analysis results. @@ -149,10 +197,23 @@ def __init__(self, calib_name='SKY_calib.fits'): # Initialize background fitting. self.bgfitter = BGFitter() - def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, chisq_max=5, ndrop_max=3, - masked=True, finetune=True, Temperature=None, Temp_correc_coef=np.array([ - [0.905, 0.007], - [0.941, 0.003]]), return_offsets=False, fit_centroids=False): + def setraw( + self, + raw, + name, + gain=2.5, + saturation=65500, + refit=True, + pullcut=5, + chisq_max=5, + ndrop_max=3, + masked=True, + finetune=True, + Temperature=None, + Temp_correc_coef=np.array([[0.905, 0.007], [0.941, 0.003]]), + return_offsets=False, + fit_centroids=False, + ): """Fit images of a spot to estimate the spot flux and background level as well as the position offset from the reference profile. @@ -193,7 +254,7 @@ def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, c and spot_offsets is an array of shape (...,nf,2) where nf is the number of sky monitoring fibers (10 for SKYCAM0 and 7 for SKYCAM1) with elements [...,i,0] = i-th spot position_offset(x direction) and [...,i,1] = i-th spot position_offset(y direction) """ if name not in self.slices: - raise ValueError('Invalid SKY name: {0}.'.format(name)) + raise ValueError("Invalid SKY name: {0}.".format(name)) icamera = self.sky_names.index(name) slices = self.slices[name] # Copy stamps for each spot into self.data. @@ -207,50 +268,67 @@ def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, c # Estimate the signal contribution to the variance. signalvar = np.maximum(0, self.data[k]) / gain # Estimate the total inverse variance per pixel. - self.ivar[k] = 1 / (bgsigma ** 2 + signalvar) + self.ivar[k] = 1 / (bgsigma**2 + signalvar) # Mask any saturated or ~dead pixels. - dead = self.data[k] < - 5 * bgsigma + dead = self.data[k] < -5 * bgsigma saturated = raw[S] > saturation self.ivar[k][dead | saturated] = 0 # Mask known hot pixels. self.ivar[k][self.masks[name][k]] = 0 # Fit for the spot flux and background level and (optionally) the spot centroids. - fitter = desietc.util.fit_spots_flux_and_pos if fit_centroids else desietc.util.fit_spots - self.flux[:N], self.bgfit[:N], cov, spot_offsets = fitter(self.data[:N], self.ivar[:N], self.spots[name]) + fitter = ( + desietc.util.fit_spots_flux_and_pos + if fit_centroids + else desietc.util.fit_spots + ) + self.flux[:N], self.bgfit[:N], cov, spot_offsets = fitter( + self.data[:N], self.ivar[:N], self.spots[name] + ) self.fluxerr[:N] = np.sqrt(cov[:, 0, 0]) self.bgerr[:N] = np.sqrt(cov[:, 1, 1]) if refit: # Save the original masking due to dead / hot / saturated pixels. - self.valid[:N] = 1. 
* (self.ivar[:N] > 0)
+            self.valid[:N] = 1.0 * (self.ivar[:N] > 0)
             # Use the initial fit for an improved estimate of the signal variance.
-            signalvar = np.maximum(0, self.flux[:N].reshape(-1, 1, 1) * self.spots[name]) / gain
-            #assert np.all(signalvar >= 0)
+            signalvar = (
+                np.maximum(0, self.flux[:N].reshape(-1, 1, 1) * self.spots[name]) / gain
+            )
+            # assert np.all(signalvar >= 0)
             self.ivar[:N] = 1 / (self.bgsigma[:N].reshape(-1, 1, 1) ** 2 + signalvar)
             # Mask any pixels with extreme pulls, calculated with the improved ivar.
-            model = (self.bgfit[:N].reshape(-1, 1, 1) +
-                     self.flux[:N].reshape(-1, 1, 1) * self.spots[name])
+            model = (
+                self.bgfit[:N].reshape(-1, 1, 1)
+                + self.flux[:N].reshape(-1, 1, 1) * self.spots[name]
+            )
             pull = (self.data[:N] - model[:N]) * np.sqrt(self.ivar[:N])
-            self.valid[:N] *= 1. * (np.abs(pull) < pullcut)
+            self.valid[:N] *= 1.0 * (np.abs(pull) < pullcut)
             # Apply the original + new masking.
             self.ivar[:N] *= self.valid[:N]
             # Refit
-            self.flux[:N], self.bgfit[:N], cov, spot_offsets = fitter(self.data[:N], self.ivar[:N], self.spots[name])
-            #assert np.all(cov[:, 0, 0] > 0)
+            self.flux[:N], self.bgfit[:N], cov, spot_offsets = fitter(
+                self.data[:N], self.ivar[:N], self.spots[name]
+            )
+            # assert np.all(cov[:, 0, 0] > 0)
             self.fluxerr[:N] = np.sqrt(cov[:, 0, 0])
-            #assert np.all(cov[:, 1, 1] > 0)
+            # assert np.all(cov[:, 1, 1] > 0)
             self.bgerr[:N] = np.sqrt(cov[:, 1, 1])
         if fit_centroids:
             # Compute the average spot profile position offset
             if masked:
-                dx = np.ones(N)*np.mean(spot_offsets[fiber_mask[icamera, :N] > 0][:,0])
-                dy = np.ones(N)*np.mean(spot_offsets[fiber_mask[icamera, :N] > 0][:,1])
+                dx = np.ones(N) * np.mean(
+                    spot_offsets[fiber_mask[icamera, :N] > 0][:, 0]
+                )
+                dy = np.ones(N) * np.mean(
+                    spot_offsets[fiber_mask[icamera, :N] > 0][:, 1]
+                )
             else:
-                dx = np.ones(N)*np.mean(spot_offsets[:,0])
-                dy = np.ones(N)*np.mean(spot_offsets[:,1])
+                dx = np.ones(N) * np.mean(spot_offsets[:, 0])
+                dy = np.ones(N) * np.mean(spot_offsets[:, 1])
             shifted_profiles = desietc.util.shifted_profile(self.spots[name], dx, dy)
             # Do a final fit using the mean profile position offset over the different spots
             self.flux[:N], self.bgfit[:N], cov, _ = desietc.util.fit_spots(
-                self.data[:N], self.ivar[:N], shifted_profiles)
+                self.data[:N], self.ivar[:N], shifted_profiles
+            )
         # Give up if we have invalid fluxes or errors.
         if not np.all((self.fluxerr[:N] > 0) & np.isfinite(self.flux[:N])):
             if return_offsets:
@@ -258,8 +336,10 @@ def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, c
             else:
                 return None, None
         # Calculate the best-fit model for each fiber.
-        self.model[:N] = (self.bgfit[:N].reshape(-1, 1, 1) +
-                          self.flux[:N].reshape(-1, 1, 1) * self.spots[name])
+        self.model[:N] = (
+            self.bgfit[:N].reshape(-1, 1, 1)
+            + self.flux[:N].reshape(-1, 1, 1) * self.spots[name]
+        )
         # Calculate the corresponding pull images.
         self.pull[:N] = (self.data[:N] - self.model[:N]) * np.sqrt(self.ivar[:N])
         # Apply per-fiber calibrations.
@@ -278,7 +358,7 @@ def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, c
             cfluxerr = cfluxerr[keep]
             N = len(cflux)
         # Calculate the weighted mean over fibers (marginalized over bg levels) and its error. 
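        # With inverse-variance weights wgt_i = 1 / cfluxerr_i**2 (computed below),
        # the estimate is meanflux = sum_i wgt_i * cflux_i / sum_i wgt_i and its
        # error is (sum_i wgt_i) ** -0.5, which is the ivar**-0.5 returned at the end.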
- wgt = cfluxerr ** -2 + wgt = cfluxerr**-2 used = np.ones(N, bool) snr = self.flux[:N] / self.fluxerr[:N] order = np.argsort(snr)[::-1] @@ -286,7 +366,9 @@ def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, c while self.ndrop <= ndrop_max: ivar = np.sum(wgt[used]) meanflux = np.sum(wgt[used] * cflux[used]) / ivar - self.chisq = np.sum(((cflux[used] - meanflux) / cfluxerr[used]) ** 2) / (N - self.ndrop) + self.chisq = np.sum(((cflux[used] - meanflux) / cfluxerr[used]) ** 2) / ( + N - self.ndrop + ) if self.chisq < chisq_max: break # Drop the remaining fiber with the highest SNR. @@ -298,11 +380,17 @@ def setraw(self, raw, name, gain=2.5, saturation=65500, refit=True, pullcut=5, c if Temperature is not None: try: if (Temperature > -10) and (Temperature < 35): - meanflux = meanflux/(Temp_correc_coef[icamera,0] + Temp_correc_coef[icamera,1]*Temperature) - ivar = (Temp_correc_coef[icamera,0] + Temp_correc_coef[icamera,1]*Temperature)**2*ivar + meanflux = meanflux / ( + Temp_correc_coef[icamera, 0] + + Temp_correc_coef[icamera, 1] * Temperature + ) + ivar = ( + Temp_correc_coef[icamera, 0] + + Temp_correc_coef[icamera, 1] * Temperature + ) ** 2 * ivar except: pass if return_offsets: - return meanflux, ivar ** -0.5, spot_offsets + return meanflux, ivar**-0.5, spot_offsets else: - return meanflux, ivar ** -0.5 \ No newline at end of file + return meanflux, ivar**-0.5 diff --git a/desietc/util.py b/desietc/util.py index 16459e1..255eed6 100644 --- a/desietc/util.py +++ b/desietc/util.py @@ -3,6 +3,7 @@ The general guideline for things implemented here is that they do not read/write any files or produce any logging output. """ + import datetime import json import pathlib @@ -48,20 +49,22 @@ def fit_spots(data, ivar, profile, area=1): # Calculate the matrix elements for the linear problem # [ M11 M12 ] [ f ] = [ A1 ] # [ M12 M22 ] [ b ] [ A2 ] - M11 = np.sum(ivar * profile ** 2, axis=(-2, -1)) + M11 = np.sum(ivar * profile**2, axis=(-2, -1)) M12 = np.sum(ivar * area * profile, axis=(-2, -1)) - M22 = np.sum(ivar * area ** 2, axis=(-2, -1)) + M22 = np.sum(ivar * area**2, axis=(-2, -1)) A1 = np.sum(ivar * data * profile, axis=(-2, -1)) A2 = np.sum(ivar * data * area, axis=(-2, -1)) # Solve the linear problem. - det = M11 * M22 - M12 ** 2 + det = M11 * M22 - M12**2 M11 /= det M12 /= det M22 /= det - f = (M22 * A1 - M12 * A2) - b = (M11 * A2 - M12 * A1) + f = M22 * A1 - M12 * A2 + b = M11 * A2 - M12 * A1 # Calculate the covariance of (f, b). 
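    # Since M11, M12 and M22 were each divided by det above, the stack below is just
    # the inverse of the 2x2 normal-equations matrix, i.e. (in terms of the original
    # matrix elements) cov = [[M22, -M12], [-M12, M11]] / det.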
- cov = np.stack((np.stack((M22, -M12), axis=-1), np.stack((-M12, M11), axis=-1)), axis=-1) + cov = np.stack( + (np.stack((M22, -M12), axis=-1), np.stack((-M12, M11), axis=-1)), axis=-1 + ) return f, b, cov, None @@ -89,14 +92,21 @@ def shifted_profile(profile, dx, dy): nx = profile.shape[1] ny = profile.shape[2] shifted_profile = np.zeros(profile.shape) - x, y = np.meshgrid(np.linspace(0, nx-1, nx), np.linspace(0, ny-1, ny), indexing='ij') + x, y = np.meshgrid( + np.linspace(0, nx - 1, nx), np.linspace(0, ny - 1, ny), indexing="ij" + ) for i in range(nf): - interp = scipy.interpolate.RegularGridInterpolator((np.linspace(0, nx-1, nx), np.linspace(0, ny-1, ny)), profile[i], bounds_error=False, fill_value=0) + interp = scipy.interpolate.RegularGridInterpolator( + (np.linspace(0, nx - 1, nx), np.linspace(0, ny - 1, ny)), + profile[i], + bounds_error=False, + fill_value=0, + ) shifted_profile[i] = interp((x - dx[i], y - dy[i])) # Add a normalisation step as the interpolation might not be flux preserving try: normalization = np.sum(shifted_profile[i]) - shifted_profile[i] = shifted_profile[i]/normalization + shifted_profile[i] = shifted_profile[i] / normalization except: pass return shifted_profile @@ -126,7 +136,8 @@ def get_chi2(data, ivar, profile, flux, background, area=1): scalar Value of the chi2 residual given the data and the model. """ - return np.sum(ivar*(data - profile*flux - area*background)**2) + return np.sum(ivar * (data - profile * flux - area * background) ** 2) + def fit_spots_flux_and_pos(data, ivar, profile, area=1): """Fit images of a spot to estimate the spot flux and background level as well as the position offset @@ -152,8 +163,10 @@ def fit_spots_flux_and_pos(data, ivar, profile, area=1): [...,1,1] = var(b) and [...,0,1] = [...,1,0] = cov(f,b) and offsets has shape (nf,2) with elements [...,0] = position_offset(x direction) and [...,1] = position_offset(y direction) """ - npar = 4 # flux,bkg,dx,dy - nf = profile.shape[0] # Number of sky monitoring fibers (10 for SkyCam0 and 7 for SkyCam1) + npar = 4 # flux,bkg,dx,dy + nf = profile.shape[ + 0 + ] # Number of sky monitoring fibers (10 for SkyCam0 and 7 for SkyCam1) nx = profile.shape[1] ny = profile.shape[2] @@ -166,68 +179,93 @@ def fit_spots_flux_and_pos(data, ivar, profile, area=1): # Set the matrices that will verify: M [delta_params] = A, and other variable used in the minimisation M = np.zeros((npar, npar)) A = np.zeros(npar) - der = np.zeros((npar,nf,nx,ny)) - cov = np.zeros((nf,2,2)) - prev_chi2 = 0 # Residual of the previous step (set to 0) + der = np.zeros((npar, nf, nx, ny)) + cov = np.zeros((nf, 2, 2)) + prev_chi2 = 0 # Residual of the previous step (set to 0) spots_to_fit = list(range(nf)) for step in range(10): - der[0] = shifted_profile(profile,dx,dy) - der[1] = np.ones((nf,nx,ny))*area + der[0] = shifted_profile(profile, dx, dy) + der[1] = np.ones((nf, nx, ny)) * area for i in spots_to_fit: if step == 0: - Model = der[0][i]*flux[i] + area*bkg[i] + Model = der[0][i] * flux[i] + area * bkg[i] for p in range(npar): - for q in range(p,npar): - M[q,p] = M[p,q] = np.sum(ivar[i]*der[p][i]*der[q][i]) - A[p] = np.sum(ivar[i]*der[p][i]*(data[i] - Model)) + for q in range(p, npar): + M[q, p] = M[p, q] = np.sum(ivar[i] * der[p][i] * der[q][i]) + A[p] = np.sum(ivar[i] * der[p][i] * (data[i] - Model)) # Setting the diagonal to one everywhere it is null (ie in the dx, dy related blocks) - M[2:,2:] = np.diag(np.ones(2)) + M[2:, 2:] = np.diag(np.ones(2)) else: - der[2][i] = 
flux[i]*((shifted_profile(profile,dx+eps,dy)[i]-der[0][i])/eps) - der[3][i] = flux[i]*((shifted_profile(profile,dx,dy+eps)[i]-der[0][i])/eps) - Model = der[0][i]*flux[i] + area*bkg[i] + der[2][i] = flux[i] * ( + (shifted_profile(profile, dx + eps, dy)[i] - der[0][i]) / eps + ) + der[3][i] = flux[i] * ( + (shifted_profile(profile, dx, dy + eps)[i] - der[0][i]) / eps + ) + Model = der[0][i] * flux[i] + area * bkg[i] for p in range(npar): - for q in range(p,npar): - M[q,p] = M[p,q] = np.sum(ivar[i]*der[p][i]*der[q][i]) - A[p] = np.sum(ivar[i]*der[p][i]*(data[i] - Model)) + for q in range(p, npar): + M[q, p] = M[p, q] = np.sum(ivar[i] * der[p][i] * der[q][i]) + A[p] = np.sum(ivar[i] * der[p][i] * (data[i] - Model)) # Solve the matrix system - if np.linalg.det(M) !=0: + if np.linalg.det(M) != 0: M_inv = np.linalg.inv(M) Sol = np.dot(M_inv, A) # Search for a missed minimum of the chi2 function alpha_opt = 1 new_profile = shifted_profile(profile, dx + Sol[2], dy + Sol[3])[i] - new_chi2 = get_chi2(data[i], ivar[i], new_profile, flux[i]+Sol[0], bkg[i]+Sol[1], area) + new_chi2 = get_chi2( + data[i], + ivar[i], + new_profile, + flux[i] + Sol[0], + bkg[i] + Sol[1], + area, + ) try: - for alpha in np.linspace(0, 1, int(10*max(abs(Sol[2]),abs(Sol[3])))): + for alpha in np.linspace( + 0, 1, int(10 * max(abs(Sol[2]), abs(Sol[3]))) + ): if alpha != 0: - new_profile = shifted_profile(profile, dx + alpha*Sol[2], dy + alpha*Sol[3])[i] - chi2 = get_chi2(data[i], ivar[i], new_profile, flux[i]+alpha*Sol[0], bkg[i]+alpha*Sol[1], area) + new_profile = shifted_profile( + profile, dx + alpha * Sol[2], dy + alpha * Sol[3] + )[i] + chi2 = get_chi2( + data[i], + ivar[i], + new_profile, + flux[i] + alpha * Sol[0], + bkg[i] + alpha * Sol[1], + area, + ) if chi2 < new_chi2: alpha_opt = alpha new_chi2 = chi2 except: pass # Add to each parameter their "optimal" increment - flux[i] += alpha_opt*Sol[0] - bkg[i] += alpha_opt*Sol[1] - dx[i] += alpha_opt*Sol[2] - dy[i] += alpha_opt*Sol[3] + flux[i] += alpha_opt * Sol[0] + bkg[i] += alpha_opt * Sol[1] + dx[i] += alpha_opt * Sol[2] + dy[i] += alpha_opt * Sol[3] # Break i-th spot loop if its minimisation is satisfying or gives unrealistic dx and/or dy - if (abs(prev_chi2 - new_chi2) < 0.1) or (abs(dx[i]) > 6) or (abs(dy[i]) > 6): + if ( + (abs(prev_chi2 - new_chi2) < 0.1) + or (abs(dx[i]) > 6) + or (abs(dy[i]) > 6) + ): spots_to_fit.remove(i) # Store the residual at this step prev_chi2 = new_chi2 # Calculate the covariance of (f, b). - cov[i] = M_inv[:2,:2] + cov[i] = M_inv[:2, :2] offsets = np.stack((dx, dy), axis=-1) return flux, bkg, cov, offsets - def get_significance(D, W, smoothing=2.5, downsampling=2, medfiltsize=5): """Calculate a downsampled pixel significance image. @@ -275,8 +313,16 @@ def get_significance(D, W, smoothing=2.5, downsampling=2, medfiltsize=5): return D * np.sqrt(W) -def detect_sources(snr, minsnr=4, minsize=8, maxsize=32, minsep=0, - min_snr_ratio=0.1, maxsrc=20, measure=None): +def detect_sources( + snr, + minsnr=4, + minsize=8, + maxsize=32, + minsep=0, + min_snr_ratio=0.1, + maxsrc=20, + measure=None, +): """Detect and measure sources in a significance image. A source is defined as a connected and isolated region of pixels above @@ -317,7 +363,7 @@ def detect_sources(snr, minsnr=4, minsize=8, maxsize=32, minsep=0, A list of the measurements for each detected source. 
""" if minsize > maxsize: - raise ValueError('Expected minsize <= maxsize.') + raise ValueError("Expected minsize <= maxsize.") ny, nx = snr.shape # Label all non-overlapping regions above SNRmin in the inset image. labeled, nlabels = scipy.ndimage.label(snr > minsnr) @@ -328,15 +374,20 @@ def detect_sources(snr, minsnr=4, minsize=8, maxsize=32, minsep=0, bboxes = scipy.ndimage.find_objects(labeled) # Estimate the quadrature summed SNR for each candidate source. snrtot = scipy.ndimage.labeled_comprehension( - snr, labeled, labels, out_dtype=float, default=-1, - func=lambda X: np.sqrt(np.sum(X ** 2))) + snr, + labeled, + labels, + out_dtype=float, + default=-1, + func=lambda X: np.sqrt(np.sum(X**2)), + ) maxsnrtot = None # Rank sources by snrtot. ranks = np.argsort(snrtot)[::-1] # Build the final list of detected sources. sources = [] - snrsq = snr ** 2 - minsepsq = minsep ** 2 + snrsq = snr**2 + minsepsq = minsep**2 centroids = np.empty((maxsrc, 2)) for idx in range(nlabels): label = labels[ranks[idx]] @@ -446,7 +497,9 @@ def make_template(size, profile, dx=0, dy=0, oversampling=10, normalized=True): array 2D numpy array of template pixel values with shape (size, size). """ - xy = (np.arange(size * oversampling) - 0.5 * (size * oversampling - 1)) / oversampling + xy = ( + np.arange(size * oversampling) - 0.5 * (size * oversampling - 1) + ) / oversampling z = profile(xy - dx, (xy - dy).reshape(-1, 1)) T = downsample(z, oversampling, np.mean) if normalized: @@ -483,38 +536,45 @@ def downsample(data, downsampling, summary=np.sum, allow_trim=False): """ data = np.asarray(data) if data.ndim != 2: - raise ValueError('Data must be 2 dimensional.') + raise ValueError("Data must be 2 dimensional.") ny, nx = data.shape if not allow_trim and ((nx % downsampling) or (ny % downsampling)): - raise ValueError('Data shape {0} does not evenly divide downsampling={1} and allow_trim is False.' - .format((ny, nx), downsampling)) + raise ValueError( + "Data shape {0} does not evenly divide downsampling={1} and allow_trim is False.".format( + (ny, nx), downsampling + ) + ) ny //= downsampling nx //= downsampling shape = (ny, nx, downsampling, downsampling) - strides = (downsampling * data.strides[0], downsampling * data.strides[1]) + data.strides + strides = ( + downsampling * data.strides[0], + downsampling * data.strides[1], + ) + data.strides blocks = np.lib.stride_tricks.as_strided( - data[:downsampling * ny, :downsampling * nx], shape=shape, strides=strides) + data[: downsampling * ny, : downsampling * nx], shape=shape, strides=strides + ) return summary(blocks, axis=(2, 3)) def downsample_weighted(D, W, downsampling=4, allow_trim=True): - """Downsample 2D data D with weights W. - """ + """Downsample 2D data D with weights W.""" if D.shape != W.shape: - raise ValueError('Arrays D, W must have the same shape.') + raise ValueError("Arrays D, W must have the same shape.") if D.ndim != 2: - raise ValueError('Arrays D, W must be 2D.') + raise ValueError("Arrays D, W must be 2D.") if np.any(W < 0): - raise ValueError('Array W contains negative values.') - WD = downsample(D * W, downsampling=downsampling, summary=np.sum, allow_trim=allow_trim) + raise ValueError("Array W contains negative values.") + WD = downsample( + D * W, downsampling=downsampling, summary=np.sum, allow_trim=allow_trim + ) W = downsample(W, downsampling=downsampling, summary=np.sum, allow_trim=allow_trim) D = np.divide(WD, W, out=np.zeros_like(WD), where=W > 0) return D, W def smooth(D, W, smoothing): - """Apply a weighted Gaussian smoothing. 
- """ + """Apply a weighted Gaussian smoothing.""" WD = scipy.ndimage.gaussian_filter(W * D, smoothing) W = scipy.ndimage.gaussian_filter(W, smoothing) D = np.divide(WD, W, out=np.zeros_like(D), where=W > 0) @@ -522,8 +582,7 @@ def smooth(D, W, smoothing): def get_stacked(stamps, smoothing=1, maxdither=1, maxdist=3, min_stack=2): - """Calculate a stack of detected sources ignoring outliers. - """ + """Calculate a stack of detected sources ignoring outliers.""" # Extract and normalize stamps. nstamps = len(stamps) if nstamps == 0: @@ -539,7 +598,8 @@ def get_stacked(stamps, smoothing=1, maxdither=1, maxdist=3, min_stack=2): for i in range(j + 1, nstamps): D2, W2 = stamps[i] dist_ji, dither_ji, fscale_ji, _ = get_stamp_distance( - D1, W1, D2, W2, maxdither=maxdither, smoothing=smoothing) + D1, W1, D2, W2, maxdither=maxdither, smoothing=smoothing + ) dist[i, j] = dist[j, i] = dist_ji dither[j, i] = dither_ji dither[i, j] = -dither_ji @@ -565,17 +625,20 @@ def get_stacked(stamps, smoothing=1, maxdither=1, maxdist=3, min_stack=2): D, W = stamps[j] dy, dx = dither[imed, j] f = fscale[imed, j] - inset_j = slice(maxdither + dy, ny - maxdither + dy), slice(maxdither + dx, nx - maxdither + dx) - Dj, Wj = f * D[inset_j], W[inset_j] / f ** 2 + inset_j = slice(maxdither + dy, ny - maxdither + dy), slice( + maxdither + dx, nx - maxdither + dx + ) + Dj, Wj = f * D[inset_j], W[inset_j] / f**2 DWstack += Dj * Wj Wstack += Wj Dstack = np.divide(DWstack, Wstack, out=np.zeros_like(DWstack), where=Wstack > 0) return normalize_stamp(Dstack, Wstack) -def get_stamp_distance(D1, W1, D2, W2, maxdither=3, smoothing=1, fscale=np.linspace(0.85, 1.15, 11)): - """Calculate the minimum chisq distance between two stamps allowing for some dither. - """ +def get_stamp_distance( + D1, W1, D2, W2, maxdither=3, smoothing=1, fscale=np.linspace(0.85, 1.15, 11) +): + """Calculate the minimum chisq distance between two stamps allowing for some dither.""" ny, nx = D1.shape assert D1.shape == D2.shape == W1.shape == W2.shape nscale = len(fscale) @@ -594,21 +657,35 @@ def get_stamp_distance(D1, W1, D2, W2, maxdither=3, smoothing=1, fscale=np.linsp for iy, dy in enumerate(dxy): for ix, dx in enumerate(dxy): # Dither the second stamp. - D2inset = D2[maxdither + dy:ny - maxdither + dy, maxdither + dx:nx - maxdither + dx] - W2inset = W2[maxdither + dy:ny - maxdither + dy, maxdither + dx:nx - maxdither + dx] + D2inset = D2[ + maxdither + dy : ny - maxdither + dy, + maxdither + dx : nx - maxdither + dx, + ] + W2inset = W2[ + maxdither + dy : ny - maxdither + dy, + maxdither + dx : nx - maxdither + dx, + ] # Calculate the chi-square distance between the inset stamps with scale factors of # 1/fvec and fvec applied to (D1,W1) and (D2,W2) respectively. num = np.sqrt(W1inset * W2inset) * (D1inset / fvec - D2inset * fvec) - denom = np.sqrt(W1inset * fvec ** 2 + W2inset * fvec ** -2) + denom = np.sqrt(W1inset * fvec**2 + W2inset * fvec**-2) # Could also use where=(num > 0) here. - pull[iy, ix] = np.divide(num, denom, out=np.zeros_like(num), where=denom > 0) + pull[iy, ix] = np.divide( + num, denom, out=np.zeros_like(num), where=denom > 0 + ) # Find the dither with the smallest chisq. 
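    # pull has shape (ndither, ndither, nscale, ny - 2 * maxdither, nx - 2 * maxdither),
    # so summing the squared pulls over the two trailing pixel axes leaves one chisq per
    # (dy, dx, fscale) hypothesis, and the argmin below picks the best alignment.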
- chisq = np.sum(pull ** 2, axis=(3, 4)) - iy, ix, iscale = np.unravel_index(np.argmin(chisq.reshape(-1)), (ndither, ndither, nscale)) + chisq = np.sum(pull**2, axis=(3, 4)) + iy, ix, iscale = np.unravel_index( + np.argmin(chisq.reshape(-1)), (ndither, ndither, nscale) + ) assert chisq.min() == chisq[iy, ix, iscale] # Return the smallest distance, the corresponding dither and scale, and the best pull image. - return (chisq[iy, ix, iscale] / D1inset.size, np.array((dxy[iy], dxy[ix]), int), - fscale[iscale], pull[iy, ix, iscale].copy()) + return ( + chisq[iy, ix, iscale] / D1inset.size, + np.array((dxy[iy], dxy[ix]), int), + fscale[iscale], + pull[iy, ix, iscale].copy(), + ) def normalize_stamp(D, W, smoothing=2.5): @@ -618,7 +695,7 @@ def normalize_stamp(D, W, smoothing=2.5): smoothed, _ = smooth(D, W, smoothing) norm = smoothed.sum() if norm != 0: - return D / np.abs(norm), W * norm ** 2 + return D / np.abs(norm), W * norm**2 def diskgrid(n, radius=1, alpha=2): @@ -650,7 +727,7 @@ def diskgrid(n, radius=1, alpha=2): phi = 0.5 * (np.sqrt(5) + 1) # Calculate coordinates of each point to uniformly fill the unit disk. k = np.arange(1, n + 1) - theta = 2 * np.pi * k / phi ** 2 + theta = 2 * np.pi * k / phi**2 r = np.sqrt((k - 0.5) / (n - 0.5)) # Transform r to increase the density towards the center. if alpha > 0: @@ -685,7 +762,9 @@ def make_template(size, profile, dx=0, dy=0, oversampling=10, normalized=True): array 2D numpy array of template pixel values with shape (size, size). """ - xy = (np.arange(size * oversampling) - 0.5 * (size * oversampling - 1)) / oversampling + xy = ( + np.arange(size * oversampling) - 0.5 * (size * oversampling - 1) + ) / oversampling z = profile(xy - dx, (xy - dy).reshape(-1, 1)) T = downsample(z, oversampling, np.mean) if normalized: @@ -694,8 +773,7 @@ def make_template(size, profile, dx=0, dy=0, oversampling=10, normalized=True): def preprocess(D, W, nsig_lo=10, nsig_hi=30, vmin=None, vmax=None): - """Preprocess weighted 2D array data for display. - """ + """Preprocess weighted 2D array data for display.""" masked = W == 0 # Calculate the median unmasked pixel value. median_value = np.median(D[~masked]) @@ -733,25 +811,36 @@ def ADCangles(EL, HA, DEC, LAT=31.963972222): class PSFMeasure(object): - def __init__(self, stamp_size, fiber_diam_um=107, pixel_size_um=15, plate_scales=(70., 76.), - max_offset_pix=3.5, noffset=15, nangbins=20): + def __init__( + self, + stamp_size, + fiber_diam_um=107, + pixel_size_um=15, + plate_scales=(70.0, 76.0), + max_offset_pix=3.5, + noffset=15, + nangbins=20, + ): self.stamp_size = stamp_size self.pixel_size_um = pixel_size_um self.plate_scales = plate_scales # Tabulate fiber templates for each (x,y) offset in the x >= 0 and y >= 0 quadrant. 
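        # Only this single quadrant needs to be tabulated: measure() below recovers the
        # other three quadrants by flipping the template with reversed slices
        # (T[reverse, :], T[:, reverse]), relying on the symmetry of the fiber profile.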
- self.offset_template = np.empty((noffset, noffset, stamp_size, stamp_size), np.float32) + self.offset_template = np.empty( + (noffset, noffset, stamp_size, stamp_size), np.float32 + ) max_rsq = (0.5 * fiber_diam_um / pixel_size_um) ** 2 - profile = lambda x, y: 1.0 * (x ** 2 + y ** 2 < max_rsq) + profile = lambda x, y: 1.0 * (x**2 + y**2 < max_rsq) delta = np.linspace(0, max_offset_pix, noffset) for iy in range(noffset): for ix in range(noffset): self.offset_template[iy, ix] = make_template( - stamp_size, profile, dx=delta[ix], dy=delta[iy], normalized=False) + stamp_size, profile, dx=delta[ix], dy=delta[iy], normalized=False + ) self.xyoffset = np.linspace(-max_offset_pix, +max_offset_pix, 2 * noffset - 1) dxy = np.arange(self.stamp_size) - 0.5 * (self.stamp_size - 1) self.xgrid, self.ygrid = np.meshgrid(dxy, dxy, sparse=False) rmax = dxy[-1] * self.pixel_size_um / max(self.plate_scales) - self.angbins = np.linspace(0., rmax, nangbins + 1) + self.angbins = np.linspace(0.0, rmax, nangbins + 1) self.rang = 0.5 * (self.angbins[1:] + self.angbins[:-1]) def measure(self, P, W): @@ -770,20 +859,28 @@ def measure(self, P, W): fiberfrac[origin + iy, origin + ix] = np.sum(P * T) / Psum if iy > 0: # Calculate in the x >= 0 and y < 0 quadrant. - fiberfrac[origin - iy, origin + ix] = np.sum(P * T[reverse, :]) / Psum + fiberfrac[origin - iy, origin + ix] = ( + np.sum(P * T[reverse, :]) / Psum + ) if ix > 0: # Calculate in the x < 0 and y >= 0 quadrant. - fiberfrac[origin + iy, origin - ix] = np.sum(P * T[:, reverse]) / Psum + fiberfrac[origin + iy, origin - ix] = ( + np.sum(P * T[:, reverse]) / Psum + ) if iy > 0 and ix > 0: # Calculate in the x < 0 and y < 0 quadrant. - fiberfrac[origin - iy, origin - ix] = np.sum(P * T[reverse, reverse]) / Psum + fiberfrac[origin - iy, origin - ix] = ( + np.sum(P * T[reverse, reverse]) / Psum + ) # Locate the best centered offset. iy, ix = np.unravel_index(np.argmax(fiberfrac), fiberfrac.shape) xc = self.xyoffset[ix] yc = self.xyoffset[iy] # Calculate the radius of each pixel in arcsecs relative to this center. - radius = np.hypot((self.xgrid - xc) * self.pixel_size_um / self.plate_scales[0], - (self.ygrid - yc) * self.pixel_size_um / self.plate_scales[1]).reshape(-1) + radius = np.hypot( + (self.xgrid - xc) * self.pixel_size_um / self.plate_scales[0], + (self.ygrid - yc) * self.pixel_size_um / self.plate_scales[1], + ).reshape(-1) # Fill ivar-weighted histograms of flux versus angular radius. WZ, _ = np.histogram(radius, bins=self.angbins, weights=(P * W).reshape(-1)) W, _ = np.histogram(radius, bins=self.angbins, weights=W.reshape(-1)) @@ -806,7 +903,7 @@ def measure(self, P, W): class CenteredStamp(object): - def __init__(self, stamp_size, inset, method='fiber'): + def __init__(self, stamp_size, inset, method="fiber"): self.inset = inset self.stamp_size = stamp_size self.inset_size = stamp_size - 2 * inset @@ -817,20 +914,31 @@ def __init__(self, stamp_size, inset, method='fiber'): self.template = np.zeros((noffset, noffset, stamp_size, stamp_size)) # Define the template profile. 
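        # rfibersq below is the fiber radius in pixels, squared: a 107 um diameter
        # fiber on 15 um pixels gives a radius of 0.5 * 107 / 15 ~= 3.6 pixels.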
rfibersq = (0.5 * 107 / 15) ** 2 - if method == 'fiber': - profile = lambda x, y: 1.0 * (x ** 2 + y ** 2 < rfibersq) - elif method == 'donut': + if method == "fiber": + profile = lambda x, y: 1.0 * (x**2 + y**2 < rfibersq) + elif method == "donut": xymax = 0.5 * (stamp_size - 2 * inset) + def profile(x, y): - rsq = x ** 2 + y ** 2 - return rsq * np.exp(-0.5 * rsq / (3 * rfibersq)) * (np.maximum(np.abs(x), np.abs(y)) < xymax) + rsq = x**2 + y**2 + return ( + rsq + * np.exp(-0.5 * rsq / (3 * rfibersq)) + * (np.maximum(np.abs(x), np.abs(y)) < xymax) + ) + else: raise ValueError('Unsupported method "{0}".'.format(method)) # Build the profiles. for iy in range(noffset): for ix in range(noffset): self.template[iy, ix] = make_template( - stamp_size, profile, dx=self.dxy[ix], dy=self.dxy[iy], normalized=True) + stamp_size, + profile, + dx=self.dxy[ix], + dy=self.dxy[iy], + normalized=True, + ) def center(self, D, W): assert D.shape == (self.stamp_size, self.stamp_size) and W.shape == D.shape @@ -838,7 +946,9 @@ def center(self, D, W): # Calculate the weighted mean template flux for each offset. WDsum = np.sum(W * D * self.template, axis=(2, 3)) Wsum = np.sum(W * self.template, axis=(2, 3)) - meanflux = np.divide(WDsum, Wsum, out=np.zeros(self.template.shape[:2]), where=Wsum > 0) + meanflux = np.divide( + WDsum, Wsum, out=np.zeros(self.template.shape[:2]), where=Wsum > 0 + ) # Calculate the best-centered offset iy, ix = np.unravel_index(np.argmax(meanflux.reshape(-1)), meanflux.shape) yslice = slice(iy, iy + self.inset_size) @@ -851,6 +961,7 @@ class MeasurementBuffer(object): Auxiliary data can optionally be attached to each measurement. """ + SECS_PER_DAY = 86400 def __init__(self, maxlen, default_value, padding=300, recent=300, aux_dtype=[]): @@ -859,7 +970,10 @@ def __init__(self, maxlen, default_value, padding=300, recent=300, aux_dtype=[]) self.full = False self.last = None dtype = [ - ('mjd1', np.float64), ('mjd2', np.float64), ('value', np.float32), ('error', np.float32) + ("mjd1", np.float64), + ("mjd2", np.float64), + ("value", np.float32), + ("error", np.float32), ] + aux_dtype self._entries = np.zeros(shape=maxlen, dtype=dtype) self.names = self._entries.dtype.names @@ -869,10 +983,10 @@ def __init__(self, maxlen, default_value, padding=300, recent=300, aux_dtype=[]) @property def entries(self): - return self._entries[:self.len] + return self._entries[: self.len] def __str__(self): - return f'{self.__class__.__name__}(len={self.len}, full={self.full}, oldest={self.oldest})' + return f"{self.__class__.__name__}(len={self.len}, full={self.full}, oldest={self.oldest})" def add(self, mjd1, mjd2, value, error, aux_data=()): """Add a single measurement. @@ -880,7 +994,7 @@ def add(self, mjd1, mjd2, value, error, aux_data=()): We make no assumption that measurements are non-overlapping or added in time order. """ assert mjd1 < mjd2 and error > 0 - is_oldest = (self.oldest is None) or (mjd1 < self._entries[self.oldest]['mjd1']) + is_oldest = (self.oldest is None) or (mjd1 < self._entries[self.oldest]["mjd1"]) entry = (mjd1, mjd2, value, error) + aux_data if self.full: assert self.oldest is not None @@ -890,19 +1004,18 @@ def add(self, mjd1, mjd2, value, error, aux_data=()): self.last = self.oldest self._entries[self.last] = entry # Update the index of the oldest entry, which might be us. - self.oldest = np.argmin(self.entries['mjd1']) + self.oldest = np.argmin(self.entries["mjd1"]) else: self.last = self.len if is_oldest: # This is now the oldest entry. 
self.oldest = self.last
         self.len += 1
-        self.full = (self.len == self._entries.size)
+        self.full = self.len == self._entries.size
         self._entries[self.last] = entry
 
     def set_last(self, **kwargs):
-        """Set values of the most recently added measurement.
-        """
+        """Set values of the most recently added measurement."""
         if self.last is not None:
             for name, value in kwargs.items():
                 if name in self.names:
 
     def inside(self, mjd1, mjd2):
 
         Use mjd2=None to select all entries after mjd1.
         """
-        mask = self.entries['mjd2'] > mjd1
+        mask = self.entries["mjd2"] > mjd1
         if mjd2 is not None:
-            mask &= self.entries['mjd1'] < mjd2
+            mask &= self.entries["mjd1"] < mjd2
         return mask
 
-    def average(self, mjd, interval_secs, min_values, field='value'):
+    def average(self, mjd, interval_secs, min_values, field="value"):
         """Return the average of values recorded up to interval_secs before mjd, or None
         if less than min_values have been recorded.
         """
         sel = self.inside(mjd - interval_secs / self.SECS_PER_DAY, mjd)
         nsel = np.count_nonzero(sel)
         return np.mean(self.entries[sel][field]) if nsel >= min_values else None
 
-    def sample_grid(self, mjd_grid, field='value'):
+    def sample_grid(self, mjd_grid, field="value"):
         """Sample measurements on the specified MJD grid.
 
         Use measurements that lie outside the grid up to self.padding seconds.
         Return default_value when no measurements are available. Use constant
         extrapolation of the first/last measurement if necessary.
         """
-        mjd1, mjd2 = mjd_grid[0], mjd_grid[-1]
+        mjd1, mjd2 = mjd_grid[0], mjd_grid[-1]
         # Select measurements that span the padded input grid.
         sel = self.inside(mjd1 - self.padding, mjd2 + self.padding)
         if not np.any(sel):
             return np.full_like(mjd_grid, self.default_value)
-        mjd_sel = 0.5 * (self.entries[sel]['mjd1'] + self.entries[sel]['mjd2'])
-        dmjd_sel = self.entries[sel]['mjd2'] - self.entries[sel]['mjd1']
+        mjd_sel = 0.5 * (self.entries[sel]["mjd1"] + self.entries[sel]["mjd2"])
+        dmjd_sel = self.entries[sel]["mjd2"] - self.entries[sel]["mjd1"]
         value_sel = self.entries[sel][field]
         # Values might not be recorded in time order so fix that now.
         iorder = np.argsort(mjd_sel)
         # Correct for this deadtime by calculating a piece-wise linear approximation to
         # the instantaneous value that matches the measured integrals.
         try:
-            value_sel_corrected = pwlinear_solve(mjd_sel, dmjd_sel, value_sel * dmjd_sel)
+            value_sel_corrected = pwlinear_solve(
+                mjd_sel, dmjd_sel, value_sel * dmjd_sel
+            )
         except Exception as e:
-            print(f'pwlinear_solve failed: {e}')
+            print(f"pwlinear_solve failed: {e}")
             value_sel_corrected = value_sel
         # Use linear interpolation with constant extrapolation beyond the endpoints.
         return np.interp(mjd_grid, mjd_sel, value_sel_corrected)
 
     def trend(self, mjd):
 
         sel = self.inside(mjd - self.recent, mjd)
         if not np.any(sel):
             return self.default_value, 0
-        wgt = self.entries[sel]['error'] ** -0.5
-        val = self.entries[sel]['value']
+        wgt = self.entries[sel]["error"] ** -0.5
+        val = self.entries[sel]["value"]
         return np.sum(wgt * val) / np.sum(wgt), 0
 
     def forecast_grid(self, mjd_grid):
-        """Forecast our trend on the specified MJD grid.
-        """
-        mjd1, mjd2 = mjd_grid[0], mjd_grid[-1]
+        """Forecast our trend on the specified MJD grid."""
+        mjd1, mjd2 = mjd_grid[0], mjd_grid[-1]
         # Calculate the trend at mjd1.
         offset, slope = self.trend(mjd1)
         # Evaluate the linear trend on our grid. 
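        # (Presumably something like offset + slope * (mjd_grid - mjd1); since trend()
        # above currently returns a slope of 0, this forecast reduces to constant
        # extrapolation of the recent weighted mean.)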
@@ -991,17 +1105,17 @@ def save(self, mjd1, mjd2=None): if len(E) == 0: return {} # Sort based on mjd1. - isort = np.argsort(E['mjd1']) + isort = np.argsort(E["mjd1"]) E = E[isort] # Convert to a dictionary of fields, excluding mjd1,2. - D = {name: E[name] for name in E.dtype.fields if name not in ('mjd1','mjd2')} + D = {name: E[name] for name in E.dtype.fields if name not in ("mjd1", "mjd2")} # Lookup the earliest MJD. - mjd0 = E['mjd1'][0] - D['mjd0'] = mjd0 + mjd0 = E["mjd1"][0] + D["mjd0"] = mjd0 # Replace mjd1,2 with offsets dt1,2 from mjd0 in seconds. # Use float32 so that JSON output will be rounded. - D['dt1'] = np.float32((E['mjd1'] - mjd0) * self.SECS_PER_DAY) - D['dt2'] = np.float32((E['mjd2'] - mjd0) * self.SECS_PER_DAY) + D["dt1"] = np.float32((E["mjd1"] - mjd0) * self.SECS_PER_DAY) + D["dt2"] = np.float32((E["mjd2"] - mjd0) * self.SECS_PER_DAY) return D @@ -1011,7 +1125,9 @@ def mjd_to_date(mjd, utc_offset): Use utc_offset of -7 for local time at Kitt Peak. Use :func:`date_to_mjd` to invert this calculation. """ - return datetime.datetime(2019, 1, 1) + datetime.timedelta(days=mjd - 58484.0, hours=utc_offset) + return datetime.datetime(2019, 1, 1) + datetime.timedelta( + days=mjd - 58484.0, hours=utc_offset + ) def date_to_mjd(date, utc_offset): @@ -1032,7 +1148,7 @@ def mjd_to_night(mjd): date = mjd_to_date(mjd, utc_offset=-7) if date.hour < 12: date -= datetime.timedelta(days=1) - return int(date.strftime('%Y%m%d')) + return int(date.strftime("%Y%m%d")) def night_to_midnight(night, utc_offset): @@ -1041,15 +1157,18 @@ def night_to_midnight(night, utc_offset): """ night = str(night) if len(night) != 8: - raise ValueError('night_to_midnight: expected an integer of the form YYYYMMDD.') + raise ValueError("night_to_midnight: expected an integer of the form YYYYMMDD.") year, month, day = int(night[0:4]), int(night[4:6]), int(night[6:8]) - return datetime.datetime(year, month, day, 12) + datetime.timedelta(hours=12 + utc_offset) + return datetime.datetime(year, month, day, 12) + datetime.timedelta( + hours=12 + utc_offset + ) class NumpyEncoder(json.JSONEncoder): - """JSON encoder to use with numpy data with rounding of float32 values. - """ + """JSON encoder to use with numpy data with rounding of float32 values.""" + FLOAT32_DECIMALS = 6 + def default(self, obj): if isinstance(obj, np.float32): # Convert to 64-bit float before rounding. @@ -1062,7 +1181,7 @@ def default(self, obj): if obj.dtype.fields is not None: # convert a recarray to a dictionary. new_obj = {} - for (name, (dtype, size)) in obj.dtype.fields.items(): + for name, (dtype, size) in obj.dtype.fields.items(): if dtype.base == np.float32: new_obj[name] = np.round(obj[name], self.FLOAT32_DECIMALS) else: @@ -1078,8 +1197,7 @@ def default(self, obj): def is_datetime(time, oldest=datetime.datetime(2019, 1, 1)): - """Test for a valid datetime after oldest. - """ + """Test for a valid datetime after oldest.""" try: delta = (time - oldest).days return delta > 0 @@ -1104,17 +1222,17 @@ def load_guider_centroids(path, expid): shape (nstars, 2, nframes). If a star is not measured in a frame, the centroid values are np.nan. """ - cameras = ('GUIDE0', 'GUIDE2', 'GUIDE3', 'GUIDE5', 'GUIDE7', 'GUIDE8') + cameras = ("GUIDE0", "GUIDE2", "GUIDE3", "GUIDE5", "GUIDE7", "GUIDE8") # Read the json file of guider outputs. 
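    # Layout assumed by the parsing below: the JSON has top-level 'expid' and
    # 'summary' (with a 'frames' count) keys, plus a 'frames' dict keyed by frame
    # number ('1', '2', ...) whose entries map per-star keys such as 'GUIDE0_0' to
    # dicts holding 'x_expected', 'y_expected' and per-frame centroid values.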
- jsonpath = path / 'centroids-{0}.json'.format(expid) + jsonpath = path / "centroids-{0}.json".format(expid) if not jsonpath.exists(): - raise ValueError('Non-existent path: {0}.'.format(jsonpath)) + raise ValueError("Non-existent path: {0}.".format(jsonpath)) with open(jsonpath) as f: D = json.load(f) - assert D['expid'] == int(expid) - nframes = D['summary']['frames'] + assert D["expid"] == int(expid) + nframes = D["summary"]["frames"] # Use the first frame to lookup the guide stars for each camera. - frame0 = D['frames']['1'] + frame0 = D["frames"]["1"] stars = {G: len([K for K in frame0.keys() if K.startswith(G)]) for G in cameras} expected = {G: np.zeros((stars[G], 2)) for G in cameras} combined = {G: np.zeros((2, nframes)) for G in cameras} @@ -1122,22 +1240,24 @@ def load_guider_centroids(path, expid): for camera in cameras: # Get the expected position for each guide star. for istar in range(stars[camera]): - S = frame0.get(camera + f'_{istar}') - expected[camera][istar, 0] = S['y_expected'] - expected[camera][istar, 1] = S['x_expected'] + S = frame0.get(camera + f"_{istar}") + expected[camera][istar, 0] = S["y_expected"] + expected[camera][istar, 1] = S["x_expected"] # Get the combined centroid sent to the telescope for each frame. for iframe in range(nframes): - F = D['frames'].get(str(iframe + 1)) + F = D["frames"].get(str(iframe + 1)) if F is None: - logging.warning('Missing frame {0}/{1} in {2}'.format(iframe + 1, nframes, jsonpath)) + logging.warning( + "Missing frame {0}/{1} in {2}".format(iframe + 1, nframes, jsonpath) + ) continue - combined[camera][0, iframe] = F['combined_y'] - combined[camera][1, iframe] = F['combined_x'] + combined[camera][0, iframe] = F["combined_y"] + combined[camera][1, iframe] = F["combined_x"] # Get the measured centroids for each guide star in this frame. for istar in range(stars[camera]): - S = F.get(camera + '_{0}'.format(istar)) - centroid[camera][istar, 0, iframe] = S.get('y_centroid', np.nan) - centroid[camera][istar, 1, iframe] = S.get('x_centroid', np.nan) + S = F.get(camera + "_{0}".format(istar)) + centroid[camera][istar, 0, iframe] = S.get("y_centroid", np.nan) + centroid[camera][istar, 1, iframe] = S.get("x_centroid", np.nan) return expected, combined, centroid @@ -1151,8 +1271,11 @@ def git_describe(): try: path = pathlib.Path(__file__).parent process = subprocess.Popen( - ['git', 'describe', '--tags', '--always'], - cwd=path, shell=False, stdout=subprocess.PIPE) + ["git", "describe", "--tags", "--always"], + cwd=path, + shell=False, + stdout=subprocess.PIPE, + ) return process.communicate()[0].strip().decode() except Exception as e: return None @@ -1176,56 +1299,162 @@ def cos_zenith_to_airmass(cosZ): float or array Airmass value(s) >= 1. """ - cosZ = np.clip(np.asarray(cosZ), 0., 1.) - return np.clip(1. / (cosZ + 0.025 * np.exp(-11 * cosZ)), 1., None) + cosZ = np.clip(np.asarray(cosZ), 0.0, 1.0) + return np.clip(1.0 / (cosZ + 0.025 * np.exp(-11 * cosZ)), 1.0, None) + # Arrays of (r,scale_r,scale_az) in units of (mm, um/arcsec, um/arcsec) # downsampled from $DESIMODEL/data/focalplane/platescale.txt -platescale_data = np.array([ - [ 0. 
, 9.55338614, 19.10823877, 28.66602485, - 38.22821229, 47.79627044, 57.37167061, 66.95583925, - 76.5503467 , 86.15653808, 95.77607221, 105.40998724, - 115.06050339, 124.72876316, 134.41629762, 144.12445419, - 153.85507949, 163.60955313, 173.38942287, 183.19603606, - 193.03141186, 202.89690121, 212.79409317, 222.72462014, - 232.69009941, 242.69217521, 252.73250831, 262.81277626, - 272.93467342, 283.09991068, 293.31021495, 303.56735178, - 313.87303193, 324.22913743, 334.637255 , 345.09926806, - 355.61697659, 366.19242695, 376.8270098 , 387.52276409, - 398.28177809, 409.10421235, 419.98856998], - [ 67.48456941, 67.4897893 , 67.50549147, 67.53167143, - 67.56832223, 67.61543507, 67.67300035, 67.74100878, - 67.81945285, 67.9083283 , 68.00763583, 68.11738281, - 68.23758501, 68.36826828, 68.50947014, 68.66124117, - 68.8236462 , 68.99676513, 69.18069348, 69.3755424 , - 69.58143832, 69.7985219 , 70.0269465 , 70.26687594, - 70.51848152, 70.78193842, 71.05742124, 71.3450988 , - 71.64512835, 71.95764893, 72.2827744 , 72.62058611, - 72.97112576, 73.33438887, 73.71031989, 74.09881018, - 74.49970068, 74.91279208, 75.33786611, 75.77472346, - 76.22324542, 76.6834893 , 77.15583098], - [ 67.48456948, 67.4863062 , 67.49151606, 67.50019814, - 67.51235097, 67.52797265, 67.54706099, 67.56961364, - 67.59562835, 67.62510318, 67.65803674, 67.69442847, - 67.73427893, 67.77759008, 67.82436552, 67.87461092, - 67.92833383, 67.98554449, 68.04625563, 68.11048271, - 68.17824403, 68.24956072, 68.32445674, 68.40295874, - 68.48509592, 68.57089967, 68.66040331, 68.75364155, - 68.85064994, 68.95146421, 69.05611955, 69.16464973, - 69.27708619, 69.39345711, 69.51378646, 69.63809304, - 69.76638985, 69.89868353, 70.03497444, 70.17525741, - 70.31952364, 70.46776416, 70.61997566]]) +platescale_data = np.array( + [ + [ + 0.0, + 9.55338614, + 19.10823877, + 28.66602485, + 38.22821229, + 47.79627044, + 57.37167061, + 66.95583925, + 76.5503467, + 86.15653808, + 95.77607221, + 105.40998724, + 115.06050339, + 124.72876316, + 134.41629762, + 144.12445419, + 153.85507949, + 163.60955313, + 173.38942287, + 183.19603606, + 193.03141186, + 202.89690121, + 212.79409317, + 222.72462014, + 232.69009941, + 242.69217521, + 252.73250831, + 262.81277626, + 272.93467342, + 283.09991068, + 293.31021495, + 303.56735178, + 313.87303193, + 324.22913743, + 334.637255, + 345.09926806, + 355.61697659, + 366.19242695, + 376.8270098, + 387.52276409, + 398.28177809, + 409.10421235, + 419.98856998, + ], + [ + 67.48456941, + 67.4897893, + 67.50549147, + 67.53167143, + 67.56832223, + 67.61543507, + 67.67300035, + 67.74100878, + 67.81945285, + 67.9083283, + 68.00763583, + 68.11738281, + 68.23758501, + 68.36826828, + 68.50947014, + 68.66124117, + 68.8236462, + 68.99676513, + 69.18069348, + 69.3755424, + 69.58143832, + 69.7985219, + 70.0269465, + 70.26687594, + 70.51848152, + 70.78193842, + 71.05742124, + 71.3450988, + 71.64512835, + 71.95764893, + 72.2827744, + 72.62058611, + 72.97112576, + 73.33438887, + 73.71031989, + 74.09881018, + 74.49970068, + 74.91279208, + 75.33786611, + 75.77472346, + 76.22324542, + 76.6834893, + 77.15583098, + ], + [ + 67.48456948, + 67.4863062, + 67.49151606, + 67.50019814, + 67.51235097, + 67.52797265, + 67.54706099, + 67.56961364, + 67.59562835, + 67.62510318, + 67.65803674, + 67.69442847, + 67.73427893, + 67.77759008, + 67.82436552, + 67.87461092, + 67.92833383, + 67.98554449, + 68.04625563, + 68.11048271, + 68.17824403, + 68.24956072, + 68.32445674, + 68.40295874, + 68.48509592, + 68.57089967, + 68.66040331, + 68.75364155, + 
68.85064994, + 68.95146421, + 69.05611955, + 69.16464973, + 69.27708619, + 69.39345711, + 69.51378646, + 69.63809304, + 69.76638985, + 69.89868353, + 70.03497444, + 70.17525741, + 70.31952364, + 70.46776416, + 70.61997566, + ], + ] +) """Return the radial and azimuthal platescales in um/arcsec given the CS5 focal-plane radius in mm, using linear interpolation. """ get_platescales = scipy.interpolate.interp1d( - platescale_data[0], platescale_data[1:], copy=False, assume_sorted=True) + platescale_data[0], platescale_data[1:], copy=False, assume_sorted=True +) def robust_median(X, axis=0, zcut=3.5): - """Calculate a robust median of X along the specified axis. - """ + """Calculate a robust median of X along the specified axis.""" X = np.asarray(X, np.float64) X0 = np.nanmedian(X, axis=axis, keepdims=True) MAD = np.nanmedian(np.abs(X - X0), axis=axis, keepdims=True) @@ -1263,116 +1492,405 @@ def pwlinear_solve(t, dt, yint): Dt = np.diff(t) if np.any(Dt <= 0): return yint / dt - dt2 = dt ** 2 / 8 + dt2 = dt**2 / 8 hi = dt2[1:] / Dt lo = dt2[:-1] / Dt banded = np.zeros((3, len(dt))) banded[1] = dt - banded[1,:-1] -= hi - banded[1,1:] -= lo - banded[0,1:] = hi - banded[2,:-1] = lo - return scipy.linalg.solve_banded((1,1), banded, yint) + banded[1, :-1] -= hi + banded[1, 1:] -= lo + banded[0, 1:] = hi + banded[2, :-1] = lo + return scipy.linalg.solve_banded((1, 1), banded, yint) + +_blur_kernel = np.array( + [ + [1.8584491e-07, 4.3132287e-04, 1.8588798e-07], + [4.3132226e-04, 9.9827397e-01, 4.3132226e-04], + [1.8588798e-07, 4.3132287e-04, 1.8584491e-07], + ] +) -_blur_kernel = np.array([ - [1.8584491e-07, 4.3132287e-04, 1.8588798e-07], - [4.3132226e-04, 9.9827397e-01, 4.3132226e-04], - [1.8588798e-07, 4.3132287e-04, 1.8584491e-07]]) def blur(D, W): """Apply a weighted 0.15-pixel Gaussian blur to reduce the impact of any isolated pixels with large ivar""" - DWconv = scipy.signal.convolve(D * W, _blur_kernel, mode='same') - Wconv = scipy.signal.convolve(W, _blur_kernel, mode='same') - return np.divide(DWconv, Wconv, out=np.zeros_like(D), where=Wconv>0), Wconv + DWconv = scipy.signal.convolve(D * W, _blur_kernel, mode="same") + Wconv = scipy.signal.convolve(W, _blur_kernel, mode="same") + return np.divide(DWconv, Wconv, out=np.zeros_like(D), where=Wconv > 0), Wconv + # 11x11 convolution kernel for the nominal ELG profile which is round Sersic n=1 with 0.45" half-light radius. # This kernel is stretched by 1.084 along y and squeezed by 0.922 along x to account for the different plate scales. 
# To apply this profile, use scipy.signal.convolve(GFA_data, _ELG_kernel, mode='same') -_ELG_kernel = np.array([ - [0.0004233430081512779, 0.0007364505436271429, 0.001188363297842443, 0.0017316826852038503, 0.0022099444177001715, 0.002399129094555974, 0.0022099444177001715, 0.0017316826852038503, 0.001188363297842443, 0.0007364505436271429, 0.0004233430081512779], - [0.0006760309333913028, 0.0012480159057304263, 0.0021513437386602163, 0.003348887199535966, 0.0045022256672382355, 0.0049937451258301735, 0.0045022256672382355, 0.003348887199535966, 0.0021513437386602163, 0.0012480159057304263, 0.0006760309333913028], - [0.0010057131294161081, 0.001976283034309745, 0.0036765243858098984, 0.006234399974346161, 0.00904802419245243, 0.010399244725704193, 0.00904802419245243, 0.006234399974346161, 0.0036765243858098984, 0.001976282801479101, 0.0010057131294161081], - [0.0013665164588019252, 0.0028473353013396263, 0.005736198276281357, 0.010810506530106068, 0.017641831189393997, 0.021501323208212852, 0.017641831189393997, 0.010810506530106068, 0.005736198276281357, 0.0028473353013396263, 0.0013665164588019252], - [0.001660904148593545, 0.0036149118095636368, 0.0077792382799088955, 0.016309885308146477, 0.03155653551220894, 0.04382346570491791, 0.03155653551220894, 0.016309885308146477, 0.0077792382799088955, 0.0036149118095636368, 0.001660904148593545], - [0.0017684746999293566, 0.003920542076230049, 0.00870299432426691, 0.019154787063598633, 0.04165779799222946, 0.06993736326694489, 0.04165779799222946, 0.019154787063598633, 0.00870299432426691, 0.003920542076230049, 0.0017684746999293566], - [0.001660904148593545, 0.0036149118095636368, 0.0077792382799088955, 0.016309885308146477, 0.03155653551220894, 0.04382346570491791, 0.03155653551220894, 0.016309885308146477, 0.0077792382799088955, 0.0036149118095636368, 0.001660904148593545], - [0.0013665164588019252, 0.0028473353013396263, 0.005736198276281357, 0.010810506530106068, 0.017641831189393997, 0.021501323208212852, 0.017641831189393997, 0.010810506530106068, 0.005736198276281357, 0.0028473353013396263, 0.0013665164588019252], - [0.0010057131294161081, 0.001976282801479101, 0.0036765243858098984, 0.006234399974346161, 0.00904802419245243, 0.010399244725704193, 0.00904802419245243, 0.006234399974346161, 0.0036765243858098984, 0.001976283034309745, 0.0010057131294161081], - [0.0006760309333913028, 0.0012480159057304263, 0.0021513437386602163, 0.003348887199535966, 0.0045022256672382355, 0.0049937451258301735, 0.0045022256672382355, 0.003348887199535966, 0.0021513437386602163, 0.0012480159057304263, 0.0006760309333913028], - [0.0004233430081512779, 0.0007364505436271429, 0.001188363297842443, 0.0017316826852038503, 0.0022099444177001715, 0.002399129094555974, 0.0022099444177001715, 0.0017316826852038503, 0.001188363297842443, 0.0007364505436271429, 0.0004233430081512779]]) +_ELG_kernel = np.array( + [ + [ + 0.0004233430081512779, + 0.0007364505436271429, + 0.001188363297842443, + 0.0017316826852038503, + 0.0022099444177001715, + 0.002399129094555974, + 0.0022099444177001715, + 0.0017316826852038503, + 0.001188363297842443, + 0.0007364505436271429, + 0.0004233430081512779, + ], + [ + 0.0006760309333913028, + 0.0012480159057304263, + 0.0021513437386602163, + 0.003348887199535966, + 0.0045022256672382355, + 0.0049937451258301735, + 0.0045022256672382355, + 0.003348887199535966, + 0.0021513437386602163, + 0.0012480159057304263, + 0.0006760309333913028, + ], + [ + 0.0010057131294161081, + 0.001976283034309745, + 0.0036765243858098984, + 
0.006234399974346161, + 0.00904802419245243, + 0.010399244725704193, + 0.00904802419245243, + 0.006234399974346161, + 0.0036765243858098984, + 0.001976282801479101, + 0.0010057131294161081, + ], + [ + 0.0013665164588019252, + 0.0028473353013396263, + 0.005736198276281357, + 0.010810506530106068, + 0.017641831189393997, + 0.021501323208212852, + 0.017641831189393997, + 0.010810506530106068, + 0.005736198276281357, + 0.0028473353013396263, + 0.0013665164588019252, + ], + [ + 0.001660904148593545, + 0.0036149118095636368, + 0.0077792382799088955, + 0.016309885308146477, + 0.03155653551220894, + 0.04382346570491791, + 0.03155653551220894, + 0.016309885308146477, + 0.0077792382799088955, + 0.0036149118095636368, + 0.001660904148593545, + ], + [ + 0.0017684746999293566, + 0.003920542076230049, + 0.00870299432426691, + 0.019154787063598633, + 0.04165779799222946, + 0.06993736326694489, + 0.04165779799222946, + 0.019154787063598633, + 0.00870299432426691, + 0.003920542076230049, + 0.0017684746999293566, + ], + [ + 0.001660904148593545, + 0.0036149118095636368, + 0.0077792382799088955, + 0.016309885308146477, + 0.03155653551220894, + 0.04382346570491791, + 0.03155653551220894, + 0.016309885308146477, + 0.0077792382799088955, + 0.0036149118095636368, + 0.001660904148593545, + ], + [ + 0.0013665164588019252, + 0.0028473353013396263, + 0.005736198276281357, + 0.010810506530106068, + 0.017641831189393997, + 0.021501323208212852, + 0.017641831189393997, + 0.010810506530106068, + 0.005736198276281357, + 0.0028473353013396263, + 0.0013665164588019252, + ], + [ + 0.0010057131294161081, + 0.001976282801479101, + 0.0036765243858098984, + 0.006234399974346161, + 0.00904802419245243, + 0.010399244725704193, + 0.00904802419245243, + 0.006234399974346161, + 0.0036765243858098984, + 0.001976283034309745, + 0.0010057131294161081, + ], + [ + 0.0006760309333913028, + 0.0012480159057304263, + 0.0021513437386602163, + 0.003348887199535966, + 0.0045022256672382355, + 0.0049937451258301735, + 0.0045022256672382355, + 0.003348887199535966, + 0.0021513437386602163, + 0.0012480159057304263, + 0.0006760309333913028, + ], + [ + 0.0004233430081512779, + 0.0007364505436271429, + 0.001188363297842443, + 0.0017316826852038503, + 0.0022099444177001715, + 0.002399129094555974, + 0.0022099444177001715, + 0.0017316826852038503, + 0.001188363297842443, + 0.0007364505436271429, + 0.0004233430081512779, + ], + ] +) # 11x11 convolution kernel for the nominal BGS profile which is round Sersic n=4 with 1.5" half-light radius. # This kernel is stretched by 1.084 along y and squeezed by 0.922 along x to account for the different plate scales. 
# To apply this profile, use scipy.signal.convolve(GFA_data, _ELG_kernel, mode='same') -_BGS_kernel = np.array([ - [0.0008819324430078268, 0.0010629459284245968, 0.0012624080991372466, 0.0014565379824489355, 0.00160425144713372, 0.0016609467566013336, 0.00160425144713372, 0.0014565379824489355, 0.0012624080991372466, 0.0010629459284245968, 0.0008819324430078268], - [0.0010629459284245968, 0.001333612366579473, 0.0016604403499513865, 0.0020129659678786993, 0.002308456925675273, 0.002431850880384445, 0.002308456925675273, 0.0020129659678786993, 0.0016604403499513865, 0.0013336124829947948, 0.0010629459284245968], - [0.0012624080991372466, 0.0016604403499513865, 0.002200713846832514, 0.002877740887925029, 0.003545331070199609, 0.0038440146017819643, 0.003545331070199609, 0.002877740887925029, 0.002200713846832514, 0.0016604403499513865, 0.0012624080991372466], - [0.0014565379824489355, 0.0020129659678786993, 0.002877740887925029, 0.004214275162667036, 0.0059745050966739655, 0.006993815768510103, 0.0059745050966739655, 0.004214275162667036, 0.002877740887925029, 0.0020129659678786993, 0.0014565379824489355], - [0.00160425144713372, 0.002308456925675273, 0.003545331070199609, 0.0059745050966739655, 0.011260008439421654, 0.017317788675427437, 0.011260008439421654, 0.0059745050966739655, 0.003545331070199609, 0.002308456925675273, 0.00160425144713372], - [0.0016609467566013336, 0.002431850880384445, 0.0038440146017819643, 0.006993815768510103, 0.017317788675427437, 0.053177446126937866, 0.017317788675427437, 0.006993815768510103, 0.0038440146017819643, 0.002431850880384445, 0.0016609467566013336], - [0.00160425144713372, 0.002308456925675273, 0.003545331070199609, 0.0059745050966739655, 0.011260008439421654, 0.017317788675427437, 0.011260008439421654, 0.0059745050966739655, 0.003545331070199609, 0.002308456925675273, 0.00160425144713372], - [0.0014565379824489355, 0.0020129659678786993, 0.002877740887925029, 0.004214275162667036, 0.0059745050966739655, 0.006993815768510103, 0.0059745050966739655, 0.004214275162667036, 0.002877740887925029, 0.0020129659678786993, 0.0014565379824489355], - [0.0012624080991372466, 0.0016604403499513865, 0.002200713846832514, 0.002877740887925029, 0.003545331070199609, 0.0038440146017819643, 0.003545331070199609, 0.002877740887925029, 0.002200713846832514, 0.0016604403499513865, 0.0012624080991372466], - [0.0010629459284245968, 0.0013336124829947948, 0.0016604403499513865, 0.0020129659678786993, 0.002308456925675273, 0.002431850880384445, 0.002308456925675273, 0.0020129659678786993, 0.0016604403499513865, 0.001333612366579473, 0.0010629459284245968], - [0.0008819324430078268, 0.0010629459284245968, 0.0012624080991372466, 0.0014565379824489355, 0.00160425144713372, 0.0016609467566013336, 0.00160425144713372, 0.0014565379824489355, 0.0012624080991372466, 0.0010629459284245968, 0.0008819324430078268]]) +_BGS_kernel = np.array( + [ + [ + 0.0008819324430078268, + 0.0010629459284245968, + 0.0012624080991372466, + 0.0014565379824489355, + 0.00160425144713372, + 0.0016609467566013336, + 0.00160425144713372, + 0.0014565379824489355, + 0.0012624080991372466, + 0.0010629459284245968, + 0.0008819324430078268, + ], + [ + 0.0010629459284245968, + 0.001333612366579473, + 0.0016604403499513865, + 0.0020129659678786993, + 0.002308456925675273, + 0.002431850880384445, + 0.002308456925675273, + 0.0020129659678786993, + 0.0016604403499513865, + 0.0013336124829947948, + 0.0010629459284245968, + ], + [ + 0.0012624080991372466, + 0.0016604403499513865, + 0.002200713846832514, + 
0.002877740887925029, + 0.003545331070199609, + 0.0038440146017819643, + 0.003545331070199609, + 0.002877740887925029, + 0.002200713846832514, + 0.0016604403499513865, + 0.0012624080991372466, + ], + [ + 0.0014565379824489355, + 0.0020129659678786993, + 0.002877740887925029, + 0.004214275162667036, + 0.0059745050966739655, + 0.006993815768510103, + 0.0059745050966739655, + 0.004214275162667036, + 0.002877740887925029, + 0.0020129659678786993, + 0.0014565379824489355, + ], + [ + 0.00160425144713372, + 0.002308456925675273, + 0.003545331070199609, + 0.0059745050966739655, + 0.011260008439421654, + 0.017317788675427437, + 0.011260008439421654, + 0.0059745050966739655, + 0.003545331070199609, + 0.002308456925675273, + 0.00160425144713372, + ], + [ + 0.0016609467566013336, + 0.002431850880384445, + 0.0038440146017819643, + 0.006993815768510103, + 0.017317788675427437, + 0.053177446126937866, + 0.017317788675427437, + 0.006993815768510103, + 0.0038440146017819643, + 0.002431850880384445, + 0.0016609467566013336, + ], + [ + 0.00160425144713372, + 0.002308456925675273, + 0.003545331070199609, + 0.0059745050966739655, + 0.011260008439421654, + 0.017317788675427437, + 0.011260008439421654, + 0.0059745050966739655, + 0.003545331070199609, + 0.002308456925675273, + 0.00160425144713372, + ], + [ + 0.0014565379824489355, + 0.0020129659678786993, + 0.002877740887925029, + 0.004214275162667036, + 0.0059745050966739655, + 0.006993815768510103, + 0.0059745050966739655, + 0.004214275162667036, + 0.002877740887925029, + 0.0020129659678786993, + 0.0014565379824489355, + ], + [ + 0.0012624080991372466, + 0.0016604403499513865, + 0.002200713846832514, + 0.002877740887925029, + 0.003545331070199609, + 0.0038440146017819643, + 0.003545331070199609, + 0.002877740887925029, + 0.002200713846832514, + 0.0016604403499513865, + 0.0012624080991372466, + ], + [ + 0.0010629459284245968, + 0.0013336124829947948, + 0.0016604403499513865, + 0.0020129659678786993, + 0.002308456925675273, + 0.002431850880384445, + 0.002308456925675273, + 0.0020129659678786993, + 0.0016604403499513865, + 0.001333612366579473, + 0.0010629459284245968, + ], + [ + 0.0008819324430078268, + 0.0010629459284245968, + 0.0012624080991372466, + 0.0014565379824489355, + 0.00160425144713372, + 0.0016609467566013336, + 0.00160425144713372, + 0.0014565379824489355, + 0.0012624080991372466, + 0.0010629459284245968, + 0.0008819324430078268, + ], + ] +) + def get_fiber_fractions(PSF, FIBER): - """Given a PSF postage stamp observed in a GFA and a fiber profile, return the (PSF,ELG,BGS) fiber fractions. - """ + """Given a PSF postage stamp observed in a GFA and a fiber profile, return the (PSF,ELG,BGS) fiber fractions.""" PSFsum = np.sum(PSF) - ELG = scipy.signal.convolve(PSF, _ELG_kernel, mode='same') - BGS = scipy.signal.convolve(PSF, _BGS_kernel, mode='same') - return np.sum(PSF * FIBER) / PSFsum, np.sum(ELG * FIBER) / PSFsum, np.sum(BGS * FIBER) / PSFsum + ELG = scipy.signal.convolve(PSF, _ELG_kernel, mode="same") + BGS = scipy.signal.convolve(PSF, _BGS_kernel, mode="same") + return ( + np.sum(PSF * FIBER) / PSFsum, + np.sum(ELG * FIBER) / PSFsum, + np.sum(BGS * FIBER) / PSFsum, + ) class ETCExposure(object): - """Utility class for reading the per-exposure json file written by the ETC. 
- """ - class TimeSeries: pass + """Utility class for reading the per-exposure json file written by the ETC.""" + + class TimeSeries: + pass utc_offset = -7 - mjd_epoch = np.datetime64('1858-11-17', 'ms') + utc_offset * np.timedelta64(3600000, 'ms') - onesec = np.timedelta64(1000, 'ms') + mjd_epoch = np.datetime64("1858-11-17", "ms") + utc_offset * np.timedelta64( + 3600000, "ms" + ) + onesec = np.timedelta64(1000, "ms") @staticmethod def get_timestamps(mjd0, dt): dt = np.asarray(dt) - return ETCExposure.mjd_epoch + mjd0 * np.timedelta64(86400000, 'ms') + dt * ETCExposure.onesec + return ( + ETCExposure.mjd_epoch + + mjd0 * np.timedelta64(86400000, "ms") + + dt * ETCExposure.onesec + ) @staticmethod def load(night, expid, datadir=None): - datadir = datadir or os.getenv('DESI_SPECTRO_DATA') or '.' + datadir = datadir or os.getenv("DESI_SPECTRO_DATA") or "." path = pathlib.Path(datadir) if not path.exists(): - raise RuntimeError(f'Invalid datadir: {datadir}') + raise RuntimeError(f"Invalid datadir: {datadir}") path = path / str(night) if not path.exists(): - raise RuntimeError(f'Invalid night: {night}') + raise RuntimeError(f"Invalid night: {night}") path = path / str(expid).zfill(8) if not path.exists(): - raise RuntimeError(f'Invalid expid: {expid} for {night}') + raise RuntimeError(f"Invalid expid: {expid} for {night}") def __init__(self, filename): - """Initialize the ETC exposure data from a saved json file - """ + """Initialize the ETC exposure data from a saved json file""" with open(filename) as f: self.data = json.load(f) for block in self.data.keys(): - if block in ('thru','sky','accum'): + if block in ("thru", "sky", "accum"): # Expand time series data into an attribute object. setattr(self, block, ETCExposure.TimeSeries()) timeseries = getattr(self, block) - mjd0 = self.data[block]['mjd0'] + mjd0 = self.data[block]["mjd0"] for k in self.data[block].keys(): - if k == 'mjd0': + if k == "mjd0": continue - if k.startswith('dt'): - name = 't'+k[2:] + if k.startswith("dt"): + name = "t" + k[2:] # Convert deltas into numpy 64-bit local timestamps. - setattr(timeseries, name, ETCExposure.get_timestamps(mjd0, self.data[block][k])) + setattr( + timeseries, + name, + ETCExposure.get_timestamps(mjd0, self.data[block][k]), + ) # Convert all timeseries values to float32 numpy arrays (including dt* arrays) setattr(timeseries, k, np.array(self.data[block][k], np.float32)) else: From 9e0d1a55a5aade92914133a3ccddcdf98d52acdb Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 12 Jan 2025 08:52:20 -0800 Subject: [PATCH 03/29] precompute grid of shifted spot profiles in ctor --- desietc/sky.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index 67d1c0f..8006d0f 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -174,7 +174,12 @@ class SkyCamera(object): sky_names = "SKYCAM0", "SKYCAM1" - def __init__(self, calib_name="SKY_calib.fits"): + def __init__( + self, + calib_name="SKY_calib.fits", + centroid_grid_size_pixels=5, + centroid_ngrid=21, + ): self.names, self.slices, self.masks, self.spots, self.calibs = load_calib_data( calib_name ) @@ -194,6 +199,27 @@ def __init__(self, calib_name="SKY_calib.fits"): self.valid = np.empty((maxstamps, stampsize, stampsize), np.float32) self.model = np.empty((maxstamps, stampsize, stampsize), np.float32) self.pull = np.empty((maxstamps, stampsize, stampsize), np.float32) + # Initialize the grid of offset spots for centroid fitting. 
+ if centroid_ngrid > 0: + self.centroid_dxy = np.linspace( + -centroid_grid_size_pixels, centroid_grid_size_pixels, centroid_ngrid + ) + ny, nx = self.spots[self.sky_names[0]][0].shape + self.offsetSpots = {} + for cam in self.sky_names: + spots = self.spots[cam] + self.offsetSpots[cam] = np.zeros( + (len(spots), centroid_ngrid, centroid_ngrid, ny, nx), np.float32 + ) + for j in range(centroid_ngrid): + dy = self.centroid_dxy[j] + for i in range(centroid_ngrid): + dx = self.centroid_dxy[i] + for cam in self.sky_names: + spots = self.spots[cam] + self.offsetSpots[cam][:, j, i] = desietc.util.shifted_profile( + spots, [dx] * len(spots), [dy] * len(spots) + ) # Initialize background fitting. self.bgfitter = BGFitter() From 58578470af7e079fdd192338bcccc84b74289dc6 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 12 Jan 2025 08:56:58 -0800 Subject: [PATCH 04/29] save best fit (dx,dy) in setraw() --- desietc/sky.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/desietc/sky.py b/desietc/sky.py index 8006d0f..892e22d 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -355,6 +355,11 @@ def setraw( self.flux[:N], self.bgfit[:N], cov, _ = desietc.util.fit_spots( self.data[:N], self.ivar[:N], shifted_profiles ) + self.fit_dx = dx + self.fit_dy = dy + else: + self.fit_dx = 0 + self.fit_dy = 0 # Give up if we have invalid fluxes or errors. if not np.all((self.fluxerr[:N] > 0) & np.isfinite(self.flux[:N])): if return_offsets: From 8c13ddd6a87ee2f868f623cd99e846d839e2c443 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 12 Jan 2025 08:59:30 -0800 Subject: [PATCH 05/29] fix typo --- desietc/sky.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index 892e22d..99a261f 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -355,8 +355,8 @@ def setraw( self.flux[:N], self.bgfit[:N], cov, _ = desietc.util.fit_spots( self.data[:N], self.ivar[:N], shifted_profiles ) - self.fit_dx = dx - self.fit_dy = dy + self.fit_dx = dx[0] + self.fit_dy = dy[0] else: self.fit_dx = 0 self.fit_dy = 0 From ac69e5954ed30c1a07f3ac2b0d607e6ca0e8cff7 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Thu, 16 Jan 2025 06:03:13 -0800 Subject: [PATCH 06/29] save individual fitted spot centroids --- desietc/sky.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index 99a261f..17a0bc2 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -339,14 +339,14 @@ def setraw( # assert np.all(cov[:, 1, 1] > 0) self.bgerr[:N] = np.sqrt(cov[:, 1, 1]) if fit_centroids: + # Save individual fitted spot offsets. 
+ self.spot_offsets = np.array(spot_offsets) # Compute the average spot profile position offset if masked: - dx = np.ones(N) * np.mean( - spot_offsets[fiber_mask[icamera, :N] > 0][:, 0] - ) - dy = np.ones(N) * np.mean( - spot_offsets[fiber_mask[icamera, :N] > 0][:, 1] - ) + mask = fiber_mask[icamera, :N] > 0 + dx = np.ones(N) * np.mean(spot_offsets[mask][:, 0]) + dy = np.ones(N) * np.mean(spot_offsets[mask][:, 1]) + self.spot_offsets[~mask] = np.nan else: dx = np.ones(N) * np.mean(spot_offsets[:, 0]) dy = np.ones(N) * np.mean(spot_offsets[:, 1]) From e13406fac4caa4ddbf144e39616faaf0277efc4a Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Thu, 30 Jan 2025 12:44:25 -0800 Subject: [PATCH 07/29] implement fast centroiding fit --- desietc/util.py | 104 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/desietc/util.py b/desietc/util.py index 255eed6..24a3fe7 100644 --- a/desietc/util.py +++ b/desietc/util.py @@ -139,6 +139,110 @@ def get_chi2(data, ivar, profile, flux, background, area=1): return np.sum(ivar * (data - profile * flux - area * background) ** 2) +def fit_spots_flux_and_pos_fast( + data, ivar, offset_spots, offset_dxy, area=1, nfine=26, return_grids=False +): + """Fit spot images to simultaneously determine the flux, background level, and centroid offsets. + + Similar to fit_spots_flux_and_pos() but runs much faster using precomputed offset spot images. + The fit is performed in two stages: first the flux, background and resulting chi-square are + calculated on a grid of centroid offsets. These calculations are fast since the flux and background + can be calculated using linear algebra. The minimum chi-square over the offset grid then provides an + initial fit over (flux, background, dx, dy) on a fixed coarse grid of (dx,dy). + + If nfine > 0, the second stage uses a 2D bicubic spline over the 5x5 subgrid of centroid offsets + centered on the minimum chi-square from the first stage. This provides a refined estimate of (dx,dy) + where the chi-square is minimized. The flux and background are then interpolated to this refined + grid point on the same 5x5 subgrid. This second stage is not run if the initial minimum is too close + to the edge of the offset grid, preventing the 5x5 subgrid from being defined. + + Parameters + ---------- + data : array + 2D array of spot images with shape (nspots, ny, nx). + ivar : array + 2D array of inverse variance of data with shape (nspots, ny, nx). + offset_spots : array + Array of offset spot images with shape (nspots, ngrid, ngrid, ny, nx). + offset_dxy : array + Array of centroid offsets in units of pixels with shape (ngrid,). + offset_spots[n,i,j] is the 2D spot image with shape (ny, nx) at centroid offset + (offset_dxy[i], offset_dxy[j]). + area : scalar or array + Area of each pixel used to predict its background level as b * area. + nfine : int + Number of points to use along dx and dy in the refined 2D bicubic spline fit. + Set to zero to prevent this second stage, resulting in faster but less + accurate results. + return_grids : bool + If True, return the chi-square grid and the refined chi-square grid, which + are useful for displaying diagnostic plots. 
+
+    Returns
+    -------
+    tuple
+        Tuple of (fit_flux, fit_bg, fit_cov, fit_offsets) where fit_flux and fit_bg are the
+        fitted flux and background for each spot with shape (nspots,), fit_cov is the
+        covariance matrix of (fit_flux, fit_bg) for each spot with shape (nspots, 2, 2),
+        and fit_offsets is the fitted centroid offset (dx, dy), common to all spots, with shape (2,).
+        If return_grids is True, the tuple also includes (chisq, finegrid, finedx, finedy)
+        where chisq is the initial chi-square grid, finegrid is the refined chi-square grid,
+        and finedx and finedy are the dx and dy offsets used in the refined grid.
+    """
+    # Tabulate chi-square over grid of centroid offsets (applied simultaneously to all spots).
+    ndata = len(data)
+    ngrid = len(offset_dxy)
+    chisq = np.zeros((ngrid, ngrid))
+    flux = np.zeros((ngrid, ngrid, ndata))
+    bg = np.zeros((ngrid, ngrid, ndata))
+    cov = np.zeros((ngrid, ngrid, ndata, 2, 2))
+    for j in range(ngrid):
+        for i in range(ngrid):
+            flux[j, i], bg[j, i], cov[j, i], _ = fit_spots(
+                data, ivar, offset_spots[:, j, i], area=area
+            )
+            model = (
+                bg[j, i].reshape(-1, 1, 1)
+                + flux[j, i].reshape(-1, 1, 1) * offset_spots[:, j, i]
+            )
+            chisq[j, i] = np.sum((data - model) ** 2 * ivar)
+    # Find grid point with minimum chi-square.
+    j, i = np.unravel_index(np.argmin(chisq), chisq.shape)
+    fit_dx = offset_dxy[i]
+    fit_dy = offset_dxy[j]
+    fit_flux = flux[j, i]
+    fit_bg = bg[j, i]
+    fit_cov = cov[j, i]
+    if nfine > 0 and j >= 2 and i >= 2 and j < ngrid - 2 and i < ngrid - 2:
+        # Use a 2D bicubic spline of a 5x5 subgrid to refine the location of the minimum chisq.
+        subgrid = chisq[j - 2 : j + 3, i - 2 : i + 3]
+        subdx = offset_dxy[i - 2 : i + 3]
+        subdy = offset_dxy[j - 2 : j + 3]
+        finedx = np.linspace(subdx[0], subdx[-1], nfine)
+        finedy = np.linspace(subdy[0], subdy[-1], nfine)
+        pts = np.stack(np.meshgrid(finedy, finedx, indexing="ij"), axis=-1)
+        spline = scipy.interpolate.RegularGridInterpolator(
+            (subdy, subdx), subgrid, method="cubic"
+        )
+        finegrid = spline(pts)
+        j2, i2 = np.unravel_index(np.argmin(finegrid), finegrid.shape)
+        fit_dx = finedx[i2]
+        fit_dy = finedy[j2]
+        # Evaluate the flux and background at the refined grid point.
+        fit_flux = scipy.interpolate.RegularGridInterpolator(
+            (subdy, subdx), flux[j - 2 : j + 3, i - 2 : i + 3], method="cubic"
+        )((fit_dy, fit_dx))
+        fit_bg = scipy.interpolate.RegularGridInterpolator(
+            (subdy, subdx), bg[j - 2 : j + 3, i - 2 : i + 3], method="cubic"
+        )((fit_dy, fit_dx))
+
+    fit_offsets = np.stack((fit_dx, fit_dy), axis=-1)
+    retval = (fit_flux, fit_bg, fit_cov, fit_offsets)
+    if return_grids:
+        retval += (chisq, finegrid, finedx, finedy)
+    return retval
+
+
 def fit_spots_flux_and_pos(data, ivar, profile, area=1):
     """Fit images of a spot to estimate the spot flux and background level as well
     as the position offset from the reference profile. 
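
The two-stage search added above is easy to exercise in isolation. The standalone
sketch below (not code from this patch) mirrors the coarse-grid argmin followed by
the bicubic refinement over a 5x5 subgrid; the grid spacing, the nfine value, and
the synthetic chi-square surface with its toy minimum location are illustrative
assumptions:

    import numpy as np
    import scipy.interpolate

    # Coarse grid of centroid offsets, analogous to offset_dxy above (values assumed).
    ngrid, nfine = 21, 26
    offset_dxy = np.linspace(-5, 5, ngrid)

    # Synthetic chi-square surface with its true minimum at a known (dx, dy).
    true_dx, true_dy = 0.37, -1.12
    DX, DY = np.meshgrid(offset_dxy, offset_dxy)  # chisq is indexed [j=y, i=x]
    chisq = (DX - true_dx) ** 2 + (DY - true_dy) ** 2

    # Stage 1: locate the minimum on the coarse grid.
    j, i = np.unravel_index(np.argmin(chisq), chisq.shape)

    # Stage 2: bicubic interpolation of the 5x5 subgrid centered on the coarse minimum.
    subdy = offset_dxy[j - 2 : j + 3]
    subdx = offset_dxy[i - 2 : i + 3]
    spline = scipy.interpolate.RegularGridInterpolator(
        (subdy, subdx), chisq[j - 2 : j + 3, i - 2 : i + 3], method="cubic"
    )
    finedy = np.linspace(subdy[0], subdy[-1], nfine)
    finedx = np.linspace(subdx[0], subdx[-1], nfine)
    pts = np.stack(np.meshgrid(finedy, finedx, indexing="ij"), axis=-1)
    j2, i2 = np.unravel_index(np.argmin(spline(pts)), (nfine, nfine))

    print(f"coarse  dx={offset_dxy[i]:+.3f} dy={offset_dxy[j]:+.3f}")
    print(f"refined dx={finedx[i2]:+.3f} dy={finedy[j2]:+.3f}")

In the real fit, each coarse grid point costs only one linear solve for
(flux, background) via fit_spots(), which is what makes this approach much faster
than the nonlinear minimizer used by fit_spots_flux_and_pos() below.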

From 37610f57b4858d0f03cf8206330d6c22de957a4e Mon Sep 17 00:00:00 2001
From: David Kirkby
Date: Thu, 30 Jan 2025 13:27:35 -0800
Subject: [PATCH 08/29] implement fast_centroids option in SKY.set_raw()

---
 desietc/sky.py | 67 +++++++++++++++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 22 deletions(-)

diff --git a/desietc/sky.py b/desietc/sky.py
index 17a0bc2..3fd4568 100644
--- a/desietc/sky.py
+++ b/desietc/sky.py
@@ -239,6 +239,7 @@ def setraw(
         Temp_correc_coef=np.array([[0.905, 0.007], [0.941, 0.003]]),
         return_offsets=False,
         fit_centroids=False,
+        fast_centroids=False,
     ):
         """Fit images of a spot to estimate the spot flux and background level as well
         as the position offset from the reference profile.
@@ -257,7 +258,7 @@ def setraw(
         Whether or not to apply the refit procedure
     pullcut : scalar
         Threshold value to mask pixels with extreme pulls in the refit procedure.
-    chis_max : scalar
+    chisq_max : scalar
         Threshold chi square value used for the weighted average flux computation.
     ndrop_max : scalar
         Maximum number of sky monitoring fibers that can be dropped for the weighted average flux computation.
@@ -273,10 +274,12 @@ def setraw(
         If True, return the spot position offsets. Offsets will be None if fit_centroids is False.
     fit_centroids : bool
         If True, fit the spot centroids.
+    fast_centroids : bool
+        If True, use the fast centroiding method.
     Returns
     -------
     tuple
-        Tuple (meanflux, ivar ** -0.5, spot_offsets) where menflux and ivar**-0.5 are scalars,
+        Tuple (meanflux, ivar ** -0.5, spot_offsets) where meanflux and ivar**-0.5 are scalars,
         and spot_offsets is an array of shape (...,nf,2) where nf is the number of sky monitoring fibers (10 for SKYCAM0 and 7 for SKYCAM1) with elements [...,i,0] = i-th spot position_offset(x direction) and [...,i,1] = i-th spot position_offset(y direction)
     """
     if name not in self.slices:
@@ -301,18 +304,41 @@ def setraw(
             self.ivar[k][dead | saturated] = 0
             # Mask known hot pixels.
             self.ivar[k][self.masks[name][k]] = 0
+        # Which spots are we using?
+        mask = np.ones(N, bool)
+        if masked:
+            mask = fiber_mask[icamera, :N] > 0
         # Fit for the spot flux and background level and (optionally) the spot centroids.
-        fitter = (
-            desietc.util.fit_spots_flux_and_pos
-            if fit_centroids
-            else desietc.util.fit_spots
-        )
-        self.flux[:N], self.bgfit[:N], cov, spot_offsets = fitter(
-            self.data[:N], self.ivar[:N], self.spots[name]
-        )
+        cov = np.zeros((N, 2, 2))
+        if fit_centroids:
+            if fast_centroids:
+                self.flux[:N] = self.bgfit[:N] = 0
+                self.flux[:N][mask], self.bgfit[:N][mask], cov[mask], fit_offsets = (
+                    desietc.util.fit_spots_flux_and_pos_fast(
+                        self.data[:N][mask],
+                        self.ivar[:N][mask],
+                        self.offsetSpots[name][:N][mask],
+                        self.centroid_dxy,
+                    )
+                )
+                self.fit_dx, self.fit_dy = fit_offsets.T
+            else:
+                self.flux[:N], self.bgfit[:N], cov, spot_offsets = (
+                    desietc.util.fit_spots_flux_and_pos(
+                        self.data[:N], self.ivar[:N], self.spots[name]
+                    )
+                )
+                self.spot_offsets = np.array(spot_offsets)
+        else:
+            self.flux[:N], self.bgfit[:N], cov, _ = desietc.util.fit_spots(
+                self.data[:N], self.ivar[:N], self.spots[name]
+            )
+            self.fit_dx, self.fit_dy = 0, 0
+        # Calculate errors on the flux and background level.
         self.fluxerr[:N] = np.sqrt(cov[:, 0, 0])
         self.bgerr[:N] = np.sqrt(cov[:, 1, 1])
         if refit:
+            assert not fit_centroids, "Cannot refit with centroid fitting enabled."
             # Save the original masking due to dead / hot / saturated pixels.
             self.valid[:N] = 1.0 * (self.ivar[:N] > 0)
             # Use the initial fit for an improved estimate of the signal variance. 
@@ -331,7 +357,7 @@ def setraw( # Apply the original + new masking. self.ivar[:N] *= self.valid[:N] # Refit - self.flux[:N], self.bgfit[:N], cov, spot_offsets = fitter( + self.flux[:N], self.bgfit[:N], cov, _ = desietc.util.fit_spots( self.data[:N], self.ivar[:N], self.spots[name] ) # assert np.all(cov[:, 0, 0] > 0) @@ -339,17 +365,14 @@ def setraw( # assert np.all(cov[:, 1, 1] > 0) self.bgerr[:N] = np.sqrt(cov[:, 1, 1]) if fit_centroids: - # Save individual fitted spot offsets. - self.spot_offsets = np.array(spot_offsets) # Compute the average spot profile position offset if masked: - mask = fiber_mask[icamera, :N] > 0 - dx = np.ones(N) * np.mean(spot_offsets[mask][:, 0]) - dy = np.ones(N) * np.mean(spot_offsets[mask][:, 1]) + dx = np.ones(N) * np.mean(self.spot_offsets[mask][:, 0]) + dy = np.ones(N) * np.mean(self.spot_offsets[mask][:, 1]) self.spot_offsets[~mask] = np.nan else: - dx = np.ones(N) * np.mean(spot_offsets[:, 0]) - dy = np.ones(N) * np.mean(spot_offsets[:, 1]) + dx = np.ones(N) * np.mean(self.spot_offsets[:, 0]) + dy = np.ones(N) * np.mean(self.spot_offsets[:, 1]) shifted_profiles = desietc.util.shifted_profile(self.spots[name], dx, dy) # Doing a final fit using the mean profile position offset over the different spot self.flux[:N], self.bgfit[:N], cov, _ = desietc.util.fit_spots( @@ -357,9 +380,6 @@ def setraw( ) self.fit_dx = dx[0] self.fit_dy = dy[0] - else: - self.fit_dx = 0 - self.fit_dy = 0 # Give up if we have invalid fluxes or errors. if not np.all((self.fluxerr[:N] > 0) & np.isfinite(self.flux[:N])): if return_offsets: @@ -367,11 +387,13 @@ def setraw( else: return None, None # Calculate the best-fit model for each fiber. + # NOTE: this does not account for the centroid fit! self.model[:N] = ( self.bgfit[:N].reshape(-1, 1, 1) + self.flux[:N].reshape(-1, 1, 1) * self.spots[name] ) # Calculate the corresponding pull images. + # NOTE: this does not account for the centroid fit! self.pull[:N] = (self.data[:N] - self.model[:N]) * np.sqrt(self.ivar[:N]) # Apply per-fiber calibrations. 
calib = self.calibs[name] @@ -421,7 +443,8 @@ def setraw( ) ** 2 * ivar except: pass + # TODO: remove return_offsets option and make sure callers use self.fit_dx, self.fit_dy instead if return_offsets: - return meanflux, ivar**-0.5, spot_offsets + return meanflux, ivar**-0.5, self.spot_offsets else: return meanflux, ivar**-0.5 From a297a422b25a8cccfc2c5b10bbeaf30c1dee15e2 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Tue, 4 Feb 2025 13:54:23 -0800 Subject: [PATCH 09/29] wip --- desietc/sky.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index 3fd4568..fd1cb79 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -364,7 +364,7 @@ def setraw( self.fluxerr[:N] = np.sqrt(cov[:, 0, 0]) # assert np.all(cov[:, 1, 1] > 0) self.bgerr[:N] = np.sqrt(cov[:, 1, 1]) - if fit_centroids: + if fit_centroids and not fast_centroids: # Compute the average spot profile position offset if masked: dx = np.ones(N) * np.mean(self.spot_offsets[mask][:, 0]) From 4e22ec504c8c973a7a857b7c73c47e20f95d4119 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Tue, 4 Feb 2025 13:56:28 -0800 Subject: [PATCH 10/29] wip --- desietc/sky.py | 1 + 1 file changed, 1 insertion(+) diff --git a/desietc/sky.py b/desietc/sky.py index fd1cb79..9e3e28f 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -308,6 +308,7 @@ def setraw( mask = np.ones(N, bool) if masked: mask = fiber_mask[icamera, :N] > 0 + self.fiber_mask = mask # Fit for the spot flux and background level and (optionally) the spot centroids. cov = np.zeros((N, 2, 2)) if fit_centroids: From 4088157f4ffc67e89d530378e58b66b5f7906924 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Tue, 4 Feb 2025 16:19:37 -0800 Subject: [PATCH 11/29] use err=inf for masked fibers instead of err=0 --- desietc/sky.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index 9e3e28f..ca7b672 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -310,7 +310,7 @@ def setraw( mask = fiber_mask[icamera, :N] > 0 self.fiber_mask = mask # Fit for the spot flux and background level and (optionally) the spot centroids. - cov = np.zeros((N, 2, 2)) + cov = np.full((N, 2, 2), np.inf) if fit_centroids: if fast_centroids: self.flux[:N] = self.bgfit[:N] = 0 From f9c40bfc9a68010392505844572cc0717edc08d1 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Tue, 4 Feb 2025 16:25:06 -0800 Subject: [PATCH 12/29] wip --- desietc/sky.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index ca7b672..2a065e4 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -404,7 +404,9 @@ def setraw( coef = finetune_coef[icamera, :N] pow = finetune_pow[icamera, :N] cflux = coef * np.maximum(0, cflux) ** pow - cfluxerr = coef * np.maximum(0, cfluxerr) ** pow + # fluxerr is np.inf for masked fibers, so avoid a warning + ok = np.isfinite(cfluxerr) + cfluxerr[ok] = coef[ok] * np.maximum(0, cfluxerr[ok]) ** pow[ok] # Which fibers should be used? 
if masked: keep = (fiber_mask[icamera, :N] > 0) & (cfluxerr > 0) From 213a5db71f44645418de8ec36b252cb2eec346f5 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Wed, 5 Feb 2025 07:30:35 -0800 Subject: [PATCH 13/29] save results before temp correction --- desietc/sky.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/desietc/sky.py b/desietc/sky.py index 2a065e4..3ee3e42 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -434,6 +434,9 @@ def setraw( # Apply the temperature correction if measured if Temperature is not None: + # Save the values before applying the temperature correction + self.flux_notemp = meanflux + self.fluxerr_notemp = ivar**-0.5 try: if (Temperature > -10) and (Temperature < 35): meanflux = meanflux / ( From 41b6299145add27b0e8249642ee5dcdcb3a0f1a5 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 07:58:21 -0800 Subject: [PATCH 14/29] Save fast-centroid fit grids by default --- desietc/sky.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index 3ee3e42..2610a65 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -314,13 +314,21 @@ def setraw( if fit_centroids: if fast_centroids: self.flux[:N] = self.bgfit[:N] = 0 - self.flux[:N][mask], self.bgfit[:N][mask], cov[mask], fit_offsets = ( - desietc.util.fit_spots_flux_and_pos_fast( - self.data[:N][mask], - self.ivar[:N][mask], - self.offsetSpots[name][:N][mask], - self.centroid_dxy, - ) + ( + self.flux[:N][mask], + self.bgfit[:N][mask], + cov[mask], + fit_offsets, + self.grid_chisq, + self.grid_fine_chisq, + self.grid_finedx, + self.grid_finedy, + ) = desietc.util.fit_spots_flux_and_pos_fast( + self.data[:N][mask], + self.ivar[:N][mask], + self.offsetSpots[name][:N][mask], + self.centroid_dxy, + return_grids=True, ) self.fit_dx, self.fit_dy = fit_offsets.T else: From b3c797ebd747adbfa8f71ff298788e5d003cc164 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 07:58:39 -0800 Subject: [PATCH 15/29] Add fn to plot fast vs slow centroid fits --- desietc/plot.py | 389 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 302 insertions(+), 87 deletions(-) diff --git a/desietc/plot.py b/desietc/plot.py index 661ea34..e23934e 100644 --- a/desietc/plot.py +++ b/desietc/plot.py @@ -2,8 +2,9 @@ Requires that matplotlib is installed. """ + import datetime -import copy # for shallow copies of matplotlib colormaps +import copy # for shallow copies of matplotlib colormaps try: import DOSlib.logger as logging @@ -21,10 +22,9 @@ import desietc.util -def plot_colorhist(D, ax, imshow, mode='reverse', color='w', alpha=0.75): - """Draw a hybrid colorbar and histogram. - """ - ax.axis('off') +def plot_colorhist(D, ax, imshow, mode="reverse", color="w", alpha=0.75): + """Draw a hybrid colorbar and histogram.""" + ax.axis("off") # Extract parameters of the original imshow. 
cmap = imshow.get_cmap() vmin, vmax = imshow.get_clim() @@ -40,27 +40,38 @@ def plot_colorhist(D, ax, imshow, mode='reverse', color='w', alpha=0.75): counts, _ = np.histogram(D.reshape(-1), bins=np.linspace(vmin, vmax, width + 1)) hist_height = ((height - 1) * counts / counts[1:-1].max()).astype(int) mask = np.arange(height).reshape(-1, 1) < hist_height - if mode == 'color': - img[mask] = (1 - alpha) * img[mask] + alpha * np.asarray(matplotlib.colors.to_rgb(color)) - elif mode == 'reverse': + if mode == "color": + img[mask] = (1 - alpha) * img[mask] + alpha * np.asarray( + matplotlib.colors.to_rgb(color) + ) + elif mode == "reverse": cmap_r = cmap.reversed() for i, x in enumerate(xgrad): img[mask[:, i], i] = cmap_r(x)[:-1] - elif mode == 'complement': + elif mode == "complement": # https://stackoverflow.com/questions/40233986/ # python-is-there-a-function-or-formula-to-find-the-complementary-colour-of-a-rgb hilo = np.amin(img, axis=2, keepdims=True) + np.amax(img, axis=2, keepdims=True) img[mask] = hilo[mask] - img[mask] else: raise ValueError('Invalid mode "{0}".'.format(mode)) - ax.imshow(img, interpolation='none', origin='lower') + ax.imshow(img, interpolation="none", origin="lower") -def plot_pixels(D, label=None, colorhist=False, zoom=1, masked_color='cyan', - imshow_args={}, text_args={}, colorhist_args={}): - """Plot pixel data at 1:1 scale with an optional label and colorhist. - """ - dpi = 100 # value only affects metadata in an output file, not appearance on screen. +def plot_pixels( + D, + label=None, + colorhist=False, + zoom=1, + masked_color="cyan", + imshow_args={}, + text_args={}, + colorhist_args={}, +): + """Plot pixel data at 1:1 scale with an optional label and colorhist.""" + dpi = ( + 100 # value only affects metadata in an output file, not appearance on screen. + ) ny, nx = D.shape width, height = zoom * nx, zoom * ny if colorhist: @@ -69,24 +80,27 @@ def plot_pixels(D, label=None, colorhist=False, zoom=1, masked_color='cyan', fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi, frameon=False) ax = plt.axes((0, 0, 1, zoom * ny / height)) args = dict(imshow_args) - for name, default in dict(interpolation='none', origin='lower', cmap='plasma_r').items(): + for name, default in dict( + interpolation="none", origin="lower", cmap="plasma_r" + ).items(): if name not in args: args[name] = default # Set the masked color in the specified colormap. - cmap = copy.copy(matplotlib.cm.get_cmap(args['cmap'])) + cmap = copy.copy(matplotlib.cm.get_cmap(args["cmap"])) cmap.set_bad(color=masked_color) - args['cmap'] = cmap + args["cmap"] = cmap # Draw the image. 
I = ax.imshow(D, **args) - ax.axis('off') + ax.axis("off") if label: args = dict(text_args) - for name, default in dict(color='w', fontsize=18).items(): + for name, default in dict(color="w", fontsize=18).items(): if name not in args: args[name] = default outline = [ - matplotlib.patheffects.Stroke(linewidth=1, foreground='k'), - matplotlib.patheffects.Normal()] + matplotlib.patheffects.Stroke(linewidth=1, foreground="k"), + matplotlib.patheffects.Normal(), + ] text = ax.text(0.01, 0.01 * nx / ny, label, transform=ax.transAxes, **args) text.set_path_effects(outline) if colorhist: @@ -95,10 +109,20 @@ def plot_pixels(D, label=None, colorhist=False, zoom=1, masked_color='cyan', return fig, ax -def plot_data(D, W, downsampling=4, zoom=1, label=None, colorhist=False, stamps=[], - preprocess_args={}, imshow_args={}, text_args={}, colorhist_args={}): - """Plot weighted image data using downsampling, optional preprocessing, and decorators. - """ +def plot_data( + D, + W, + downsampling=4, + zoom=1, + label=None, + colorhist=False, + stamps=[], + preprocess_args={}, + imshow_args={}, + text_args={}, + colorhist_args={}, +): + """Plot weighted image data using downsampling, optional preprocessing, and decorators.""" # Downsample the input data. D, W = desietc.util.downsample_weighted(D, W, downsampling) # Preprocess the data for display. @@ -106,35 +130,64 @@ def plot_data(D, W, downsampling=4, zoom=1, label=None, colorhist=False, stamps= ny, nx = D.shape # Display the image. args = dict(imshow_args) - if 'extent' not in args: + if "extent" not in args: # Use the input pixel space for the extent, without downsampling. - args['extent'] = [-0.5, nx * downsampling - 0.5, -0.5, ny * downsampling - 0.5] - fig, ax = plot_pixels(D, zoom=zoom, label=label, colorhist=colorhist, - imshow_args=args, text_args=text_args, colorhist_args=colorhist_args) + args["extent"] = [-0.5, nx * downsampling - 0.5, -0.5, ny * downsampling - 0.5] + fig, ax = plot_pixels( + D, + zoom=zoom, + label=label, + colorhist=colorhist, + imshow_args=args, + text_args=text_args, + colorhist_args=colorhist_args, + ) outline = [ - matplotlib.patheffects.Stroke(linewidth=1, foreground='k'), - matplotlib.patheffects.Normal()] + matplotlib.patheffects.Stroke(linewidth=1, foreground="k"), + matplotlib.patheffects.Normal(), + ] for k, stamp in enumerate(stamps): yslice, xslice = stamp[:2] xlo, xhi = xslice.start, xslice.stop ylo, yhi = yslice.start, yslice.stop - rect = plt.Rectangle((xlo, ylo), xhi - xlo, yhi - ylo, fc='none', ec='w', lw=1) + rect = plt.Rectangle((xlo, ylo), xhi - xlo, yhi - ylo, fc="none", ec="w", lw=1) ax.add_artist(rect) if xhi < nx // 2: - xtext, halign = xhi, 'left' + xtext, halign = xhi, "left" else: - xtext, halign = xlo, 'right' + xtext, halign = xlo, "right" text = ax.text( - xtext, 0.5 * (ylo + yhi), str(k), fontsize=12, color='w', va='center', ha=halign) + xtext, + 0.5 * (ylo + yhi), + str(k), + fontsize=12, + color="w", + va="center", + ha=halign, + ) text.set_path_effects(outline) return fig, ax def save_acquisition_summary( - mjd, exptag, psf_model, psf_stack, fwhm, ffrac, nstars, badfit, noisy, path, - show_north=True, show_fiber=True, zoom=5, dpi=128, cmap='magma', masked_color='gray'): - """ - """ + mjd, + exptag, + psf_model, + psf_stack, + fwhm, + ffrac, + nstars, + badfit, + noisy, + path, + show_north=True, + show_fiber=True, + zoom=5, + dpi=128, + cmap="magma", + masked_color="gray", +): + """ """ # Get the size of the PSF model and stack images. 
shapes = [D.shape for D in psf_stack.values() if D is not None] if len(shapes) == 0: @@ -146,20 +199,25 @@ def save_acquisition_summary( # Initialize the figure. width = size * zoom * ngfa height = size * zoom * 2 - fig, axes = plt.subplots(2, ngfa, figsize=(width / dpi, height / dpi), dpi=dpi, frameon=False) + fig, axes = plt.subplots( + 2, ngfa, figsize=(width / dpi, height / dpi), dpi=dpi, frameon=False + ) plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0) # Prepare a colormap with our custom ivar=0 color. cmap = copy.copy(matplotlib.cm.get_cmap(cmap)) cmap.set_bad(color=masked_color) # Get the colormap scale to use for all images. model_sum = {name: psf_model[name].sum() for name in psf_model} - model_max = np.median([psf_model[camera].max() / model_sum[camera] for camera in psf_model]) + model_max = np.median( + [psf_model[camera].max() / model_sum[camera] for camera in psf_model] + ) vmin, vmax = -0.1 * model_max, 1.0 * model_max # Calculate the image extent. # Outline text to ensure that it is visible whatever pixels are below. outline = [ - matplotlib.patheffects.Stroke(linewidth=1, foreground='k'), - matplotlib.patheffects.Normal()] + matplotlib.patheffects.Stroke(linewidth=1, foreground="k"), + matplotlib.patheffects.Normal(), + ] # Calculate the fiber diameter to overlay in GFA pixels. fiber_diam_um = 107 pixel_size_um = 15 @@ -168,16 +226,27 @@ def save_acquisition_summary( # Loop over cameras. default_norm = np.median([s for s in model_sum.values()]) for i, name in enumerate(names): - for ax in axes[:,i]: - ax.axis('off') - ax.add_artist(plt.Rectangle([0,0], 1, 1, fc='k', ec='none', transform=ax.transAxes, zorder=-1)) + for ax in axes[:, i]: + ax.axis("off") + ax.add_artist( + plt.Rectangle( + [0, 0], 1, 1, fc="k", ec="none", transform=ax.transAxes, zorder=-1 + ) + ) if name in psf_stack and psf_stack[name] is not None: data = psf_stack[name][0].copy() norm = model_sum.get(name, default_norm) data /= norm # do not show ivar=0 pixels data[psf_stack[name][1] == 0] = np.nan - axes[0, i].imshow(data, vmin=vmin, vmax=vmax, cmap=cmap, interpolation='none', origin='lower') + axes[0, i].imshow( + data, + vmin=vmin, + vmax=vmax, + cmap=cmap, + interpolation="none", + origin="lower", + ) if show_north: # Draw an arrow point north in this GFA's pixel basis. n = int(name[5]) @@ -187,50 +256,107 @@ def save_acquisition_summary( dv = 0.02 * size * np.sin(angle) xpt = np.array([-4 * du, dv, du, -dv, -4 * du]) ypt = np.array([4 * dv, du, -dv, -du, 4 * dv]) - axes[0, i].add_line(matplotlib.lines.Line2D(xpt + xc, ypt + yc, c='c', lw=1, ls='-')) + axes[0, i].add_line( + matplotlib.lines.Line2D(xpt + xc, ypt + yc, c="c", lw=1, ls="-") + ) if name in psf_model and psf_model[name] is not None: data = psf_model[name] data /= model_sum[name] - axes[1, i].imshow(psf_model[name], vmin=vmin, vmax=vmax, cmap=cmap, - interpolation='bicubic', origin='lower') + axes[1, i].imshow( + psf_model[name], + vmin=vmin, + vmax=vmax, + cmap=cmap, + interpolation="bicubic", + origin="lower", + ) if show_fiber: # Draw an outline of the fiber. - fiber = matplotlib.patches.Circle(center, radius, color='c', ls='-', lw=1, alpha=0.7, fill=False) - axes[1,i].add_artist(fiber) + fiber = matplotlib.patches.Circle( + center, radius, color="c", ls="-", lw=1, alpha=0.7, fill=False + ) + axes[1, i].add_artist(fiber) # Generate a text overlay. 
ax = plt.axes((0, 0, 1, 1)) - ax.axis('off') + ax.axis("off") if mjd is not None: - night = desietc.util.mjd_to_night(mjd) + night = desietc.util.mjd_to_night(mjd) localtime = desietc.util.mjd_to_date(mjd, utc_offset=-7) - center = localtime.strftime('%H:%M:%S') + ' (UTC-7)' + center = localtime.strftime("%H:%M:%S") + " (UTC-7)" else: - night = 'YYYYMMDD' - center = 'HH:MM:SS (UTC-7)' - left = f'{night}/{exptag}' + night = "YYYYMMDD" + center = "HH:MM:SS (UTC-7)" + left = f"{night}/{exptag}" right = f'FWHM={fwhm:.2f}" ({100*ffrac:.1f}%)' - for (x, ha, label) in zip((0, 0.5, 1), ('left', 'center', 'right'), (left, center, right)): - text = ax.text(x, 0, label, color='w', ha=ha, va='bottom', size=10, transform=ax.transAxes) + for x, ha, label in zip( + (0, 0.5, 1), ("left", "center", "right"), (left, center, right) + ): + text = ax.text( + x, 0, label, color="w", ha=ha, va="bottom", size=10, transform=ax.transAxes + ) text.set_path_effects(outline) # Add per-GFA labels. xtext = (np.arange(ngfa) + 0.5) / ngfa for x, name in zip(xtext, names): - text = ax.text(x, 0.5, name, color='w', ha='center', va='center', size=8, transform=ax.transAxes) + text = ax.text( + x, + 0.5, + name, + color="w", + ha="center", + va="center", + size=8, + transform=ax.transAxes, + ) text.set_path_effects(outline) nstar = nstars[name] - label = f'{nstar} star' - if nstar > 1: label += 's' - text = ax.text(x, 0.45, label, color='w', ha='center', va='center', size=7, transform=ax.transAxes) + label = f"{nstar} star" + if nstar > 1: + label += "s" + text = ax.text( + x, + 0.45, + label, + color="w", + ha="center", + va="center", + size=7, + transform=ax.transAxes, + ) text.set_path_effects(outline) - warn_args = dict(size=10, color='c', fontweight='bold') + warn_args = dict(size=10, color="c", fontweight="bold") if nstar == 0: - text = ax.text(x, 0.92, 'NO STARS?', ha='center', va='top', transform=ax.transAxes, **warn_args) + text = ax.text( + x, + 0.92, + "NO STARS?", + ha="center", + va="top", + transform=ax.transAxes, + **warn_args, + ) text.set_path_effects(outline) elif name in badfit: - text = ax.text(x, 0.92, 'BAD PSF?', ha='center', va='top', transform=ax.transAxes, **warn_args) + text = ax.text( + x, + 0.92, + "BAD PSF?", + ha="center", + va="top", + transform=ax.transAxes, + **warn_args, + ) text.set_path_effects(outline) if name in noisy: - text = ax.text(x, 1, 'NOISY?', ha='center', va='top', transform=ax.transAxes, **warn_args) + text = ax.text( + x, + 1, + "NOISY?", + ha="center", + va="top", + transform=ax.transAxes, + **warn_args, + ) text.set_path_effects(outline) # Save the image. plt.savefig(path) @@ -238,8 +364,7 @@ def save_acquisition_summary( def plot_measurements(buffer, mjd1, mjd2, ymin=0, resolution=1, label=None, ax=None): - """Plot measurements spanning (mjd1, mjd2) in the specified buffer. - """ + """Plot measurements spanning (mjd1, mjd2) in the specified buffer.""" ax = ax or plt.gca() # Convert from MJD to minutes after mjd1. 
minutes = lambda mjd: (mjd - mjd1) * 720 @@ -248,43 +373,133 @@ def plot_measurements(buffer, mjd1, mjd2, ymin=0, resolution=1, label=None, ax=N padded = buffer.inside(xlo, xhi) used = buffer.inside(mjd1 - buffer.padding, mjd2 + buffer.padding) extra = padded & ~used - for sel, color in ((extra, 'lightgray'), (used, 'b')): - x = 0.5 * (buffer.entries['mjd1'][sel] + buffer.entries['mjd2'][sel]) - dx = 0.5 * (buffer.entries['mjd2'][sel] - buffer.entries['mjd1'][sel]) - y = buffer.entries['value'][sel] - dy = buffer.entries['error'][sel] - ax.errorbar(minutes(x), y, xerr=dx * 720, yerr=dy, fmt='.', color=color, ms=2, lw=1) + for sel, color in ((extra, "lightgray"), (used, "b")): + x = 0.5 * (buffer.entries["mjd1"][sel] + buffer.entries["mjd2"][sel]) + dx = 0.5 * (buffer.entries["mjd2"][sel] - buffer.entries["mjd1"][sel]) + y = buffer.entries["value"][sel] + dy = buffer.entries["error"][sel] + ax.errorbar( + minutes(x), y, xerr=dx * 720, yerr=dy, fmt=".", color=color, ms=2, lw=1 + ) # Draw the linear interpolation through the selected points. ngrid = int(np.ceil((mjd2 - mjd1) / (resolution / buffer.SECS_PER_DAY))) x_grid = np.linspace(mjd1, mjd2, ngrid) y_grid = buffer.sample_grid(x_grid) - ax.fill_between(minutes(x_grid), ymin, y_grid, color='b', lw=0, alpha=0.2) + ax.fill_between(minutes(x_grid), ymin, y_grid, color="b", lw=0, alpha=0.2) # Highlight samples used for the trend. sel = buffer.inside(mjd2 - buffer.recent, mjd2) - x = 0.5 * (buffer.entries['mjd1'][sel] + buffer.entries['mjd2'][sel]) - y = buffer.entries['value'][sel] - ax.plot(minutes(x), y, 'r.', ms=4, zorder=10) + x = 0.5 * (buffer.entries["mjd1"][sel] + buffer.entries["mjd2"][sel]) + y = buffer.entries["value"][sel] + ax.plot(minutes(x), y, "r.", ms=4, zorder=10) # Extrapolate the trend. offset, slope = buffer.trend(mjd2) x = np.array([mjd2, xhi]) y = offset + slope * (x - mjd2) - ax.fill_between(minutes(x), ymin, y, color='r', lw=0, alpha=0.2) + ax.fill_between(minutes(x), ymin, y, color="r", lw=0, alpha=0.2) # Draw vertical lines to show the (mjd1, mjd2) interval. for xv in (mjd1, mjd2): - ax.axvline(minutes(xv), c='b', ls='--') + ax.axvline(minutes(xv), c="b", ls="--") ax.set_xlim(minutes(xlo), minutes(xhi)) ax.set_ylim(ymin, None) - ax.set_xlabel(f'Minutes relative to MJD {mjd1:.6f}') + ax.set_xlabel(f"Minutes relative to MJD {mjd1:.6f}") if label is not None: ax.set_ylabel(label) -def mjd_plot(mjd, *args, axis=None, utc_offset=-7, date_format='%H:%M', **kwargs): +def mjd_plot(mjd, *args, axis=None, utc_offset=-7, date_format="%H:%M", **kwargs): """Replacement for plt.plot or axis.plot where the x-axis value is an MJD interpreted as a local datetime. """ axis = axis or plt.gca() - plot_epoch = matplotlib.dates.date2num(datetime.datetime(1858, 11, 17) + datetime.timedelta(hours=utc_offset)) + plot_epoch = matplotlib.dates.date2num( + datetime.datetime(1858, 11, 17) + datetime.timedelta(hours=utc_offset) + ) axis.plot_date(mjd + plot_epoch, *args, **kwargs) axis.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(date_format)) - return axis \ No newline at end of file + return axis + + +def plotSkyCentroidFit(data, name, plot_fine=True, slow=True, save=None): + """Make a plot comparing the slow and fast centroid fit methods for SkyCam data. + + See DESI-8945 for examples. 
+ """ + N = len(SKY.slices[name]) + icamera = SKY.sky_names.index(name) + mask = desietc.sky.fiber_mask[icamera][:N] > 0 + + # Run the fast method + start = time.time() + SKY.setraw( + data, + name, + fit_centroids=True, + fast_centroids=True, + refit=False, + return_offsets=False, + ) + elapsed = time.time() - start + print( + f"FAST dx {SKY.fit_dx:.2f} dy {SKY.fit_dy:.2f} flux {np.round(SKY.flux[:N][mask],1)} " + + f"bg {np.round(SKY.bgfit[:N][mask],2)}\nfluxerr {np.round(SKY.fluxerr[:N][mask],3)} elapsed {1000*elapsed:.1f}ms" + ) + + # Plot the chi-square values tabulated on the fixed coarse grid of centroid offsets. + dx = SKY.centroid_dxy + dy = SKY.centroid_dxy + xpad = (dx[1] - dx[0]) / 2 + ypad = (dy[1] - dy[0]) / 2 + plt.imshow( + SKY.grid_chisq, + origin="lower", + interpolation="none", + extent=(dx[0] - xpad, dx[-1] + xpad, dy[0] - ypad, dy[-1] + ypad), + ) + if plot_fine: + # Superimpose the spline interpolation of the 5x5 grid centered on the coarse-grid min chisq. + fxpad = (SKY.grid_finedx[1] - SKY.grid_finedx[0]) / 2 + fypad = (SKY.grid_finedy[1] - SKY.grid_finedy[0]) / 2 + plt.imshow( + SKY.grid_fine_chisq, + origin="lower", + interpolation="none", + extent=( + SKY.grid_finedx[0] - fxpad, + SKY.grid_finedx[-1] + fxpad, + SKY.grid_finedy[0] - fypad, + SKY.grid_finedy[-1] + fypad, + ), + ) + + plt.xlim(dx[0] - xpad, dx[-1] + xpad) + plt.ylim(dy[0] - ypad, dy[-1] + ypad) + plt.plot(SKY.fit_dx, SKY.fit_dy, "rx", ms=10) + plt.xlabel("Centroid $\Delta x$ [pixels]") + plt.ylabel("Centroid $\Delta y$ [pixels]") + plt.text(0.02, 0.02, name, transform=plt.gca().transAxes, fontsize=16) + + if slow: + # Run the slow method + start = time.time() + SKY.setraw( + data, + name, + fit_centroids=True, + fast_centroids=False, + refit=False, + return_offsets=False, + ) + elapsed = time.time() - start + print( + f"SLOW dx {SKY.fit_dx:.2f} dy {SKY.fit_dy:.2f} flux {np.round(SKY.flux[:N][mask],1)} " + + f"bg {np.round(SKY.bgfit[:N][mask],2)}\nfluxerr {np.round(SKY.fluxerr[:N][mask],3)} elapsed {1000*elapsed:.1f}ms" + ) + + # Plot centroid-offsets calculated by the slow method for each fiber. + plt.plot(*SKY.spot_offsets.T, "w.", ms=2) + # Plot the global centroid offset calculated by the slow method as the mean over fibers. + plt.plot(SKY.fit_dx, SKY.fit_dy, "w+", ms=10) + + plt.tight_layout() + if save: + plt.savefig(save) From 9426cd13e72a0c83d08388f097e197b32cb2e045 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 08:03:05 -0800 Subject: [PATCH 16/29] fix typos --- desietc/plot.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/desietc/plot.py b/desietc/plot.py index e23934e..e7e3b4d 100644 --- a/desietc/plot.py +++ b/desietc/plot.py @@ -4,6 +4,7 @@ """ import datetime +import time import copy # for shallow copies of matplotlib colormaps try: @@ -419,7 +420,7 @@ def mjd_plot(mjd, *args, axis=None, utc_offset=-7, date_format="%H:%M", **kwargs return axis -def plotSkyCentroidFit(data, name, plot_fine=True, slow=True, save=None): +def plotSkyCentroidFit(data, name, SKY, plot_fine=True, slow=True, save=None): """Make a plot comparing the slow and fast centroid fit methods for SkyCam data. See DESI-8945 for examples. 
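
A hypothetical invocation of the new diagnostic is sketched below; the calibration
file name, the exposure file name, and the HDU/frame layout are assumptions for
illustration only, not values taken from this PR:

    import fitsio
    import desietc.sky
    import desietc.plot

    # Assumed inputs: a local SkyCam calibration file and one raw sky exposure.
    SKY = desietc.sky.SkyCamera(calib_name="SKY_calib.fits")
    with fitsio.FITS("sky-00123456.fits.fz") as hdus:
        raw = hdus["SKYCAM0"].read()[0]  # first SkyCam frame (layout assumed)

    # Overlay the fast (grid + spline) and slow (per-fiber) centroid fits.
    desietc.plot.plotSkyCentroidFit(
        raw, "SKYCAM0", SKY, plot_fine=True, slow=True, save="centroid-fit-SKYCAM0.png"
    )

Because the slow method fits every fiber independently while the fast method fits a
single offset common to all fibers, the white points and cross drawn by the slow fit
give a direct visual check on the red cross drawn by the fast fit.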
From 2155ff1f682345fae9044bcbca1da06a91d8f949 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 08:29:40 -0800 Subject: [PATCH 17/29] implement separate x,y grids for fast centroid fit --- desietc/plot.py | 4 ++-- desietc/sky.py | 27 ++++++++++++++++----------- desietc/util.py | 41 +++++++++++++++++++++++------------------ 3 files changed, 41 insertions(+), 31 deletions(-) diff --git a/desietc/plot.py b/desietc/plot.py index e7e3b4d..4540359 100644 --- a/desietc/plot.py +++ b/desietc/plot.py @@ -446,8 +446,8 @@ def plotSkyCentroidFit(data, name, SKY, plot_fine=True, slow=True, save=None): ) # Plot the chi-square values tabulated on the fixed coarse grid of centroid offsets. - dx = SKY.centroid_dxy - dy = SKY.centroid_dxy + dx = SKY.centroid_dx + dy = SKY.centroid_dy xpad = (dx[1] - dx[0]) / 2 ypad = (dy[1] - dy[0]) / 2 plt.imshow( diff --git a/desietc/sky.py b/desietc/sky.py index 2610a65..d7148a8 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -177,8 +177,8 @@ class SkyCamera(object): def __init__( self, calib_name="SKY_calib.fits", - centroid_grid_size_pixels=5, - centroid_ngrid=21, + centroid_grid_extent=(-3, 3, -4, 2), + centroid_ngrid=(13, 13), ): self.names, self.slices, self.masks, self.spots, self.calibs = load_calib_data( calib_name @@ -200,21 +200,25 @@ def __init__( self.model = np.empty((maxstamps, stampsize, stampsize), np.float32) self.pull = np.empty((maxstamps, stampsize, stampsize), np.float32) # Initialize the grid of offset spots for centroid fitting. - if centroid_ngrid > 0: - self.centroid_dxy = np.linspace( - -centroid_grid_size_pixels, centroid_grid_size_pixels, centroid_ngrid + if centroid_ngrid is not None: + self.centroid_dx = np.linspace( + centroid_grid_extent[0], centroid_grid_extent[1], centroid_ngrid[0] + ) + self.centroid_dy = np.linspace( + centroid_grid_extent[2], centroid_grid_extent[3], centroid_ngrid[1] ) ny, nx = self.spots[self.sky_names[0]][0].shape self.offsetSpots = {} for cam in self.sky_names: spots = self.spots[cam] self.offsetSpots[cam] = np.zeros( - (len(spots), centroid_ngrid, centroid_ngrid, ny, nx), np.float32 + (len(spots), centroid_ngrid[1], centroid_ngrid[0], ny, nx), + np.float32, ) - for j in range(centroid_ngrid): - dy = self.centroid_dxy[j] - for i in range(centroid_ngrid): - dx = self.centroid_dxy[i] + for j in range(centroid_ngrid[1]): + dy = self.centroid_dy[j] + for i in range(centroid_ngrid[0]): + dx = self.centroid_dx[i] for cam in self.sky_names: spots = self.spots[cam] self.offsetSpots[cam][:, j, i] = desietc.util.shifted_profile( @@ -327,7 +331,8 @@ def setraw( self.data[:N][mask], self.ivar[:N][mask], self.offsetSpots[name][:N][mask], - self.centroid_dxy, + self.centroid_dx, + self.centroid_dy, return_grids=True, ) self.fit_dx, self.fit_dy = fit_offsets.T diff --git a/desietc/util.py b/desietc/util.py index 24a3fe7..7f99db6 100644 --- a/desietc/util.py +++ b/desietc/util.py @@ -140,7 +140,7 @@ def get_chi2(data, ivar, profile, flux, background, area=1): def fit_spots_flux_and_pos_fast( - data, ivar, offset_spots, offset_dxy, area=1, nfine=26, return_grids=False + data, ivar, offset_spots, offset_dx, offset_dy, area=1, nfine=26, return_grids=False ): """Fit spot images to simultaneously determine the flux, background level, and centroid offsets. @@ -163,11 +163,15 @@ def fit_spots_flux_and_pos_fast( ivar : array 2D array of inverse variance of data with shape (nspots, ny, nx). offset_spots : array - Array of offset spot images with shape (nspots, ngrid, ngrid, ny, nx). 
- offset_dxy : array - Array of centroid offsets in units of pixels with shape (ngrid,). - offset_spots[n,i,j] is the 2D spot image with shape (ny, nx) at centroid offset - (offset_dxy[i], offset_dxy[j]). + Array of offset spot images with shape (nspots, ngridy, ngridx, ny, nx). + offset_dx : array + Array of centroid offsets in units of pixels with shape (ngridx,). + offset_spots[n,j,i] is the 2D spot image with shape (ny, nx) at centroid offset + x = offset_dx[i], y = offset_dy[j]. + offset_dy : array + Array of centroid offsets in units of pixels with shape (ngridy,). + offset_spots[n,j,i] is the 2D spot image with shape (ny, nx) at centroid offset + x = offset_dx[i], y = offset_dy[j]. area : scalar or array Area of each pixel used to predict its background level as b * area. nfine : int @@ -191,13 +195,14 @@ def fit_spots_flux_and_pos_fast( """ # Tabulate chi-square over grid of centroid offsets (applied simultaneously to all spots). ndata = len(data) - ngrid = len(offset_dxy) - chisq = np.zeros((ngrid, ngrid)) - flux = np.zeros((ngrid, ngrid, ndata)) - bg = np.zeros((ngrid, ngrid, ndata)) - cov = np.zeros((ngrid, ngrid, ndata, 2, 2)) - for j in range(ngrid): - for i in range(ngrid): + ngridx = len(offset_dx) + ngridy = len(offset_dy) + chisq = np.zeros((ngridy, ngridx)) + flux = np.zeros((ngridy, ngridx, ndata)) + bg = np.zeros((ngridy, ngridx, ndata)) + cov = np.zeros((ngridy, ngridx, ndata, 2, 2)) + for j in range(ngridy): + for i in range(ngridx): flux[j, i], bg[j, i], cov[j, i], _ = fit_spots( data, ivar, offset_spots[:, j, i], area=area ) @@ -208,16 +213,16 @@ def fit_spots_flux_and_pos_fast( chisq[j, i] = np.sum((data - model) ** 2 * ivar) # Find grid point with minimum chi-square. j, i = np.unravel_index(np.argmin(chisq), chisq.shape) - fit_dx = offset_dxy[i] - fit_dy = offset_dxy[j] + fit_dx = offset_dx[i] + fit_dy = offset_dy[j] fit_flux = flux[j, i] fit_bg = bg[j, i] fit_cov = cov[j, i] - if nfine > 0 and j >= 2 and i >= 2 and j < ngrid - 2 and i < ngrid - 2: + if nfine > 0 and j >= 2 and i >= 2 and j < ngridy - 2 and i < ngridx - 2: # Use a 2D bicubic spline of a 5x5 subgrid to refine the location of the minimum chisq. 
subgrid = chisq[j - 2 : j + 3, i - 2 : i + 3] - subdx = offset_dxy[i - 2 : i + 3] - subdy = offset_dxy[j - 2 : j + 3] + subdx = offset_dx[i - 2 : i + 3] + subdy = offset_dy[j - 2 : j + 3] finedx = np.linspace(subdx[0], subdx[-1], nfine) finedy = np.linspace(subdy[0], subdy[-1], nfine) pts = np.stack(np.meshgrid(finedy, finedx, indexing="ij"), axis=-1) From 30143e21d84ac2174875bd2e9c62e544792c2c50 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 08:39:13 -0800 Subject: [PATCH 18/29] fix typo --- desietc/sky.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index d7148a8..88e651a 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -177,7 +177,7 @@ class SkyCamera(object): def __init__( self, calib_name="SKY_calib.fits", - centroid_grid_extent=(-3, 3, -4, 2), + centroid_grid_extent=(-3, 3, -2, 4), centroid_ngrid=(13, 13), ): self.names, self.slices, self.masks, self.spots, self.calibs = load_calib_data( From da60a9718e19765eb1d7f159b0a546cac1b278dc Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 17:54:26 -0800 Subject: [PATCH 19/29] handle coarse fit on edge --- desietc/plot.py | 2 +- desietc/util.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/desietc/plot.py b/desietc/plot.py index 4540359..5116d58 100644 --- a/desietc/plot.py +++ b/desietc/plot.py @@ -456,7 +456,7 @@ def plotSkyCentroidFit(data, name, SKY, plot_fine=True, slow=True, save=None): interpolation="none", extent=(dx[0] - xpad, dx[-1] + xpad, dy[0] - ypad, dy[-1] + ypad), ) - if plot_fine: + if plot_fine and SKY.grid_fine_chisq is not None: # Superimpose the spline interpolation of the 5x5 grid centered on the coarse-grid min chisq. fxpad = (SKY.grid_finedx[1] - SKY.grid_finedx[0]) / 2 fypad = (SKY.grid_finedy[1] - SKY.grid_finedy[0]) / 2 diff --git a/desietc/util.py b/desietc/util.py index 7f99db6..ee3ae72 100644 --- a/desietc/util.py +++ b/desietc/util.py @@ -218,6 +218,9 @@ def fit_spots_flux_and_pos_fast( fit_flux = flux[j, i] fit_bg = bg[j, i] fit_cov = cov[j, i] + finegrid = None + finedx = None + finedy = None if nfine > 0 and j >= 2 and i >= 2 and j < ngridy - 2 and i < ngridx - 2: # Use a 2D bicubic spline of a 5x5 subgrid to refine the location of the minimum chisq. subgrid = chisq[j - 2 : j + 3, i - 2 : i + 3] From 4b8fe3bcc144cdc959c66286bb05ba6218040785 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 18:51:38 -0800 Subject: [PATCH 20/29] better handling of centroid fit at edge --- desietc/util.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/desietc/util.py b/desietc/util.py index ee3ae72..45ea34e 100644 --- a/desietc/util.py +++ b/desietc/util.py @@ -221,7 +221,13 @@ def fit_spots_flux_and_pos_fast( finegrid = None finedx = None finedy = None - if nfine > 0 and j >= 2 and i >= 2 and j < ngridy - 2 and i < ngridx - 2: + if nfine > 0: + if j < 2 or i < 2 or j >= ngridy - 2 or i >= ngridx - 2: + # The minimum is too close to the edge of the offset grid, so assume this is a bad fit + # and instead center on the middle of the grid. + i = ngridx // 2 + 1 + j = ngridy // 2 + 1 + fit_cov = cov[j, i] # Use a 2D bicubic spline of a 5x5 subgrid to refine the location of the minimum chisq. 
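        # (Recentering on the middle of the grid above guarantees that the 5x5
        # window extracted below lies fully inside the tabulated chi-square grid
        # for the default 13-point axes, so the spline refinement always has
        # valid data to work with.)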
subgrid = chisq[j - 2 : j + 3, i - 2 : i + 3] subdx = offset_dx[i - 2 : i + 3] From 08804b407e8fb903e5ad57343809148115fc6db6 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sat, 8 Feb 2025 19:13:46 -0800 Subject: [PATCH 21/29] add support for reprocessing sky data --- desietc/sky.py | 178 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) diff --git a/desietc/sky.py b/desietc/sky.py index 88e651a..c13a908 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -2,6 +2,9 @@ """ import collections +import pathlib +import json +import warnings try: import DOSlib.logger as logging @@ -14,6 +17,8 @@ import scipy.optimize import scipy.stats +import pandas as pd + import fitsio import desietc.util @@ -467,3 +472,176 @@ def setraw( return meanflux, ivar**-0.5, self.spot_offsets else: return meanflux, ivar**-0.5 + + +def get_db_temp( + night, DB, table_name="environmentmonitor_dome", col_name="scr_e_wall_coude" +): + """Look up the temperature to use for correcting estimated sky fluxes.""" + N = str(night) + year, month, day = int(N[0:4]), int(N[4:6]), int(N[6:8]) + start = pd.Timestamp(f"{year}-{month}-{day}T19:00:00+0000") + stop = start + pd.Timedelta(1, "day") + when = f"time_recorded > timestamp '{start}'" + when += f" and time_recorded < timestamp '{stop}'" + sql = f"select time_recorded,{col_name} as temp from telemetry.{table_name} where {when} order by time_recorded" + temp = DB.query( + sql, + dates=[ + "time_recorded", + ], + maxrows=50000, + ) + return temp + + +def process_night( + night, + SKY, + DB, + DATA=pathlib.Path("/global/cfs/cdirs/desi/spectro/data/"), + verbose=False, +): + """Process a single night of SkyCam data.""" + + # Verify that we have DESI science data for this night + sky_exps = set( + [ + path.name[4:12] + for path in (DATA / str(night)).glob("????????/sky-????????.fits.fz") + ] + ) + desi_exps = set( + [ + path.name[5:13] + for path in (DATA / str(night)).glob("????????/desi-????????.fits.fz") + ] + ) + good_exps = sorted(sky_exps & desi_exps) + nexps = len(good_exps) + if nexps == 0: + print(f"No exposures found for {night}") + return + if verbose: + print( + f"Found {nexps} exposures in {good_exps[0]}-{good_exps[-1]} with DESI and SKY data for {night}" + ) + + # Initialize the results + results = dict(night=night, nexps=nexps, exps=[]) + + # Initialize temperature interpolation + coude_temp = get_db_temp(night, DB) + coude_t = pd.to_datetime(coude_temp["time_recorded"]).astype(np.int64) + coude_T = coude_temp["temp"].astype(float) + if verbose: + print( + f"Read {len(coude_t)} temperatures with mean {np.mean(coude_T):.1f}C for {night}" + ) + + # Loop over exposures + for k, exptag in enumerate(good_exps): + if verbose: + print(f"Processing [{k+1:2d}/{nexps:2d}] {night}/{exptag}...") + try: + skydata = DATA / str(night) / exptag / f"sky-{exptag}.fits.fz" + hdus = fitsio.FITS(str(skydata)) + nframes = 0 + for camid, cam in enumerate(("SKYCAM0", "SKYCAM1")): + + meta = pd.DataFrame( + hdus[cam + "T"].read(), + ) + if camid == 0: + # Get the number of SkyCam frames available + nframes = len(meta) + flux = np.zeros((3, 2, nframes)) + dflux = np.zeros((3, 2, nframes)) + offset = np.zeros((2, 2, nframes)) + ##print(f'{night}/{exptag} has {nframes} frames') + # Get the temperature to use from the start of this exposure + tstamps = np.array( + pd.to_datetime(meta["DATE-OBS"]).astype(np.int64) + ) + Texp = np.interp(tstamps[0], coude_t, coude_T) + assert np.isfinite(Texp), "Invalid interpolated temperature" + ##print(f'Coude temperature is 
{Texp:.1f}C') + else: + if len(meta) != nframes: + raise RuntimeError( + f"Different number of frames! {nframes} != {len(meta)}" + ) + + # Load raw SkyCam data + raw = hdus[cam].read() + assert raw.ndim == 3, "Is this a sky flat?" + + # Loop over frames + for frame in range(nframes): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=RuntimeWarning) + # Calculate flux in this camera using the old method + flux[0, camid, frame], dflux[0, camid, frame] = SKY.setraw( + raw[frame], + cam, + refit=True, + fit_centroids=False, + Temperature=None, + ) + # Calculate flux in this camera using the new method (including temperature correction) + flux[1, camid, frame], dflux[1, camid, frame] = SKY.setraw( + raw[frame], + cam, + refit=False, + fit_centroids=True, + fast_centroids=True, + Temperature=Texp, + ) + # Save the flux before the temperature correction is applied + flux[2, camid, frame] = SKY.flux_notemp + dflux[2, camid, frame] = SKY.fluxerr_notemp + # Save the fitted centroid shift + offset[:, camid, frame] = [SKY.fit_dx, SKY.fit_dy] + + # Normalize fluxes to exposure time + exptime = np.array(meta.EXPTIME) + flux[:, camid] /= exptime + dflux[:, camid] /= exptime + + # Combine cameras for each frame + assert np.all((dflux > 0) & np.isfinite(dflux)), "Invalid flux errors" + ivar = dflux**-2 + combined = np.sum(flux * ivar, axis=1) / np.sum(ivar, axis=1) + + # Combine frames over the science exposure + mean = np.mean(combined, axis=-1) + + # Save results + nround = 3 + expdata = dict( + exptag=exptag, + Texp=np.round(Texp, nround), + mean=np.round(mean, nround), + combined=np.round(combined, nround), + flux=np.round(flux, nround), + dflux=np.round(dflux, nround), + offset=np.round(offset, nround), + ) + results["exps"].append(expdata) + + if verbose: + print( + f" {night}/{exptag} N={nframes} T={Texp:.1f}C {np.round(mean,2)}" + ) + + # if k >= 2: + # break + + except Exception as e: + print(f"Skipping {night}/{exptag} with error: {e}") + + fname = f"out/etcsky-{night}.json" + with open(fname, "w") as f: + json.dump(results, f, cls=desietc.util.NumpyEncoder) + if verbose: + print(f"Saved results to {fname}") From aa4a592a996acab1adf79a629e06d2333625e491 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 07:31:22 -0800 Subject: [PATCH 22/29] use sqlalchemy for db access --- desietc/db.py | 175 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 105 insertions(+), 70 deletions(-) diff --git a/desietc/db.py b/desietc/db.py index a4a4135..1e36e08 100644 --- a/desietc/db.py +++ b/desietc/db.py @@ -5,6 +5,7 @@ are installed (for a direct connection), or else that requests is installed (for an indirect http connection). """ + import collections import datetime import os.path @@ -30,7 +31,7 @@ class DB(object): """Initialize a connection to the database. - To force a direct connection using pyscopg2, set ``http_fallback`` + To force a direct connection using sqlalchemy and pyscopg2, set ``http_fallback`` to ``False``. To force an indirect http connection using requests, set ``config_name`` to ``None``. By default, will attempt a direct connection then fall back to an indirect connection. @@ -49,65 +50,81 @@ class DB(object): Use an indirect http connection when a direct connection fails if True. 
""" - def __init__(self, config_name='db.yaml', http_fallback=True): - self.method = 'indirect' - if os.path.exists(config_name): + + def __init__( + self, + config_name="/global/cfs/cdirs/desi/engineering/focalplane/db.yaml", + http_fallback=True, + ): + self.method = "indirect" + if pathlib.Path(config_name).exists(): # Try a direct connection. try: import yaml except ImportError: - raise RuntimeError('The pyyaml package is not installed.') - with open(config_name, 'r') as f: + raise RuntimeError("The pyyaml package is not installed.") + with open(config_name, "r") as f: db_config = yaml.safe_load(f) try: - import psycopg2 - self.conn = psycopg2.connect(**db_config) - self.method = 'direct' + import sqlalchemy + + self.engine = sqlalchemy.create_engine( + "postgresql://{user}:{password}@{host}:{port}/{dbname}".format( + **db_config + ) + ) + self.method = "direct" except ImportError: if not http_fallback: - raise RuntimeError('The psycopg2 package is not installed.') + raise RuntimeError("The sqlalchemy package is not installed.") except Exception as e: if not http_fallback: - raise RuntimeError(f'Unable to establish a database connection:\n{e}') - if self.method == 'indirect' and http_fallback: + raise RuntimeError( + f"Unable to establish a database connection:\n{e}" + ) + if self.method == "indirect" and http_fallback: try: import requests except ImportError: - raise RuntimeError('The requests package is not installed.') - logging.info(f'Established {self.method} database connection.') + raise RuntimeError("The requests package is not installed.") + logging.info(f"Established {self.method} database connection.") def query(self, sql, maxrows=10, dates=None): """Perform a query using arbitrary SQL. Returns a pandas dataframe. Use maxrows=None to remove any limit on the number of returned rows. """ - logging.debug(f'SQL: {sql}') - if 'limit ' in sql.lower(): - raise ValueError('Must specify SQL LIMIT using maxrows.') + logging.debug(f"SQL: {sql}") + if "limit " in sql.lower(): + raise ValueError("Must specify SQL LIMIT using maxrows.") if maxrows is None: - maxrows = 'NULL' - if self.method == 'direct': - return pd.read_sql(sql + f' LIMIT {maxrows}', self.conn, parse_dates=dates) + maxrows = "NULL" + if self.method == "direct": + return pd.read_sql( + sql + f" LIMIT {maxrows}", self.engine, parse_dates=dates + ) else: return self.indirect(dict(sql_statement=sql, maxrows=maxrows), dates) def indirect(self, params, dates=None): """Perform an indirect query using an HTTP request. Returns a pandas dataframe.""" - url = 'https://replicator.desi.lbl.gov/QE/DESI/app/query' - params['dbname'] = 'desi' + url = "https://replicator.desi.lbl.gov/QE/DESI/app/query" + params["dbname"] = "desi" # Use tab-separated output since the web interface does not escape embedded # special characters, and there are instances of commas in useful # string columns like PROGRAM. - #params['output_type'] = 'text,' # comma separated - params['output_type'] = 'text' # tab separated - logging.debug(f'INDIRECT PARAMS: {params}') + # params['output_type'] = 'text,' # comma separated + params["output_type"] = "text" # tab separated + logging.debug(f"INDIRECT PARAMS: {params}") req = requests.get(url, params=params) if req.status_code != requests.codes.ok: if req.status_code == 401: - raise RuntimeError('Authentication failed: have you setup your .netrc file?') + raise RuntimeError( + "Authentication failed: have you setup your .netrc file?" 
+ ) req.raise_for_status() # The server response ends each line with "\t\r\n" so we replace that with "\n" here. - text = req.text.replace('\t\r\n', '\n') - return pd.read_csv(io.StringIO(text), sep='\t', parse_dates=dates) + text = req.text.replace("\t\r\n", "\n") + return pd.read_csv(io.StringIO(text), sep="\t", parse_dates=dates) @staticmethod def where(**kwargs): @@ -134,30 +151,30 @@ def where(**kwargs): lo, hi = spec assert lo is None or hi is None or lo < hi if lo == None: - where.append(f'{col}<={hi}') + where.append(f"{col}<={hi}") elif hi == None: - where.append(f'{col}>={lo}') + where.append(f"{col}>={lo}") else: - where.append(f'({col} BETWEEN {lo} AND {hi})') - except (ValueError,TypeError,AssertionError): + where.append(f"({col} BETWEEN {lo} AND {hi})") + except (ValueError, TypeError, AssertionError): try: # Try to interpret spec as a string. - has_wildcard = any([wc in spec for wc in '%_']) + has_wildcard = any([wc in spec for wc in "%_"]) if has_wildcard: where.append(f"{col} LIKE '{spec}'") else: where.append(f"{col}='{spec}'") except TypeError: # Assume that spec is a single numeric value. - where.append(f'{col}={spec}') - return ' AND '.join(where) + where.append(f"{col}={spec}") + return " AND ".join(where) def select(self, table, what, where=None, maxrows=10, order=None, dates=None): - sql = f'select {what} from {table}' + sql = f"select {what} from {table}" if where is not None: - sql += f' where {where}' + sql += f" where {where}" if order is not None: - sql += f' order by {order}' + sql += f" order by {order}" return self.query(sql, maxrows, dates) @@ -165,25 +182,27 @@ class Exposures(object): """Cacheing wrapper class for the exposure database. Note that the exposures table uses 'ID' for the exposure id (not EXPID). """ - def __init__(self, db, columns='*', cachesize=5000): + + def __init__(self, db, columns="*", cachesize=5000): # Run a test query. - test = db.select('exposure.exposure', columns, maxrows=1) + test = db.select("exposure.exposure", columns, maxrows=1) self.columns = list(test.columns) - logging.debug(f'exposure table columns: {self.columns}') - self.what = ','.join(self.columns) + logging.debug(f"exposure table columns: {self.columns}") + self.what = ",".join(self.columns) self.db = db self.cache = collections.OrderedDict() self.cachesize = cachesize def __call__(self, expid, what=None): - """Lookup a single exposure and cache the results. - """ + """Lookup a single exposure and cache the results.""" if what is not None and what not in self.columns: raise ValueError(f'Invalid column name: "{what}".') if expid not in self.cache: - row = self.db.select('exposure.exposure', self.what, where=f'id={expid}', maxrows=1) + row = self.db.select( + "exposure.exposure", self.what, where=f"id={expid}", maxrows=1 + ) if row is None: - raise ValueError('No such exposure id {0}.'.format(expid)) + raise ValueError("No such exposure id {0}.".format(expid)) # Cache the results. self.cache[expid] = row.values[0] # Trim the cache if necessary. @@ -196,30 +215,39 @@ def __call__(self, expid, what=None): return values[self.columns.index(what)] def select(self, where, maxrows=10): - """Get exposures selected by where. Results are not cached. - """ - return self.db.select('exposure.exposure', self.what, where=where, maxrows=maxrows) + """Get exposures selected by where. 
Results are not cached.""" + return self.db.select( + "exposure.exposure", self.what, where=where, maxrows=maxrows + ) class NightTelemetry(object): - """Lookup telemetry using a cache of local noon-noon results. - """ - def __init__(self, db, tablename, columns='*', cachesize=10, timestamp='time_recorded', verbose=False): + """Lookup telemetry using a cache of local noon-noon results.""" + + def __init__( + self, + db, + tablename, + columns="*", + cachesize=10, + timestamp="time_recorded", + verbose=False, + ): # Run a test query. - test = db.select('telemetry.' + tablename, columns, maxrows=1) + test = db.select("telemetry." + tablename, columns, maxrows=1) self.db = db self.cachesize = int(cachesize) self.tablename = tablename self.columns = list(test.columns) if timestamp not in self.columns: self.columns.append(timestamp) - self.what = ','.join(self.columns) + self.what = ",".join(self.columns) self.timestamp = timestamp if verbose: - print(f'Initialized telemetry from {self.tablename} for {self.what}.') + print(f"Initialized telemetry from {self.tablename} for {self.what}.") self.cache = collections.OrderedDict() - self.MJD_epoch = pd.Timestamp('1858-11-17', tz='UTC') - self.one_day = pd.Timedelta('1 days') + self.MJD_epoch = pd.Timestamp("1858-11-17", tz="UTC") + self.one_day = pd.Timedelta("1 days") def __call__(self, night, what=None, MJD=None): """Return the telemetry for a single night. @@ -248,34 +276,39 @@ def __call__(self, night, what=None, MJD=None): if what is not None and what not in self.columns: raise ValueError(f'Invalid column name "{what}". Pick from {self.what}.') if MJD is not None and what is None: - raise ValueError(f'Must specify a column (what) with MJD values.') + raise ValueError(f"Must specify a column (what) with MJD values.") # Calculate local midnight on night = YYYYMMDD as midnight UTC + 31 hours (assuming local = UTC-7) try: - midnight = datetime.datetime.strptime(str(night), '%Y%m%d') + datetime.timedelta(days=1, hours=7) + midnight = datetime.datetime.strptime( + str(night), "%Y%m%d" + ) + datetime.timedelta(days=1, hours=7) except ValueError: raise ValueError(f'Badly formatted or invalid night: "{night}".') - self.midnight = pd.Timestamp(midnight, tz='UTC') + self.midnight = pd.Timestamp(midnight, tz="UTC") if night not in self.cache or MJD is not None: # Fetch data from local noon on YYYYMMDD until local noon the next day. - tmin = self.midnight - pd.Timedelta(12, 'hours') - tmax = self.midnight + pd.Timedelta(12, 'hours') + tmin = self.midnight - pd.Timedelta(12, "hours") + tmax = self.midnight + pd.Timedelta(12, "hours") if MJD is not None: MJD = np.asarray(MJD) # Check that the min MJD is within our range. timestamp = self.MJD_epoch + MJD.min() * self.one_day if timestamp < tmin or timestamp > tmax: - raise ValueError(f'MJD {MJD.min()} ({timestamp}) not in night {night}.') + raise ValueError(f"MJD {MJD.min()} ({timestamp}) not in night {night}.") # Check that the max MJD is within our range. timestamp = self.MJD_epoch + MJD.max() * self.one_day if timestamp < tmin or timestamp > tmax: - raise ValueError(f'MJD {MJD.max()} ({timestamp}) not in night {night}.') + raise ValueError(f"MJD {MJD.max()} ({timestamp}) not in night {night}.") if night not in self.cache: # Fetch the results. 
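            # (The query below grabs the full noon-to-noon window in one shot with
            # maxrows=None; the result is cached under this night and reused by
            # later calls until the cache is trimmed back to cachesize entries.)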
results = self.db.select( - self.tablename, self.what, maxrows=None, - where=f"{self.timestamp}>=TIMESTAMP '{tmin}' and {self.timestamp}<=TIMESTAMP '{tmax}'") + self.tablename, + self.what, + maxrows=None, + where=f"{self.timestamp}>=TIMESTAMP '{tmin}' and {self.timestamp}<=TIMESTAMP '{tmax}'", + ) # Convert the timestamp column to MJD. - results['MJD'] = (results[self.timestamp] - self.MJD_epoch) / self.one_day + results["MJD"] = (results[self.timestamp] - self.MJD_epoch) / self.one_day # Cache the results. self.cache[night] = results # Trim the cache if necessary. @@ -287,11 +320,13 @@ def __call__(self, night, what=None, MJD=None): if what is None: return results # Select the specified column (in addition to MJD). - results = results[['MJD', what]] + results = results[["MJD", what]] if MJD is None: return results # Interpolate to the specified time (assuming "what" is numeric). dtype = results[what].dtype if not np.issubdtype(dtype, np.number): - raise ValueError(f'Nearest neighbor lookup not implemented yet for dtype "{dtype}".') - return np.interp(MJD, results['MJD'], results[what]) + raise ValueError( + f'Nearest neighbor lookup not implemented yet for dtype "{dtype}".' + ) + return np.interp(MJD, results["MJD"], results[what]) From f62d5d6aa63fbbeb730e7a999837702d85ebfc5b Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 07:34:15 -0800 Subject: [PATCH 23/29] apply black formatting --- desietc/scripts/replay.py | 380 ++++++++++++++++++++++++++------------ 1 file changed, 261 insertions(+), 119 deletions(-) diff --git a/desietc/scripts/replay.py b/desietc/scripts/replay.py index 55254f9..6720651 100644 --- a/desietc/scripts/replay.py +++ b/desietc/scripts/replay.py @@ -15,6 +15,7 @@ Requires that matplotlib is installed, in addition to the desietc dependencies. """ + import os import sys import time @@ -29,7 +30,8 @@ import numpy as np import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") import matplotlib.pyplot as plt import desietc.etc @@ -39,109 +41,127 @@ def etcoffline(args): # Are we running on a recognized host with special defaults? host = None - if os.getenv('NERSC_HOST') is not None: - host = 'NERSC' - logging.info('Detected a NERSC host.') - elif os.getenv('DOS_HOME') is not None: - host = 'DOS' + if os.getenv("NERSC_HOST") is not None: + host = "NERSC" + logging.info("Detected a NERSC host.") + elif os.getenv("DOS_HOME") is not None: + host = "DOS" # Determine where the input raw data is located. if args.inpath is None: - if host == 'NERSC': - args.inpath = '/global/cfs/cdirs/desi/spectro/data/' - elif host == 'DOS': - args.inpath = '/exposures/desi/' + if host == "NERSC": + args.inpath = "/global/cfs/cdirs/desi/spectro/data/" + elif host == "DOS": + args.inpath = "/exposures/desi/" else: - print('No input path specified with --inpath.') + print("No input path specified with --inpath.") sys.exit(-1) args.inpath = pathlib.Path(args.inpath) if not args.inpath.exists(): - print('Non-existant input path: {0}'.format(args.inpath)) + print("Non-existant input path: {0}".format(args.inpath)) sys.exit(-2) - logging.info('Input path is {0}'.format(args.inpath)) + logging.info("Input path is {0}".format(args.inpath)) # Determine which directory to check for completed exposures. 
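    # (The check path can legitimately differ from the input path: per the
    # --checkpath help text, links are created there to signal that an exposure
    # is complete, which on a DOS host happens under /data/dts/exposures/raw/.)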
if args.checkpath is None: - if host == 'DOS': - args.checkpath = '/data/dts/exposures/raw/' + if host == "DOS": + args.checkpath = "/data/dts/exposures/raw/" else: args.checkpath = args.inpath if args.checkpath != args.inpath: args.checkpath = pathlib.Path(args.checkpath) if not args.checkpath.exists(): - print('Non-existant check path: {0}'.format(args.checkpath)) + print("Non-existant check path: {0}".format(args.checkpath)) sys.exit(-2) - logging.info('Check path is {0}'.format(args.inpath)) + logging.info("Check path is {0}".format(args.inpath)) if args.night is None: - print('Missing required argument: night.') + print("Missing required argument: night.") sys.exit(-1) nightpath = args.inpath / str(args.night) if not nightpath.exists(): - print('Non-existant directory for night: {0}'.format(nightpath)) + print("Non-existant directory for night: {0}".format(nightpath)) # Determine where the outputs will go. if args.outpath is None: - print('No output path specified with --outpath.') + print("No output path specified with --outpath.") sys.exit(-1) args.outpath = pathlib.Path(args.outpath) if not args.outpath.exists(): - print('Non-existant output path: {0}'.format(args.outpath)) + print("Non-existant output path: {0}".format(args.outpath)) sys.exit(-2) # Create the night output path if necessary. args.outpath = args.outpath / str(args.night) args.outpath.mkdir(exist_ok=True) - logging.info('Output path is {0}'.format(args.outpath)) + logging.info("Output path is {0}".format(args.outpath)) # Locate the GFA calibration data. if args.gfa_calib is None: - if host == 'NERSC': - args.gfa_calib = '/global/cfs/cdirs/desi/cmx/gfa/calib/GFA_calib.fits' - elif host == 'DOS': + if host == "NERSC": + args.gfa_calib = "/global/cfs/cdirs/desi/cmx/gfa/calib/GFA_calib.fits" + elif host == "DOS": # Should use a more permanent path than this which is synched via svn. - args.gfa_calib = '/data/desiobserver/gfadiq/GFA_calib.fits' + args.gfa_calib = "/data/desiobserver/gfadiq/GFA_calib.fits" else: - print('No GFA calibration data path specified with --gfa-calib.') + print("No GFA calibration data path specified with --gfa-calib.") sys.exit(-1) args.gfa_calib = pathlib.Path(args.gfa_calib) if not args.gfa_calib.exists(): - print('Non-existant GFA calibration path: {0}'.format(args.gfa_calib)) + print("Non-existant GFA calibration path: {0}".format(args.gfa_calib)) sys.exit(-2) # Locate the SKY calibration data. if args.sky_calib is None: - if host == 'NERSC': - args.sky_calib = '/global/cfs/cdirs/desi/cmx/sky/calib/SKY_calib.fits' - elif host == 'DOS': + if host == "NERSC": + args.sky_calib = "/global/cfs/cdirs/desi/cmx/sky/calib/SKY_calib.fits" + elif host == "DOS": # Should use a more permanent path than this which is synched via svn. - args.sky_calib = '/data/desiobserver/gfadiq/SKY_calib.fits' + args.sky_calib = "/data/desiobserver/gfadiq/SKY_calib.fits" else: - print('No SKY calibration data path specified with --sky-calib.') + print("No SKY calibration data path specified with --sky-calib.") sys.exit(-1) args.sky_calib = pathlib.Path(args.sky_calib) if not args.sky_calib.exists(): - print('Non-existant SKY calibration path: {0}'.format(args.sky_calib)) + print("Non-existant SKY calibration path: {0}".format(args.sky_calib)) sys.exit(-2) # Initialize the global ETC algorithm. 
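    # (Every argument here is forwarded from the corresponding command-line
    # option defined in main() below.)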
ETC = desietc.etc.ETCAlgorithm( - args.sky_calib, args.gfa_calib, args.psf_pixels, args.guide_pixels, args.max_dither, args.num_dither, - args.Ebv_coef, args.X_coef, args.ffrac_ref, args.nbad_threshold, args.nll_threshold, - args.avg_secs, args.avg_min_values, args.grid_resolution, args.parallel) + args.sky_calib, + args.gfa_calib, + args.psf_pixels, + args.guide_pixels, + args.max_dither, + args.num_dither, + args.Ebv_coef, + args.X_coef, + args.ffrac_ref, + args.nbad_threshold, + args.nll_threshold, + args.avg_secs, + args.avg_min_values, + args.grid_resolution, + args.parallel, + ) # Enable GMM debug messages if requested. if args.debug and args.gmm_debug: if args.parallel: - print('Ignoring --gmm-debug with --parallel') + print("Ignoring --gmm-debug with --parallel") else: ETC.GMM.set_debug(True) def process(expid): nonlocal nprocessed success = desietc.offline.replay_exposure( - ETC, nightpath, expid, - outpath=args.outpath, dry_run=args.dry_run, overwrite=args.overwrite, - only_complete=args.only_complete) + ETC, + nightpath, + expid, + outpath=args.outpath, + dry_run=args.dry_run, + overwrite=args.overwrite, + only_complete=args.only_complete, + ) if success: nprocessed += 1 @@ -153,22 +173,31 @@ def process(expid): if args.expid is not None: exposures = set() # Loop over comma-separated tokens. - for token in args.expid.split(','): + for token in args.expid.split(","): # Process a token of the form N or N1-N2. - limits = [int(expid) for expid in token.split('-')] + limits = [int(expid) for expid in token.split("-")] if len(limits) == 1: start, stop = limits[0], limits[0] + 1 elif len(limits) == 2: start, stop = limits[0], limits[1] + 1 else: - print('Invalid --expid (should be N or N1-N2): "{0}"'.format(args.expid)) + print( + 'Invalid --expid (should be N or N1-N2): "{0}"'.format( + args.expid + ) + ) sys.exit(-1) for expid in range(start, stop): process(expid) - expid_pattern = re.compile('^[0-9]{8}$') - get_exposures = lambda: set([ - int(p.name) for p in nightpath.glob('????????') if expid_pattern.match(p.name)]) + expid_pattern = re.compile("^[0-9]{8}$") + get_exposures = lambda: set( + [ + int(p.name) + for p in nightpath.glob("????????") + if expid_pattern.match(p.name) + ] + ) if args.batch or args.watch: # Find the existing exposures on this night. 
@@ -177,7 +206,7 @@ def process(expid): for expid in sorted(existing): process(expid) if args.watch: - logging.info('Watching for new exposures...hit ^C to exit') + logging.info("Watching for new exposures...hit ^C to exit") try: while True: time.sleep(args.watch_interval) @@ -186,81 +215,190 @@ def process(expid): process(expid) existing |= newexp except KeyboardInterrupt: - logging.info('Bye.') + logging.info("Bye.") pass finally: ETC.shutdown() - logging.info(f'Processed {nprocessed} exposures.') + logging.info(f"Processed {nprocessed} exposures.") def main(): # https://docs.python.org/3/howto/argparse.html parser = argparse.ArgumentParser( - description='Run ETC analysis offline from exposure FITS files.', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-v', '--verbose', action='store_true', - help='provide verbose output on progress') - parser.add_argument('--debug', action='store_true', - help='provide verbose and debugging output') - parser.add_argument('--traceback', action='store_true', - help='print traceback and enter debugger after an exception') - parser.add_argument('--logpath', type=str, metavar='PATH', - help='Path where logging output should be written') - parser.add_argument('--night', type=int, metavar='YYYYMMDD', - help='Night of exposure to process in the format YYYYMMDD') - parser.add_argument('--expid', type=str, metavar='N', - help='Exposure(s) to process specified as N or N1-N2 or N1,N2-N3 etc') - parser.add_argument('--batch', action='store_true', - help='Process all existing exposures on night') - parser.add_argument('--watch', action='store_true', - help='Wait for and process new exposures on night') - parser.add_argument('--watch-interval', type=float, metavar='T', default=2, - help='Interval in seconds to check for new exposures with --watch') - parser.add_argument('--psf-pixels', type=int, default=25, - help='Size of stamp to use for acquisition image PSF measurements (must be odd)') - parser.add_argument('--guide-pixels', type=int, default=31, - help='Size of stamp to use for guide star measurements (must be odd)') - parser.add_argument('--max-dither', type=float, default=7, - help='Maximum dither in pixels to use for guide star fits') - parser.add_argument('--num-dither', type=int, default=1200, - help='Number of dithers to use between (-max,+max)') - parser.add_argument('--Ebv-coef', type=float, default=2.165, - help='Coefficient to use for MW extinction') - parser.add_argument('--X-coef', type=float, default=0.114, - help='Coefficient to use for atmospheric extinction') - parser.add_argument('--ffrac-ref', type=float, default=0.56, - help='Reference value of FFRAC that defines nominal conditions') - parser.add_argument('--nbad-threshold', type=int, default=100, - help='Maximum allowed bad overscan pixels before warning') - parser.add_argument('--nll-threshold', type=float, default=10, - help='Maximum allowed GMM fit NLL value before warning') - parser.add_argument('--avg-secs', type=float, default=60, - help='Compute running averages over this time interval in seconds.') - parser.add_argument('--avg-min-values', type=int, default=3, - help='A running average requires at least this many values.') - parser.add_argument('--grid-resolution', type=float, default=0.5, - help='Resolution of ETC calculations in seconds') - parser.add_argument('--gmm-debug', action='store_true', - help='Generate debug log messages during GMM.fit') - parser.add_argument('--dry-run', action='store_true', - help='Check FITS file names and headers with 
no ETC processing') - parser.add_argument('--inpath', type=str, metavar='PATH', - help='Path where raw data is organized under YYYYMMDD directories') - parser.add_argument('--checkpath', type=str, metavar='PATH', - help='Optional path where links are created to indicate a complete exposure') - parser.add_argument('--outpath', type=str, metavar='PATH', - help='Path where outputs willl be organized under YYYYMMDD directories') - parser.add_argument('--overwrite', action='store_true', - help='Overwrite existing outputs') - parser.add_argument('--only-complete', action='store_true', - help='Only process exposures with all expected FITS files') - parser.add_argument('--gfa-calib', type=str, metavar='PATH', - help='Path to GFA calibration FITS file to use') - parser.add_argument('--sky-calib', type=str, metavar='PATH', - help='Path to SKYCAM calibration FITS file to use') - parser.add_argument('--parallel', action='store_true', - help='Process GFA cameras in parallel') + description="Run ETC analysis offline from exposure FITS files.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="provide verbose output on progress", + ) + parser.add_argument( + "--debug", action="store_true", help="provide verbose and debugging output" + ) + parser.add_argument( + "--traceback", + action="store_true", + help="print traceback and enter debugger after an exception", + ) + parser.add_argument( + "--logpath", + type=str, + metavar="PATH", + help="Path where logging output should be written", + ) + parser.add_argument( + "--night", + type=int, + metavar="YYYYMMDD", + help="Night of exposure to process in the format YYYYMMDD", + ) + parser.add_argument( + "--expid", + type=str, + metavar="N", + help="Exposure(s) to process specified as N or N1-N2 or N1,N2-N3 etc", + ) + parser.add_argument( + "--batch", action="store_true", help="Process all existing exposures on night" + ) + parser.add_argument( + "--watch", + action="store_true", + help="Wait for and process new exposures on night", + ) + parser.add_argument( + "--watch-interval", + type=float, + metavar="T", + default=2, + help="Interval in seconds to check for new exposures with --watch", + ) + parser.add_argument( + "--psf-pixels", + type=int, + default=25, + help="Size of stamp to use for acquisition image PSF measurements (must be odd)", + ) + parser.add_argument( + "--guide-pixels", + type=int, + default=31, + help="Size of stamp to use for guide star measurements (must be odd)", + ) + parser.add_argument( + "--max-dither", + type=float, + default=7, + help="Maximum dither in pixels to use for guide star fits", + ) + parser.add_argument( + "--num-dither", + type=int, + default=1200, + help="Number of dithers to use between (-max,+max)", + ) + parser.add_argument( + "--Ebv-coef", + type=float, + default=2.165, + help="Coefficient to use for MW extinction", + ) + parser.add_argument( + "--X-coef", + type=float, + default=0.114, + help="Coefficient to use for atmospheric extinction", + ) + parser.add_argument( + "--ffrac-ref", + type=float, + default=0.56, + help="Reference value of FFRAC that defines nominal conditions", + ) + parser.add_argument( + "--nbad-threshold", + type=int, + default=100, + help="Maximum allowed bad overscan pixels before warning", + ) + parser.add_argument( + "--nll-threshold", + type=float, + default=10, + help="Maximum allowed GMM fit NLL value before warning", + ) + parser.add_argument( + "--avg-secs", + type=float, + default=60, + 
help="Compute running averages over this time interval in seconds.", + ) + parser.add_argument( + "--avg-min-values", + type=int, + default=3, + help="A running average requires at least this many values.", + ) + parser.add_argument( + "--grid-resolution", + type=float, + default=0.5, + help="Resolution of ETC calculations in seconds", + ) + parser.add_argument( + "--gmm-debug", + action="store_true", + help="Generate debug log messages during GMM.fit", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Check FITS file names and headers with no ETC processing", + ) + parser.add_argument( + "--inpath", + type=str, + metavar="PATH", + help="Path where raw data is organized under YYYYMMDD directories", + ) + parser.add_argument( + "--checkpath", + type=str, + metavar="PATH", + help="Optional path where links are created to indicate a complete exposure", + ) + parser.add_argument( + "--outpath", + type=str, + metavar="PATH", + help="Path where outputs willl be organized under YYYYMMDD directories", + ) + parser.add_argument( + "--overwrite", action="store_true", help="Overwrite existing outputs" + ) + parser.add_argument( + "--only-complete", + action="store_true", + help="Only process exposures with all expected FITS files", + ) + parser.add_argument( + "--gfa-calib", + type=str, + metavar="PATH", + help="Path to GFA calibration FITS file to use", + ) + parser.add_argument( + "--sky-calib", + type=str, + metavar="PATH", + help="Path to SKYCAM calibration FITS file to use", + ) + parser.add_argument( + "--parallel", action="store_true", help="Process GFA cameras in parallel" + ) args = parser.parse_args() # Configure logging. @@ -270,11 +408,15 @@ def main(): level = logging.INFO else: level = logging.WARNING - logging.basicConfig(filename=args.logpath, level=level, - format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y%m%d %H:%M:%S') + logging.basicConfig( + filename=args.logpath, + level=level, + format="%(asctime)s %(levelname)s %(message)s", + datefmt="%Y%m%d %H:%M:%S", + ) # Silence matplotlib debug logging. 
- logging.getLogger('matplotlib.font_manager').level = max(level, logging.INFO) - logging.getLogger('matplotlib.ticker').disabled = max(level, logging.INFO) + logging.getLogger("matplotlib.font_manager").level = max(level, logging.INFO) + logging.getLogger("matplotlib.ticker").disabled = max(level, logging.INFO) try: retval = etcoffline(args) @@ -290,5 +432,5 @@ def main(): sys.exit(-1) -if __name__ == '__main__': +if __name__ == "__main__": main() From a6bbff0c17a7d697eb2bb7cf71fb617aa4d45ff7 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 07:37:14 -0800 Subject: [PATCH 24/29] fix import --- desietc/db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desietc/db.py b/desietc/db.py index 1e36e08..dd078f6 100644 --- a/desietc/db.py +++ b/desietc/db.py @@ -8,7 +8,7 @@ import collections import datetime -import os.path +import pathlib import io try: From 298ea43422290b4a309b3fda8332a9f452f95295 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 10:47:50 -0800 Subject: [PATCH 25/29] add options to sky.process_night --- desietc/sky.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index c13a908..dbae5d7 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -500,6 +500,8 @@ def process_night( SKY, DB, DATA=pathlib.Path("/global/cfs/cdirs/desi/spectro/data/"), + fname="etcsky-{night}.json" + nmax=None, verbose=False, ): """Process a single night of SkyCam data.""" @@ -634,14 +636,17 @@ def process_night( f" {night}/{exptag} N={nframes} T={Texp:.1f}C {np.round(mean,2)}" ) - # if k >= 2: - # break + if nmax is not None and k >= nmax: + break except Exception as e: print(f"Skipping {night}/{exptag} with error: {e}") - fname = f"out/etcsky-{night}.json" - with open(fname, "w") as f: - json.dump(results, f, cls=desietc.util.NumpyEncoder) - if verbose: - print(f"Saved results to {fname}") + if fname is not None: + fname = fname.format(night=night) + with open(fname, "w") as f: + json.dump(results, f, cls=desietc.util.NumpyEncoder) + if verbose: + print(f"Saved results to {fname}") + + return results From 454427756982a10882d233847900aabe4067c135 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 10:57:33 -0800 Subject: [PATCH 26/29] add timing to output --- desietc/sky.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index dbae5d7..6f710b4 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -5,6 +5,7 @@ import pathlib import json import warnings +import time try: import DOSlib.logger as logging @@ -560,7 +561,7 @@ def process_night( flux = np.zeros((3, 2, nframes)) dflux = np.zeros((3, 2, nframes)) offset = np.zeros((2, 2, nframes)) - ##print(f'{night}/{exptag} has {nframes} frames') + elapsed = np.zeros((2, 2, nframes)) # Get the temperature to use from the start of this exposure tstamps = np.array( pd.to_datetime(meta["DATE-OBS"]).astype(np.int64) @@ -583,6 +584,7 @@ def process_night( with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) # Calculate flux in this camera using the old method + start = time.time() flux[0, camid, frame], dflux[0, camid, frame] = SKY.setraw( raw[frame], cam, @@ -590,6 +592,7 @@ def process_night( fit_centroids=False, Temperature=None, ) + t1 = time.time() # Calculate flux in this camera using the new method (including temperature correction) flux[1, camid, frame], dflux[1, camid, frame] = SKY.setraw( raw[frame], @@ -599,11 +602,15 @@ def 
process_night( fast_centroids=True, Temperature=Texp, ) + t2 = time.time() # Save the flux before the temperature correction is applied flux[2, camid, frame] = SKY.flux_notemp dflux[2, camid, frame] = SKY.fluxerr_notemp # Save the fitted centroid shift offset[:, camid, frame] = [SKY.fit_dx, SKY.fit_dy] + # Save timing + elapsed[0, camid, frame] = t1 - start + elapsed[1, camid, frame] = t2 - t1 # Normalize fluxes to exposure time exptime = np.array(meta.EXPTIME) @@ -628,6 +635,7 @@ def process_night( flux=np.round(flux, nround), dflux=np.round(dflux, nround), offset=np.round(offset, nround), + elapsed=np.round(offset, nround), ) results["exps"].append(expdata) From 53cbdd13c786e76d2a829b1021a55cade07e3089 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 10:58:25 -0800 Subject: [PATCH 27/29] fix typo --- desietc/sky.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index 6f710b4..1ab19cf 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -501,7 +501,7 @@ def process_night( SKY, DB, DATA=pathlib.Path("/global/cfs/cdirs/desi/spectro/data/"), - fname="etcsky-{night}.json" + fname="etcsky-{night}.json", nmax=None, verbose=False, ): @@ -645,7 +645,7 @@ def process_night( ) if nmax is not None and k >= nmax: - break + break except Exception as e: print(f"Skipping {night}/{exptag} with error: {e}") From 7c97bd93d437bf6ad54580c221e1347c609f7450 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 11:07:13 -0800 Subject: [PATCH 28/29] add fast/slow option, fix elapsed --- desietc/sky.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/desietc/sky.py b/desietc/sky.py index 1ab19cf..12733e3 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -501,6 +501,7 @@ def process_night( SKY, DB, DATA=pathlib.Path("/global/cfs/cdirs/desi/spectro/data/"), + fast_centroids=True, fname="etcsky-{night}.json", nmax=None, verbose=False, @@ -529,9 +530,11 @@ def process_night( print( f"Found {nexps} exposures in {good_exps[0]}-{good_exps[-1]} with DESI and SKY data for {night}" ) + if nmax is not None: + nexps = min(nexps, nmax) # Initialize the results - results = dict(night=night, nexps=nexps, exps=[]) + results = dict(night=night, nexps=nexps, fast=fast_centroids, exps=[]) # Initialize temperature interpolation coude_temp = get_db_temp(night, DB) @@ -543,7 +546,7 @@ def process_night( ) # Loop over exposures - for k, exptag in enumerate(good_exps): + for k, exptag in enumerate(good_exps[:nexps]): if verbose: print(f"Processing [{k+1:2d}/{nexps:2d}] {night}/{exptag}...") try: @@ -599,7 +602,7 @@ def process_night( cam, refit=False, fit_centroids=True, - fast_centroids=True, + fast_centroids=fast_centroids, Temperature=Texp, ) t2 = time.time() @@ -635,7 +638,7 @@ def process_night( flux=np.round(flux, nround), dflux=np.round(dflux, nround), offset=np.round(offset, nround), - elapsed=np.round(offset, nround), + elapsed=np.round(elapsed, nround), ) results["exps"].append(expdata) From 82556e8b650b653276bbbaf1a0671c70e4adf670 Mon Sep 17 00:00:00 2001 From: David Kirkby Date: Sun, 9 Feb 2025 17:54:20 -0800 Subject: [PATCH 29/29] make DB optional --- desietc/sky.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/desietc/sky.py b/desietc/sky.py index 12733e3..60c0d9b 100644 --- a/desietc/sky.py +++ b/desietc/sky.py @@ -23,6 +23,7 @@ import fitsio import desietc.util +import desietc.db class BGFitter(object): @@ -499,7 +500,7 @@ def get_db_temp( def process_night( night, SKY, - DB, + 
DB=None, DATA=pathlib.Path("/global/cfs/cdirs/desi/spectro/data/"), fast_centroids=True, fname="etcsky-{night}.json", @@ -537,6 +538,8 @@ def process_night( results = dict(night=night, nexps=nexps, fast=fast_centroids, exps=[]) # Initialize temperature interpolation + if DB is None: + DB = desietc.db.DB(http_fallback=False) coude_temp = get_db_temp(night, DB) coude_t = pd.to_datetime(coude_temp["time_recorded"]).astype(np.int64) coude_T = coude_temp["temp"].astype(float)
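A usage note on the reprocessing entry point assembled over the last few patches: the sketch below shows how process_night might be driven interactively. It is a minimal sketch, assuming NERSC access to the default DATA tree and a reachable telemetry database; the night value and nmax are illustrative.

import desietc.sky

# Build the SkyCam analysis object; the defaults tabulate offset spots on a
# 13x13 centroid grid spanning (-3, +3) pixels in x and (-2, +4) in y.
SKY = desietc.sky.SkyCamera(calib_name="SKY_calib.fits")

# Passing DB=None lets process_night open its own direct database connection
# for the dome-temperature telemetry; nmax=2 limits the run to a quick check,
# and fname=None would skip writing the per-night JSON file.
results = desietc.sky.process_night(
    20250209, SKY, DB=None, fast_centroids=True, nmax=2, verbose=True
)

# Each entry of results["exps"] carries the rounded per-frame fluxes, errors,
# centroid offsets, and per-method timings saved in the loop above.
for exp in results["exps"]:
    print(exp["exptag"], exp["Texp"], exp["mean"])

Returning the results dict while optionally serializing it with desietc.util.NumpyEncoder keeps the function usable both from a notebook and from a batch driver that writes one JSON file per night.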