diff --git a/.gitignore b/.gitignore
index b56c81e..b291312 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+*.png
+*random*.fits
+*.DS_Store
*.pyc
.ipynb_checkpoints/
diff --git a/flystar/align.py b/flystar/align.py
index 35156a2..db37954 100755
--- a/flystar/align.py
+++ b/flystar/align.py
@@ -4,6 +4,7 @@
from flystar import plots
from flystar.starlists import StarList
from flystar.startables import StarTable
+from flystar import motion_model
from astropy.table import Table, Column, vstack
import datetime
import copy
@@ -11,26 +12,25 @@
import pdb
import time
import warnings
+import pickle
from astropy.utils.exceptions import AstropyUserWarning
-# Keep a list of columns that are "aggregated" motion model terms.
-motion_model_col_names = ['x0', 'x0e', 'y0', 'y0e',
- 'vx', 'vxe', 'vy', 'vye',
- 'ax', 'axe', 'ay', 'aye',
- 't0', 'm0', 'm0e', 'use_in_trans']
-
class MosaicSelfRef(object):
def __init__(self, list_of_starlists, ref_index=0, iters=2,
dr_tol=[1, 1], dm_tol=[2, 1],
outlier_tol=[None, None],
trans_args=[{'order': 2}, {'order': 2}],
init_order=1,
- mag_trans=True, mag_lim=None, weights=None,
+ mag_trans=True, mag_lim=None, trans_weights=None, vel_weights='var',
trans_input=None, trans_class=transforms.PolyTransform,
- use_vel=False, calc_trans_inverse=False,
+ calc_trans_inverse=False,
init_guess_mode='miracle', iter_callback=None,
+ default_motion_model='Fixed',
+ motion_model_dict = {},
+ use_scipy=True,
+ absolute_sigma=False,
+ save_path=None,
verbose=True):
-
"""
Make a mosaic object by passing in a list of starlists and then running fit().
@@ -50,8 +50,7 @@ def __init__(self, list_of_starlists, ref_index=0, iters=2,
star_list['w'] * ref_list['w'] * weight_from_keyword (see the weights parameter)
- for those stars not trimmed out by the other criteria.
-
+ for those stars not trimmed out by the other criteria.
Optional Parameters
----------
@@ -80,7 +79,7 @@ def __init__(self, list_of_starlists, ref_index=0, iters=2,
magnitudes in each list to bring them into a common magnitude system. This is
essential for matching (with finite dm_tol) starlists of different filters or
starlists that are not photometrically calibrated. Note that the final_table columns
- of 'm', 'm0', and 'm0e' will contain the transformed magnitudes while the
+ of 'm', 'm0', and 'm0_err' will contain the transformed magnitudes while the
final_table column 'm_orig' will contain the original un-transformed magnitudes.
If mag_trans = False, then no such zeropoint offset it applied at any point.
@@ -90,11 +89,15 @@ def __init__(self, list_of_starlists, ref_index=0, iters=2,
separately for each list and each iteration, you need to pass in a 2D array that
has shape (N_lists, 2).
- weights : str
+ trans_weights : str
Either None (def), 'both,var', 'list,var', or 'ref,var' depending on whether you want
to weight by the positional uncertainties (variances) in the individual starlists, or also with
the uncertainties in the reference frame itself. Note weighting only works when there
are positional uncertainties availabe. Other options include 'both,std', 'list,std', 'list,var'.
+
+ vel_weights : str
+ Either 'var' (def) or 'std', depending on whether you want to weight the motion model
+        fits by the variance or standard deviation of the position data.
trans_input : array or list of transform objects
def = None. If not None, then this should contain an array or list of transform
@@ -110,12 +113,6 @@ def = None. If not None, then this should contain an array or list of transform
then the transformation argument (i.e. order) will be changed for every iteration in
iters.
- use_vel : boolean
- If velocities are present in the reference list and use_vel == True, then during
- each iteration of the alignment, the reference list will be propogated in time
- using the velocity information. So all transformations will be derived w.r.t.
- the propogated positions. See also update_vel.
-
calc_trans_inverse: boolean
If true, then calculate the inverse transformation (from reference to starlist)
in addition to the normal transformation (from starlist to reference). The inverse
@@ -131,8 +128,25 @@ def = None. If not None, then this should contain an array or list of transform
iter_callback : None or function
A function to call (that accepts a StarTable object and an iteration number)
- at the end of every iteration. This can be used for plotting or printing state.
+ at the end of every iteration. This can be used for plotting or printing state.
+
+ default_motion_model : string
+ Name of motion model to use for new or unassigned stars
+
+ motion_model_dict : None or dict
+ Dict of motion model name keys (strings) and corresponding MotionModel object values
+
+ use_scipy : bool, optional
+ If True, use scipy.optimize.curve_fit for velocity fitting. If False, use linear
+        algebra fitting if possible, by default True.
+ absolute_sigma : bool, optional
+ If True, the velocity fit will use absolute errors in the data. If False, relative
+ errors will be used, by default False.
+
+ save_path : str, optional
+ Path to save the MosaicSelfRef object as a pickle file.
+
verbose : int (0 to 9, inclusive)
Controls the verbosity of print statements. (0 least, 9 most verbose).
For backwards compatibility, 0 = False, 9 = True.
@@ -178,13 +192,18 @@ def = None. If not None, then this should contain an array or list of transform
self.init_order = init_order
self.mag_trans = mag_trans
self.mag_lim = mag_lim
- self.weights = weights
+ self.trans_weights = trans_weights
+ self.vel_weights = vel_weights
self.trans_input = trans_input
self.trans_class = trans_class
- self.calc_trans_inverse = calc_trans_inverse
- self.use_vel = use_vel
+ self.calc_trans_inverse = calc_trans_inverse
+ self.motion_model_dict = motion_model_dict
+ self.use_scipy = use_scipy
+ self.absolute_sigma = absolute_sigma
+ self.default_motion_model = default_motion_model
self.init_guess_mode = init_guess_mode
self.iter_callback = iter_callback
+ self.save_path = save_path
self.verbose = verbose
# For backwards compatibility.
@@ -214,6 +233,10 @@ def = None. If not None, then this should contain an array or list of transform
# is passed in, replicate for all star lists, all loop iterations.
##########
self.setup_trans_info()
+
+ # Make sure the motion models are ready
+ self.motion_model_dict = motion_model.validate_motion_model_dict(self.motion_model_dict,
+ StarTable(), self.default_motion_model)
return
@@ -257,10 +280,7 @@ def fit(self):
x0e
y0e
m0e
- vx (only if use_vel=True)
- vy (only if use_vel=True)
- vxe (only if use_vel=True)
- vye (only if use_vel=True)
+ additional motion_model columns
"""
##########
@@ -270,8 +290,7 @@ def fit(self):
# x_orig, y_orig, m_orig, (opt. errors) -- the transformed errors for the lists: 2D
# w, w_orig (optiona) -- the input and output weights of stars in transform: 2D
##########
- self.ref_table = self.setup_ref_table_from_starlist(self.star_lists[self.ref_index])
-
+ self.ref_table = self.setup_ref_table_from_starlist(self.star_lists[self.ref_index],motion_model_used='Fixed')
# Save the reference index to the meta data on the reference list.
self.ref_table.meta['ref_list'] = self.ref_index
@@ -306,7 +325,7 @@ def fit(self):
self.ref_table.detections()
### Drop all stars that have 0 detections.
- idx = np.where(self.ref_table['n_detect'] == 0)[0]
+ idx = np.where((self.ref_table['n_detect'] == 0))[0]
print(' *** Getting rid of {0:d} out of {1:d} junk sources'.format(len(idx), len(self.ref_table)))
self.ref_table.remove_rows(idx)
@@ -328,6 +347,7 @@ def fit(self):
print("**********")
self.match_lists(self.dr_tol[-1], self.dm_tol[-1])
+ # Hard-coded not to keep ref values for MosaicSelfRef
self.update_ref_table_aggregates()
##########
@@ -342,13 +362,16 @@ def fit(self):
self.ref_table.detections()
### Drop all stars that have 0 detections.
- idx = np.where(self.ref_table['n_detect'] == 0)[0]
+ idx = np.where((self.ref_table['n_detect'] == 0))[0]
print(' *** Getting rid of {0:d} out of {1:d} junk sources'.format(len(idx), len(self.ref_table)))
self.ref_table.remove_rows(idx)
if self.iter_callback != None:
self.iter_callback(self.ref_table, nn)
+ if self.save_path:
+ with open(self.save_path, 'wb') as file:
+ pickle.dump(self, file)
return
def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_args):
@@ -386,7 +409,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
# Only use "use_in_trans" reference stars, even for initial guessing.
keepers = np.where(ref_list['use_in_trans'] == True)[0]
- trans = trans_initial_guess(ref_list[keepers], star_list_orig_trim, self.trans_args[0],
+ trans = trans_initial_guess(ref_list[keepers], star_list_orig_trim, self.trans_args[0], self.motion_model_dict,
mode=self.init_guess_mode,
order=self.init_order,
verbose=self.verbose,
@@ -396,7 +419,7 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
star_list_T.transform_xym(trans) # trimmed, transformed
else:
star_list_T.transform_xy(trans)
-
+
# Match stars between the transformed, trimmed lists.
idx1, idx2, dr, dm = match.match(star_list_T['x'], star_list_T['y'], star_list_T['m'],
ref_list['x'], ref_list['y'], ref_list['m'],
@@ -495,14 +518,22 @@ def match_and_transform(self, ref_mag_lim, dr_tol, dm_tol, outlier_tol, trans_ar
## Make plot, if desired
plots.trans_positions(ref_list, ref_list[idx_ref], star_list_T, star_list_T[idx_lis],
fileName='{0}'.format(star_list_T['t'][0]))
-
+
### Update the observed (but transformed) values in the reference table.
self.update_ref_table_from_list(star_list, star_list_T, ii, idx_ref, idx_lis, idx2)
-
- ### Update the "average" values to be used as the reference frame for the next list.
- if self.update_ref_orig != 'periter':
- self.update_ref_table_aggregates()
+ ### Update the "average" values to be used as the reference frame for the next list.
+ keep_ref_orig = (self.update_ref_orig==False) or (self.update_ref_orig=='atend') or (self.update_ref_orig=='periter' and ii<(len(self.star_lists)-1))
+ if keep_ref_orig and ii<(len(self.star_lists)-1):
+ keep_orig = np.where(self.ref_table['ref_orig'] | np.isnan(self.ref_table['x'][:,ii]))[0]
+ elif keep_ref_orig:
+ keep_orig = np.where(self.ref_table['ref_orig'])[0]
+ elif ii<(len(self.star_lists)-1):
+ keep_orig = np.where(np.isnan(self.ref_table['x'][:,ii]))[0]
+ else:
+ keep_orig=None
+ self.update_ref_table_aggregates(keep_orig=keep_orig)
+
# Print out some metrics
if self.verbose > 0:
msg1 = ' {0:2s} (mean and std) for {1:10s}: {2:8.5f} +/- {3:8.5f}'
@@ -557,14 +588,15 @@ def setup_trans_info(self):
return
- def setup_ref_table_from_starlist(self, star_list):
+ def setup_ref_table_from_starlist(self, star_list, motion_model_used=None):
"""
Start with the reference list.... this will change and grow
over time, so make a copy that we will keep updating.
- The reference table will contain one columne for every named
+ The reference table will contain one column for every named
array in the original reference star list.
"""
col_arrays = {}
+ motion_model_col_names = motion_model.get_all_motion_model_param_names(with_errors=True, with_fixed=True) + ['m0','m0_err','use_in_trans', 'motion_model_input', 'motion_model_used']
for col_name in star_list.colnames:
if col_name == 'name':
# The "name" column will be 1D; but we will also add a "name_in_list" column.
@@ -617,7 +649,7 @@ def setup_ref_table_from_starlist(self, star_list):
# just fill these tables with zeros. We need something
# in these columns in order for the error propagation to
# work later on.
- new_err_cols = ['x0e', 'y0e', 'm0e']
+ new_err_cols = ['x0_err', 'y0_err', 'm0_err']
orig_err_cols = ['xe', 'ye', 'me']
for ii in range(len(new_err_cols)):
# If the orig col name (e.g. xe) is in the ref_table, but the new col name
@@ -626,7 +658,6 @@ def setup_ref_table_from_starlist(self, star_list):
# Some munging to convert data shape from (N,1) to (N,),
# since these are all 1D cols
vals = np.transpose(np.array(ref_table[orig_err_cols[ii]]))[0]
-
# Now add to ref_table
new_col = Column(vals, name=new_err_cols[ii])
ref_table.add_column(new_col)
@@ -658,13 +689,20 @@ def setup_ref_table_from_starlist(self, star_list):
# Keep track of whether this is an original reference star.
col_ref_orig = Column(np.ones(len(ref_table), dtype=bool), name='ref_orig')
ref_table.add_column(col_ref_orig)
-
# Now reset the original values to invalids... they will be filled in
# at later times. Preserve content only in the columns: name, x0, y0, m0 (and 0e).
# Note that these are all the 1D columsn.
for col_name in ref_table.colnames:
if len(ref_table[col_name].data.shape) == 2: # Find the 2D columns
- ref_table._set_invalid_list_values(col_name, -1)
+ ref_table._set_invalid_list_values(col_name, -1)
+
+ if 'motion_model_input' not in ref_table.colnames:
+ ref_table.add_column(Column(np.repeat(self.default_motion_model, len(ref_table)), name='motion_model_input'))
+ if 'motion_model_used' not in ref_table.colnames:
+ if motion_model_used is None:
+ ref_table.add_column(Column(np.repeat(self.default_motion_model, len(ref_table)), name='motion_model_used'))
+ else:
+ ref_table.add_column(Column(np.repeat(motion_model_used, len(ref_table)), name='motion_model_used'))
return ref_table
@@ -771,7 +809,8 @@ def update_ref_table_from_list(self, star_list, star_list_T, ii, idx_ref, idx_li
self.ref_table['used_in_trans'][idx_ref_in_trans, ii] = True
### Add the unmatched stars and grow the size of the reference table.
- self.ref_table, idx_lis_new, idx_ref_new = add_rows_for_new_stars(self.ref_table, star_list, idx_lis)
+ self.ref_table, idx_lis_new, idx_ref_new = add_rows_for_new_stars(self.ref_table, star_list, idx_lis,
+ default_motion_model=self.default_motion_model)
if len(idx_ref_new) > 0:
if self.verbose > 0:
print(' Adding {0:d} new stars to the reference table.'.format(len(idx_ref_new)))
@@ -792,66 +831,62 @@ def update_ref_table_from_list(self, star_list, star_list_T, ii, idx_ref, idx_li
return
- def update_ref_table_aggregates(self, n_boot=0, weighting='var', use_scipy=True, absolute_sigma=False, show_progress=True):
+ def update_ref_table_aggregates(self, keep_orig=None, n_boot=0):
"""
Average positions or fit velocities.
Average magnitudes.
Calculate bootstrap errors if desired.
- Update the use_in_trans values as needed.
+        Update the use_in_trans values as needed. TODO: document when/how use_in_trans is updated.
Updates aggregate columns in self.ref_table in place.
"""
# Keep track of the original reference values.
# In certain cases, we will NOT update these.
- if not self.update_ref_orig:
- ref_orig_idx = np.where(self.ref_table['ref_orig'] == True)[0]
- x0_orig = self.ref_table['x0'][ref_orig_idx]
- y0_orig = self.ref_table['y0'][ref_orig_idx]
- m0_orig = self.ref_table['m0'][ref_orig_idx]
- x0e_orig = self.ref_table['x0e'][ref_orig_idx]
- y0e_orig = self.ref_table['y0e'][ref_orig_idx]
- m0e_orig = self.ref_table['m0e'][ref_orig_idx]
-
- if self.use_vel:
- vx_orig = self.ref_table['vx'][ref_orig_idx]
- vy_orig = self.ref_table['vy'][ref_orig_idx]
- vxe_orig = self.ref_table['vxe'][ref_orig_idx]
- vye_orig = self.ref_table['vye'][ref_orig_idx]
- t0_orig = self.ref_table['t0'][ref_orig_idx]
-
- if self.use_vel:
+ if keep_orig is not None:
+ vals_orig = {}
+ vals_orig['m0'] = self.ref_table['m0'][keep_orig]
+ vals_orig['m0_err'] = self.ref_table['m0_err'][keep_orig]
+ motion_model_class_names = self.ref_table['motion_model_input'].tolist()
+ if 'motion_model_used' in self.ref_table.keys():
+ motion_model_class_names += self.ref_table['motion_model_used'][keep_orig].tolist()
+ vals_orig['motion_model_used'] = self.ref_table['motion_model_used'][keep_orig]
+ motion_model_col_names = motion_model.get_list_motion_model_param_names(motion_model_class_names, with_errors=True, with_fixed=True)
+ for mm in motion_model_col_names:
+ if mm in self.ref_table.keys():
+ vals_orig[mm] = self.ref_table[mm][keep_orig]
+ fit_star_idxs = [idx for idx in range(len(self.ref_table)) if idx not in keep_orig]
+ else:
+ fit_star_idxs = None
+ #pdb.set_trace()
+ # Figure out whether motion fits are necessary
+ all_fixed = np.all(self.ref_table['motion_model_input']=='Fixed')
+ if all_fixed:
+ weighted_xy = ('xe' in self.ref_table.colnames) and ('ye' in self.ref_table.colnames)
+ weighted_m = ('me' in self.ref_table.colnames)
+ self.ref_table.combine_lists_xym(weighted_xy=weighted_xy, weighted_m=weighted_m)
+ else:
# Combine positions with a velocity fit.
- self.ref_table.fit_velocities(weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, bootstrap=n_boot, verbose=self.verbose, show_progress=show_progress)
-
+ self.ref_table.fit_velocities(bootstrap=n_boot,
+ verbose=self.verbose,
+ show_progress=(self.verbose>0),
+ default_motion_model=self.default_motion_model,
+ select_stars=fit_star_idxs,
+ motion_model_dict=self.motion_model_dict,
+ weighting=self.vel_weights,
+ use_scipy=self.use_scipy,
+ absolute_sigma=self.absolute_sigma)
+
# Combine (transformed) magnitudes
if 'me' in self.ref_table.colnames:
weights_col = None
else:
weights_col = 'me'
-
self.ref_table.combine_lists('m', weights_col=weights_col, ismag=True)
- else:
- weighted_xy = ('xe' in self.ref_table.colnames) and ('ye' in self.ref_table.colnames)
- weighted_m = ('me' in self.ref_table.colnames)
-
- self.ref_table.combine_lists_xym(weighted_xy=weighted_xy, weighted_m=weighted_m)
-
# Replace the originals if we are supposed to keep them fixed.
- if not self.update_ref_orig:
- self.ref_table['x0'][ref_orig_idx] = x0_orig
- self.ref_table['y0'][ref_orig_idx] = y0_orig
- self.ref_table['m0'][ref_orig_idx] = m0_orig
- self.ref_table['x0e'][ref_orig_idx] = x0e_orig
- self.ref_table['y0e'][ref_orig_idx] = y0e_orig
- self.ref_table['m0e'][ref_orig_idx] = m0e_orig
-
- if self.use_vel:
- self.ref_table['vx'][ref_orig_idx] = vx_orig
- self.ref_table['vy'][ref_orig_idx] = vy_orig
- self.ref_table['vxe'][ref_orig_idx] = vxe_orig
- self.ref_table['vye'][ref_orig_idx] = vye_orig
- self.ref_table['t0'][ref_orig_idx] = t0_orig
+ if keep_orig is not None:
+ for val in vals_orig.keys():
+ self.ref_table[val][keep_orig] = vals_orig[val]
return
@@ -870,18 +905,18 @@ def get_weights_for_lists(self, ref_list, star_list):
var_xlis = 0.0
var_ylis = 0.0
- if self.weights != None:
- if self.weights == 'both,var':
+ if self.trans_weights != None:
+ if self.trans_weights == 'both,var':
weight = 1.0 / (var_xref + var_xlis + var_yref + var_ylis)
- if self.weights == 'both,std':
+ if self.trans_weights == 'both,std':
weight = 1.0 / np.sqrt(var_xref + var_xlis + var_yref + var_ylis)
- if self.weights == 'ref,var':
+ if self.trans_weights == 'ref,var':
weight = 1.0 / (var_xref + var_yref)
- if self.weights == 'ref,std':
+ if self.trans_weights == 'ref,std':
weight = 1.0 / np.sqrt(var_xref + var_yref)
- if self.weights == 'list,var':
+ if self.trans_weights == 'list,var':
weight = 1.0 / (var_xlis + var_ylis)
- if self.weights == 'list,std':
+ if self.trans_weights == 'list,std':
weight = 1.0 / np.sqrt(var_xlis, var_ylis)
else:
weight = None
@@ -923,12 +958,13 @@ def match_lists(self, dr_tol, dm_tol):
else:
star_list_T.transform_xy(self.trans_list[ii])
- xref, yref = get_pos_at_time(star_list_T['t'][0], self.ref_table, use_vel=self.use_vel) # optional velocity propogation.
+ xref, yref = get_pos_at_time(star_list_T['t'][0], self.ref_table, self.motion_model_dict)
mref = self.ref_table['m0']
idx_lis, idx_ref, dr, dm = match.match(star_list_T['x'], star_list_T['y'], star_list_T['m'],
xref, yref, mref,
dr_tol=dr_tol, dm_tol=dm_tol, verbose=self.verbose)
+
if self.verbose > 0:
fmt = 'Matched {0:5d} out of {1:5d} stars in list {2:2d} [dr = {3:7.4f} +/- {4:6.4f}, dm = {5:5.2f} +/- {6:4.2f}'
print(fmt.format(len(idx_lis), len(star_list_T), ii, dr.mean(), dr.std(), dm.mean(), dm.std()))
@@ -955,36 +991,24 @@ def get_ref_list_from_table(self, epoch):
# Reference stars will be named.
name = self.ref_table['name']
- if self.use_vel and ('vx' in self.ref_table.colnames):
- # First check if we should use velocities and if they exist.
- dt = epoch - self.ref_table['t0']
- x = self.ref_table['x0'] + (self.ref_table['vx'] * dt)
- y = self.ref_table['y0'] + (self.ref_table['vy'] * dt)
-
- xe = np.hypot(self.ref_table['x0e'], self.ref_table['vxe']*dt)
- ye = np.hypot(self.ref_table['y0e'], self.ref_table['vye']*dt)
-
- idx = np.where(np.isfinite(self.ref_table['vx']) == False)[0]
- x[idx] = self.ref_table['x0'][idx]
- y[idx] = self.ref_table['y0'][idx]
- xe[idx] = self.ref_table['x0e'][idx]
- ye[idx] = self.ref_table['y0e'][idx]
+ if ('motion_model_used' in self.ref_table.colnames):
+ x,y,xe,ye = self.ref_table.get_star_positions_at_time(epoch, self.motion_model_dict, allow_alt_models=True)
else:
# No velocities... just used average positions.
x = self.ref_table['x0']
y = self.ref_table['y0']
- if 'x0e' in self.ref_table.colnames:
- xe = self.ref_table['x0e']
- ye = self.ref_table['y0e']
+ if 'x0_err' in self.ref_table.colnames:
+ xe = self.ref_table['x0_err']
+ ye = self.ref_table['y0_err']
else:
xe = None
ye = None
m = self.ref_table['m0']
- if 'm0e' in self.ref_table.colnames:
- me = self.ref_table['m0e']
+ if 'm0_err' in self.ref_table.colnames:
+ me = self.ref_table['m0_err']
else:
me = None
@@ -1026,7 +1050,7 @@ def reset_ref_values(self, exclude=None):
return
- def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_bootstrap=True, weighting='var', use_scipy=True, absolute_sigma=False, show_progress=True):
+ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_bootstrap=True, weighting='var', use_scipy=True, absolute_sigma=False, show_progress=True, update_errors=False):
"""
Function to calculate bootstrap errors for the transformations as well
as the proper motions. For each iteration, this will:
@@ -1071,25 +1095,21 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
'var' or 'std' weighting for velocity fitting, by default 'var'. If 'var', use the variance of the residuals to weight the fit.
If 'std', use the standard deviation of the residuals to weight the fit.
- use_scipy: boolean
- If True, use scipy.optimize.curve_fit to fit the velocity. If False, use flystar.fit_velocity.linear_fit, by default True.
-
absolute_sigma: boolean
If True, use the absolute sigma in the velocity fitting. If False, use the relative sigma, by default False.
+ update_errors: boolean
+ If True, save the starlist errors as xe_list, bootstrap errors as xe_boot, and their quad sum as xe (and likewise for ye and me). If False (default), leave the starlist errors in place as xe and bootstrap errors as xe_boot.
Output:
------
- Seven new columns will be added to self.ref_table:
+ New columns will be added to self.ref_table:
'xe_boot', 2D column: bootstrap x pos uncertainties due to transformation for each epoch
'ye_boot', 2D column: bootstrap y pos uncertainties due to transformation for each epoch
'me_boot', 2D column: bootstrap mag uncertainties due to transformation for each epoch
If calc_vel_in_bootstrap:
- 'x0e_boot', 1D column: bootstrap uncertainties in x0 for PM fit
- 'y0e_boot', 1D column: bootstrap uncertainties in y0 for PM fit
- 'vxe_boot', 1D column: bootstrap uncertainties in vx for PM fit
- 'vye_boot', 1D column: bootstrap uncertainties in vy for PM fit
+        '<param>_err_boot', 1D columns: bootstrap uncertainties in each motion model fit parameter
For stars that fail boot_epochs_min criteria, np.nan is used
"""
@@ -1110,20 +1130,32 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
t0_arr = t0_arr[idx_good]
else:
idx_good = np.arange(0, len(ref_table), 1)
- idx_ref = np.where(ref_table['use_in_trans'] == True)
-
- # Initialize output arrays
- x_trans_arr = np.ones((len(ref_table['x']), n_boot, n_epochs)) * -999
- y_trans_arr = np.ones((len(ref_table['x']), n_boot, n_epochs)) * -999
- m_trans_arr = np.ones((len(ref_table['x']), n_boot, n_epochs)) * -999
- xe_trans_arr = np.ones((len(ref_table['x']), n_boot, n_epochs)) * -999
- ye_trans_arr = np.ones((len(ref_table['x']), n_boot, n_epochs)) * -999
- me_trans_arr = np.ones((len(ref_table['x']), n_boot, n_epochs)) * -999
+
+ #idx_ref = np.where(ref_table['use_in_trans'] == True)
+
+ # Initialize sums for output
+ x_boot_sum = np.zeros((len(ref_table['x']), n_epochs))
+ x2_boot_sum = np.zeros((len(ref_table['x']), n_epochs))
+ y_boot_sum = np.zeros((len(ref_table['x']), n_epochs))
+ y2_boot_sum = np.zeros((len(ref_table['x']), n_epochs))
+ m_boot_sum = np.zeros((len(ref_table['x']), n_epochs))
+ m2_boot_sum = np.zeros((len(ref_table['x']), n_epochs))
+
+ # Set up motion model parameters
+ motion_model_list = ['Fixed', self.default_motion_model]
+ if 'motion_model_used' in ref_table.keys():
+ motion_model_list += ref_table['motion_model_used'].tolist()
+ elif 'motion_model_input' in ref_table.keys():
+ motion_model_list += ref_table['motion_model_input'].tolist()
+ motion_col_list = motion_model.get_list_motion_model_param_names(np.unique(motion_model_list).tolist(), with_errors=False, with_fixed=False)
if calc_vel_in_bootstrap:
- x0_arr = np.ones((len(ref_table['x']), n_boot)) * -999
- y0_arr = np.ones((len(ref_table['x']), n_boot)) * -999
- vx_arr = np.ones((len(ref_table['x']), n_boot)) * -999
- vy_arr = np.ones((len(ref_table['x']), n_boot)) * -999
+ motion_boot_sum = {}
+ motion2_boot_sum = {}
+ for col in motion_col_list:
+ motion_boot_sum[col] = np.zeros((len(ref_table['x'])))
+ motion2_boot_sum[col] = np.zeros((len(ref_table['x'])))
+ motion_boot_min_epochs = np.max([self.motion_model_dict[mod].n_pts_req
+ for mod in np.unique(motion_model_list)])
### IF MEMORY PROBLEMS HERE:
### DEFINE MEAN, STD VARIABLES AND BUILD THEM RATHER THAN SAVING FULL ARRAY
@@ -1135,44 +1167,58 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
# reference stars. Use a loop for each epoch here, so we
# can handle case where different reference stars are used
# in different epochs
+
+ # Initialize data arrays
+ x_trans_arr = np.ones((len(ref_table['x']), n_epochs)) * -999
+ y_trans_arr = np.ones((len(ref_table['x']), n_epochs)) * -999
+ m_trans_arr = np.ones((len(ref_table['x']), n_epochs)) * -999
+ xe_trans_arr = np.ones((len(ref_table['x']), n_epochs)) * -999
+ ye_trans_arr = np.ones((len(ref_table['x']), n_epochs)) * -999
+ me_trans_arr = np.ones((len(ref_table['x']), n_epochs)) * -999
+
for jj in range(n_epochs):
- # Extract bootstrap sample of matched reference stars
- good = np.where(~np.isnan(ref_table['x_orig'][idx_ref][:,jj]))
+ # Extract bootstrap sample of matched reference stars for this epoch
+ #good = np.where(~np.isnan(ref_table['x_orig'][idx_ref][:,jj]))
+ good = np.where( (ref_table['used_in_trans'][:,jj] == True) & (~np.isnan(ref_table['x_orig'][:,jj])) )
samp_idx = np.random.choice(good[0], len(good[0]), replace=True)
# Get reference star positions in particular epoch from ref_list.
t_epoch = t_arr[jj]
- ref_orig = self.get_ref_list_from_table(t_epoch)
-
- # Get idx of reference stars in bootstrap sample in the ref_orig.
- # Then, use these to build reference starlist for the alignment
- idx_tmp = []
- for ff in range(len(samp_idx)):
- name_tmp = ref_table['name'][samp_idx[ff]]
- foo = np.where(ref_orig['name'] == name_tmp)[0][0]
- idx_tmp.append(foo)
-
- ref_boot = StarList(name=ref_orig['name'][idx_tmp],
- x=ref_orig['x'][idx_tmp],
- y=ref_orig['y'][idx_tmp],
- m=ref_orig['m'][idx_tmp],
- xe=ref_orig['xe'][idx_tmp],
- ye=ref_orig['ye'][idx_tmp],
- me=ref_orig['me'][idx_tmp])
+ ref_orig = self.get_ref_list_from_table(t_epoch)[idx_good]
+
+ ## Get idx of reference stars in bootstrap sample in the ref_orig.
+ ## Then, use these to build reference starlist for the alignment
+ #idx_tmp = []
+ #for ff in range(len(samp_idx)):
+ # name_tmp = ref_table['name'][idx_ref][samp_idx[ff]]
+ # foo = np.where(ref_orig['name'] == name_tmp)[0][0]
+ # idx_tmp.append(foo)
+
+ ref_boot = StarList(name=ref_orig['name'][samp_idx],
+ x=ref_orig['x'][samp_idx],
+ y=ref_orig['y'][samp_idx],
+ m=ref_orig['m'][samp_idx],
+ xe=ref_orig['xe'][samp_idx],
+ ye=ref_orig['ye'][samp_idx],
+ me=ref_orig['me'][samp_idx])
# Now build star list with original positions of the reference stars
# in the bootstrap sample
- starlist_boot = StarList(name=ref_table['name'][idx_ref][samp_idx],
- x=ref_table['x_orig'][:,jj][idx_ref][samp_idx],
- y=ref_table['y_orig'][:,jj][idx_ref][samp_idx],
- m=ref_table['m_orig'][:,jj][idx_ref][samp_idx],
- xe=ref_table['xe_orig'][:,jj][idx_ref][samp_idx],
- ye=ref_table['ye_orig'][:,jj][idx_ref][samp_idx],
- me=ref_table['me_orig'][:,jj][idx_ref][samp_idx])
-
+ starlist_boot = StarList(name=ref_table['name'][samp_idx],
+ x=ref_table['x_orig'][:,jj][samp_idx],
+ y=ref_table['y_orig'][:,jj][samp_idx],
+ m=ref_table['m_orig'][:,jj][samp_idx],
+ xe=ref_table['xe_orig'][:,jj][samp_idx],
+ ye=ref_table['ye_orig'][:,jj][samp_idx],
+ me=ref_table['me_orig'][:,jj][samp_idx])
+
+ # Sanity check: makes sure names match between ref_boot and starlist_boot,
+ # since they need to line up
+ assert np.all(ref_boot['name'] == starlist_boot['name'])
+
# Calculate weights based on weights keyword. If weights desired, will need to
# make starlist objects for this
- if self.weights != None:
+ if self.trans_weights != None:
# In order for weights calculation to work, we need to apply a transformation
# to the star_list_T so it is in the same units as ref_boot. So, we'll apply
# the final transformation for the epoch to get close enough for the
@@ -1193,6 +1239,8 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
self.trans_args[0]['order'],
m=starlist_boot['m'], mref=ref_boot['m'],
weights=weight, mag_trans=self.mag_trans)
+ #print(jj)
+ #pdb.set_trace()
# Apply transformation to *all* orig positions in this epoch. Need to make a new
# FLYSTAR starlist object with the original positions for this. We don't
@@ -1211,13 +1259,21 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
starlist_T.transform_xy(trans)
# Add output to pos arrays
- x_trans_arr[:,ii,jj] = starlist_T['x']
- y_trans_arr[:,ii,jj] = starlist_T['y']
- m_trans_arr[:,ii,jj] = starlist_T['m']
- xe_trans_arr[:,ii,jj] = starlist_T['xe']
- ye_trans_arr[:,ii,jj] = starlist_T['ye']
- me_trans_arr[:,ii,jj] = starlist_T['me']
-
+ x_trans_arr[:,jj] = starlist_T['x']
+ y_trans_arr[:,jj] = starlist_T['y']
+ m_trans_arr[:,jj] = starlist_T['m']
+ xe_trans_arr[:,jj] = starlist_T['xe']
+ ye_trans_arr[:,jj] = starlist_T['ye']
+ me_trans_arr[:,jj] = starlist_T['me']
+
+ x_boot_sum += x_trans_arr
+ x2_boot_sum += x_trans_arr**2
+ y_boot_sum += y_trans_arr
+ y2_boot_sum += y_trans_arr**2
+ if self.mag_trans:
+ m_boot_sum += m_trans_arr
+ m2_boot_sum += m_trans_arr**2
+
t2 = time.time()
#print('=================================================')
#print('Time to do {0} epochs: {1}s'.format(n_epochs, t2-t1))
@@ -1228,26 +1284,35 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
# for each star, and then run it through the startable fit_velocities machinery
if calc_vel_in_bootstrap:
boot_idx = np.random.choice(np.arange(0, n_epochs, 1), size=n_epochs)
+ while len(np.unique(boot_idx)) < motion_boot_min_epochs:
+ boot_idx = np.random.choice(np.arange(0, n_epochs, 1), size=n_epochs)
t_boot = t_arr[boot_idx]
star_table = StarTable(name=ref_table['name'],
- x=x_trans_arr[:,ii,boot_idx],
- y=y_trans_arr[:,ii,boot_idx],
- m=m_trans_arr[:,ii,boot_idx],
- xe=xe_trans_arr[:,ii,boot_idx],
- ye=ye_trans_arr[:,ii,boot_idx],
- me=me_trans_arr[:,ii,boot_idx],
- t=np.tile(t_boot, (len(ref_table),1)) )
+ x=x_trans_arr[:,boot_idx],
+ y=y_trans_arr[:,boot_idx],
+ m=m_trans_arr[:,boot_idx],
+ xe=xe_trans_arr[:,boot_idx],
+ ye=ye_trans_arr[:,boot_idx],
+ me=me_trans_arr[:,boot_idx],
+ t=np.tile(t_boot, (len(ref_table),1)))
+ if 'motion_model_used' in ref_table.columns:
+ star_table['motion_model_input'] = ref_table['motion_model_used']
# Now, do proper motion calculation, making sure to fix t0 to the
# orig value (so we can get a reasonable error on x0, y0)
- star_table.fit_velocities(weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, fixed_t0=t0_arr, show_progress=show_progress)
+ star_table.fit_velocities(
+ fixed_t0=t0_arr,
+ default_motion_model=self.default_motion_model,
+ motion_model_dict=self.motion_model_dict,
+ use_scipy=self.use_scipy,
+ absolute_sigma=self.absolute_sigma
+ )
# Save proper motion fit results to output arrays
- x0_arr[:,ii] = star_table['x0']
- y0_arr[:,ii] = star_table['y0']
- vx_arr[:,ii] = star_table['vx']
- vy_arr[:,ii] = star_table['vy']
+ for col in motion_col_list:
+ motion_boot_sum[col] += star_table[col]
+ motion2_boot_sum[col] += star_table[col]**2
# Quick check to make sure bootstrap calc was valid: output t0 should be
# same as input t0_arr, since we used fixed_t0 option
@@ -1259,27 +1324,29 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
#print('=================================================')
# Calculate the bootstrap error values.
- x_err_b = np.std(x_trans_arr, ddof=1, axis=1)
- y_err_b = np.std(y_trans_arr, ddof=1, axis=1)
- m_err_b = np.std(m_trans_arr, ddof=1, axis=1)
-
+ x_boot_mean = x_boot_sum/n_boot
+ x_err_b = np.sqrt((x2_boot_sum - 2*x_boot_mean*x_boot_sum + n_boot*x_boot_mean**2)/n_boot)
+ y_boot_mean = y_boot_sum/n_boot
+ y_err_b = np.sqrt((y2_boot_sum - 2*y_boot_mean*y_boot_sum + n_boot*y_boot_mean**2)/n_boot)
+ m_boot_mean = m_boot_sum/n_boot
+ m_err_b = np.sqrt((m2_boot_sum - 2*m_boot_mean*m_boot_sum + n_boot*m_boot_mean**2)/n_boot)
+
+ motion_data_err = {}
if calc_vel_in_bootstrap:
- x0_err_b = np.std(x0_arr, ddof=1, axis=1)
- y0_err_b = np.std(y0_arr, ddof=1, axis=1)
- vx_err_b = np.std(vx_arr, ddof=1, axis=1)
- vy_err_b = np.std(vy_arr, ddof=1, axis=1)
+ for col in motion_col_list:
+ mot_boot_mean = motion_boot_sum[col]/n_boot
+ motion_data_err[col] = np.sqrt((motion2_boot_sum[col] -
+ 2*mot_boot_mean*motion_boot_sum[col] + n_boot*mot_boot_mean**2)/n_boot)
else:
- x0_err_b = np.nan
- y0_err_b = np.nan
- vx_err_b = np.nan
- vy_err_b = np.nan
+ for col in motion_col_list:
+ motion_data_err[col] = np.nan
# Add summary statistics to *original* ref_table, i.e. ref_table
# hanging off of mosaic object.
col_heads_2D = ['xe_boot', 'ye_boot', 'me_boot']
- data_dict = {'xe_boot': x_err_b, 'ye_boot': y_err_b, 'me_boot': m_err_b,
- 'x0e_boot': x0_err_b, 'y0e_boot': y0_err_b,
- 'vxe_boot': vx_err_b, 'vye_boot': vy_err_b}
+ data_dict = {'xe_boot': x_err_b, 'ye_boot': y_err_b, 'me_boot': m_err_b}
+ for col in motion_col_list:
+ data_dict[col+'_err_boot'] = motion_data_err[col]
for ff in col_heads_2D:
col = Column(np.ones((len(self.ref_table), n_epochs)), name=ff)
@@ -1287,10 +1354,23 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
col[idx_good] = data_dict[ff]
self.ref_table.add_column(col)
+
+ # Calculate chi^2 with bootstrap positional errors
+ x_pred, y_pred, _, _ = self.ref_table.get_star_positions_at_time(t_arr, self.motion_model_dict, allow_alt_models=True)
+ xe_comb = np.hypot(self.ref_table['xe'], self.ref_table['xe_boot'])
+ ye_comb = np.hypot(self.ref_table['ye'], self.ref_table['ye_boot'])
+ data_dict['chi2_x_boot'] = np.nansum((self.ref_table['x']-x_pred)**2/(xe_comb)**2,axis=1)
+ data_dict['chi2_y_boot'] = np.nansum((self.ref_table['y']-y_pred)**2/(ye_comb)**2,axis=1)
+ for ff in ['chi2_x_boot', 'chi2_y_boot']:
+ col = Column(np.ones(len(self.ref_table)), name=ff)
+ col.fill(np.nan)
+
+ col[idx_good] = data_dict[ff][idx_good]
+ self.ref_table.add_column(col)
# Now handle the velocities, if they were calculated
if calc_vel_in_bootstrap:
- col_heads_1D = [ 'x0e_boot', 'y0e_boot', 'vxe_boot', 'vye_boot']
+ col_heads_1D = [col+'_err_boot' for col in motion_col_list]
for ff in col_heads_1D:
col = Column(np.ones(len(self.ref_table)), name=ff)
@@ -1298,11 +1378,23 @@ def calc_bootstrap_errors(self, n_boot=100, boot_epochs_min=-1, calc_vel_in_boot
col[idx_good] = data_dict[ff]
self.ref_table.add_column(col)
-
+ #pdb.set_trace()
+
print('===============================')
print('Done with bootstrap')
print('===============================')
+ if update_errors:
+ self.ref_table['xe_list'] = self.ref_table['xe']
+ self.ref_table['ye_list'] = self.ref_table['ye']
+ self.ref_table['me_list'] = self.ref_table['me']
+ self.ref_table['xe'] = np.hypot(self.ref_table['xe_list'], self.ref_table['xe_boot'])
+ self.ref_table['ye'] = np.hypot(self.ref_table['ye_list'], self.ref_table['ye_boot'])
+ self.ref_table['me'] = np.hypot(self.ref_table['me_list'], self.ref_table['me_boot'])
+ print("Saved starlist errors to xe_list and added xe_boot to xe in quadrature.")
+ print("The same was done for ye and me.")
+
+
return
@@ -1313,14 +1405,19 @@ def __init__(self, ref_list, list_of_starlists, iters=2,
trans_args=[{'order': 2}, {'order': 2}],
init_order=1,
mag_trans=True, mag_lim=None, ref_mag_lim=None,
- weights=None,
+ trans_weights=None, vel_weights='var',
trans_input=None,
trans_class=transforms.PolyTransform,
calc_trans_inverse=False,
use_ref_new=False,
- use_vel=False, update_ref_orig=False,
+ update_ref_orig=False,
init_guess_mode='miracle',
iter_callback=None,
+ default_motion_model='Fixed',
+ motion_model_dict={},
+ use_scipy=True,
+ absolute_sigma=False,
+ save_path=None,
verbose=True):
"""
@@ -1368,7 +1465,7 @@ def __init__(self, ref_list, list_of_starlists, iters=2,
magnitudes in each list to bring them into a common magnitude system. This is
essential for matching (with finite dm_tol) starlists of different filters or
starlists that are not photometrically calibrated. Note that the final_table columns
- of 'm', 'm0', and 'm0e' will contain the transformed magnitudes while the
+ of 'm', 'm0', and 'm0_err' will contain the transformed magnitudes while the
final_table column 'm_orig' will contain the original un-transformed magnitudes.
If mag_trans = False, then no such zeropoint offset it applied at any point.
@@ -1382,11 +1479,15 @@ def __init__(self, ref_list, list_of_starlists, iters=2,
If different from None, it indicates the minimum and maximum magnitude
on the reference catalog for finding the transformations.
- weights : str
+ trans_weights : str
Either None (def), 'both,var', 'list,var', or 'ref,var' depending on whether you want
to weight by the positional uncertainties (variances) in the individual starlists, or also with
the uncertainties in the reference frame itself. Note weighting only works when there
are positional uncertainties availabe. Other options include 'both,std', 'list,std', 'list,var'.
+
+ vel_weights : str
+ Either 'var' (def) or 'std', depending on whether you want to weight the motion model
+ fits by the variance or standard deviation of the position data
trans_input : array or list of transform objects
def = None. If not None, then this should contain an array or list of transform
@@ -1431,13 +1532,7 @@ def = None. If not None, then this should contain an array or list of transform
necessarily want to use these in the reference frame in subsequent passes.
If True, then the new stars will be used in later passes/iterations.
If False, then the new stars will be carried, but not used in the transformation.
- We determine which stars to use through setting a boolean use_in_trans flag.
-
- use_vel : boolean
- If velocities are present in the reference list and use_vel == True, then during
- each iteration of the alignment, the reference list will be propogated in time
- using the velocity information. So all transformations will be derived w.r.t.
- the propogated positions. See also update_vel.
+ We determine which stars to use by setting a boolean use_in_trans flag.
init_guess_mode : string
If no initial transformations are passed in via the trans_input keyword, then we have
@@ -1446,7 +1541,22 @@ def = None. If not None, then this should contain an array or list of transform
iter_callback : None or function
A function to call (that accepts a StarTable object and an iteration number)
- at the end of every iteration. This can be used for plotting or printing state.
+ at the end of every iteration. This can be used for plotting or printing state.
+
+ default_motion_model : string
+ Name of motion model to use for new or unassigned stars
+
+ motion_model_dict : None or dict
+ Dict of motion model name keys (strings) and corresponding MotionModel object values
+
+ use_scipy : bool, optional
+ If True, use scipy.optimize.curve_fit for velocity fitting. If False, use linear algebra fitting, by default True.
+
+ absolute_sigma : bool, optional
+ If True, the velocity fit will use absolute errors in the data. If False, relative errors will be used, by default False.
+
+ save_path : str, optional
+ Path to save the MosaicToRef object as a pickle file.
Example
----------
@@ -1455,7 +1565,6 @@ def = None. If not None, then this should contain an array or list of transform
outlier_tol=[None], mag_lim=[13, 21],
trans_class=transforms.PolyTransform,
trans_args=[{'order': 1}],
- use_vel=True,
use_ref_new=False,
update_ref_orig=False,
mag_trans=False,
@@ -1484,13 +1593,17 @@ def = None. If not None, then this should contain an array or list of transform
dr_tol=dr_tol, dm_tol=dm_tol,
outlier_tol=outlier_tol, trans_args=trans_args,
init_order=init_order,
- mag_trans=mag_trans, mag_lim=mag_lim, weights=weights,
+ mag_trans=mag_trans, mag_lim=mag_lim,
+ trans_weights=trans_weights, vel_weights=vel_weights,
trans_input=trans_input, trans_class=trans_class,
- calc_trans_inverse=calc_trans_inverse, use_vel=use_vel,
+ calc_trans_inverse=calc_trans_inverse,
+ default_motion_model = default_motion_model,
init_guess_mode=init_guess_mode,
iter_callback=iter_callback,
- verbose=verbose)
-
+ motion_model_dict=motion_model_dict,
+ verbose=verbose, use_scipy=use_scipy,
+ absolute_sigma=absolute_sigma, save_path=save_path)
+
self.ref_list = copy.deepcopy(ref_list)
self.ref_mag_lim = ref_mag_lim
self.update_ref_orig = update_ref_orig
@@ -1500,15 +1613,19 @@ def = None. If not None, then this should contain an array or list of transform
if ('x' not in self.ref_list.colnames) and ('x0' in self.ref_list.colnames):
self.ref_list['x'] = self.ref_list['x0']
self.ref_list['y'] = self.ref_list['y0']
- if ('xe' not in self.ref_list.colnames) and ('x0e' in self.ref_list.colnames):
- self.ref_list['xe'] = self.ref_list['x0e']
- self.ref_list['ye'] = self.ref_list['y0e']
+ if ('xe' not in self.ref_list.colnames) and ('x0_err' in self.ref_list.colnames):
+ self.ref_list['xe'] = self.ref_list['x0_err']
+ self.ref_list['ye'] = self.ref_list['y0_err']
if ('m' not in self.ref_list.colnames) and ('m0' in self.ref_list.colnames):
self.ref_list['m'] = self.ref_list['m0']
- if ('me' not in self.ref_list.colnames) and ('m0e' in self.ref_list.colnames):
- self.ref_list['me'] = self.ref_list['m0e']
+ if ('me' not in self.ref_list.colnames) and ('m0_err' in self.ref_list.colnames):
+ self.ref_list['me'] = self.ref_list['m0_err']
if ('t' not in self.ref_list.colnames) and ('t0' in self.ref_list.colnames):
self.ref_list['t'] = self.ref_list['t0']
+
+ # Make sure the motion models are ready
+ self.motion_model_dict = motion_model.validate_motion_model_dict(self.motion_model_dict,
+ self.ref_list, self.default_motion_model)
return
@@ -1531,10 +1648,7 @@ def fit(self):
x0e
y0e
m0e
- vx (only if use_vel=True)
- vy (only if use_vel=True)
- vxe (only if use_vel=True)
- vye (only if use_vel=True)
+ addl. motion_model parameters
"""
# Create a log file of the parameters used in the fit.
@@ -1548,12 +1662,13 @@ def fit(self):
logger(_log, ' mag_trans = ' + str(self.mag_trans), self.verbose)
logger(_log, ' mag_lim = ' + str(self.mag_lim), self.verbose)
logger(_log, ' ref_mag_lim = ' + str(self.ref_mag_lim), self.verbose)
- logger(_log, ' weights = ' + str(self.weights), self.verbose)
+ logger(_log, ' trans_weights = ' + str(self.trans_weights), self.verbose)
+ logger(_log, ' vel_weights = ' + str(self.vel_weights), self.verbose)
logger(_log, ' trans_input = ' + str(self.trans_input), self.verbose)
logger(_log, ' trans_class = ' + str(self.trans_class), self.verbose)
logger(_log, ' calc_trans_inverse = ' + str(self.calc_trans_inverse), self.verbose)
logger(_log, ' use_ref_new = ' + str(self.use_ref_new), self.verbose)
- logger(_log, ' use_vel = ' + str(self.use_vel), self.verbose)
+ logger(_log, ' default_motion_model = ' + str(self.default_motion_model), self.verbose)
logger(_log, ' update_ref_orig = ' + str(self.update_ref_orig), self.verbose)
logger(_log, ' init_guess_mode = ' + str(self.init_guess_mode), self.verbose)
logger(_log, ' iter_callback = ' + str(self.iter_callback), self.verbose)
@@ -1568,16 +1683,6 @@ def fit(self):
# w, w_orig (optiona) -- the input and output weights of stars in transform: 2D
##########
self.ref_table = self.setup_ref_table_from_starlist(self.ref_list)
-
- # copy over velocities if they exist in the reference list
- if 'vx' in self.ref_list.colnames:
- self.ref_table['vx'] = self.ref_list['vx']
- self.ref_table['vy'] = self.ref_list['vy']
- self.ref_table['t0'] = self.ref_list['t0']
- if 'vxe' in self.ref_list.colnames:
- self.ref_table['vxe'] = self.ref_list['vxe']
- self.ref_table['vye'] = self.ref_list['vye']
-
##########
#
@@ -1604,13 +1709,13 @@ def fit(self):
self.match_and_transform(self.ref_mag_lim,
self.dr_tol[nn], self.dm_tol[nn], self.outlier_tol[nn],
self.trans_args[nn])
-
+
# Clean up the reference table
# Find where stars are detected.
self.ref_table.detections()
### Drop all stars that have 0 detections.
- idx = np.where((self.ref_table['n_detect'] == 0) & (self.ref_table['ref_orig'] == False))[0]
+ idx = np.where((self.ref_table['n_detect'] == 0))[0] # & (self.ref_table['ref_orig'] == False))[0]
if self.verbose > 0:
print(' *** Getting rid of {0:d} out of {1:d} junk sources'.format(len(idx), len(self.ref_table)))
self.ref_table.remove_rows(idx)
@@ -1622,7 +1727,7 @@ def fit(self):
#
# Re-do all matching given final transformations.
# No trimming this time.
- # First rest the reference table 2D values.
+ # First reset the reference table 2D values.
##########
self.reset_ref_values(exclude=['used_in_trans'])
@@ -1632,7 +1737,12 @@ def fit(self):
print("**********")
self.match_lists(self.dr_tol[-1], self.dm_tol[-1])
- self.update_ref_table_aggregates()
+ keep_ref_orig = (self.update_ref_orig==False)
+ if keep_ref_orig:
+ keep_orig = np.where(self.ref_table['ref_orig'])[0]
+ else:
+ keep_orig=None
+ self.update_ref_table_aggregates(keep_orig=keep_orig)
##########
# Clean up output table.
@@ -1646,12 +1756,16 @@ def fit(self):
self.ref_table.detections()
### Drop all stars that have 0 detections.
- idx = np.where(self.ref_table['n_detect'] == 0)[0]
+ idx = np.where((self.ref_table['n_detect'] == 0) & (self.ref_table['ref_orig'] == False))[0]
print(' *** Getting rid of {0:d} out of {1:d} junk sources'.format(len(idx), len(self.ref_table)))
self.ref_table.remove_rows(idx)
if self.iter_callback != None:
self.iter_callback(self.ref_table, nn)
+
+ if self.save_path:
+ with open(self.save_path, 'wb') as file:
+ pickle.dump(self, file)
return
def get_all_epochs(t):
@@ -1686,6 +1800,7 @@ def setup_ref_table_from_starlist(star_list):
array in the original reference star list.
"""
col_arrays = {}
+ motion_model_col_names = motion_model.get_all_motion_model_param_names(with_errors=True)
for col_name in star_list.colnames:
if col_name == 'name':
# The "name" column will be 1D; but we will also add a "name_in_list" column.
@@ -1720,7 +1835,7 @@ def setup_ref_table_from_starlist(star_list):
# Make sure ref_table has the necessary x0, y0, m0 and associated
# error columns. If they don't exist, then add them as a copy of
# the original x,y,m etc columns.
- new_cols_arr = ['x0', 'x0e', 'y0', 'y0e', 'm0', 'm0e']
+ new_cols_arr = ['x0', 'x0_err', 'y0', 'y0_err', 'm0', 'm0_err']
orig_cols_arr = ['x', 'xe', 'y', 'ye', 'm', 'me']
assert len(new_cols_arr) == len(orig_cols_arr)
ref_cols = ref_table.keys()
@@ -1738,7 +1853,7 @@ def setup_ref_table_from_starlist(star_list):
if 'use_in_trans' not in ref_table.colnames:
new_col = Column(np.ones(len(ref_table), dtype=bool), name='use_in_trans')
ref_table.add_column(new_col)
-
+
# Now reset the original values to invalids... they will be filled in
# at later times. Preserve content only in the columns: name, x0, y0, m0 (and 0e).
# Note that these are all the 1D columsn.
@@ -1769,7 +1884,7 @@ def copy_over_values(ref_table, star_list, star_list_T, idx_epoch, idx_ref, idx_
idx_ref : list or array
The indices into the ref_table where values are copied to.
idx_lis : list or array
- The indices into the star_list or star_lsit_T where values are copied from.
+ The indices into the star_list or star_list_T where values are copied from.
"""
for col_name in ref_table.colnames:
if col_name in star_list_T.colnames:
@@ -1799,7 +1914,7 @@ def reset_ref_values(ref_table):
return
-def add_rows_for_new_stars(ref_table, star_list, idx_lis):
+def add_rows_for_new_stars(ref_table, star_list, idx_lis, default_motion_model='Fixed'):
"""
For each star that is in star_list and NOT in idx_list, make a
new row in the reference table. The values will be empty (None, NAN, etc.).
@@ -1843,6 +1958,10 @@ def add_rows_for_new_stars(ref_table, star_list, idx_lis):
new_col_empty = -1
elif ref_table[col_name].dtype == np.dtype('bool'):
new_col_empty = False
+ elif col_name=='motion_model_input':
+ new_col_empty = default_motion_model
+ elif col_name=='motion_model_used':
+ new_col_empty = 'Fixed'
else:
new_col_empty = np.nan
@@ -1866,372 +1985,9 @@ def add_rows_for_new_stars(ref_table, star_list, idx_lis):
return ref_table, idx_lis_new, idx_ref_new
-
-def run_align_iter(catalog, trans_order=1, poly_deg=1, ref_mag_lim=19, ref_radius_lim=300):
- # Load up data with matched stars.
- d = Table.read(catalog)
-
- # Determine how many epochs there are.
- N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
-
- # Determine how many stars there are.
- N_stars = len(d)
-
- # Determine the reference epoch
- ref = d.meta['L_REF']
-
- # Figure out the number of free parameters for the specified
- # poly2d order.
- poly2d = models.Polynomial2D(trans_order)
- N_par_trans_per_epoch = 2.0 * poly2d.get_num_coeff(2) # one poly2d for each dimension (X, Y)
- N_par_trans = N_par_trans_per_epoch * N_epochs
-
- ##########
- # First iteration -- align everything to REF epoch with zero velocities.
- ##########
- print('ALIGN_EPOCHS: run_align_iter() -- PASS 1')
- ee_ref = d.meta['L_REF']
-
- target_name = 'OB120169'
-
- trans1, used1 = calc_transform_ref_epoch(d, target_name, ee_ref, ref_mag_lim, ref_radius_lim)
-
- ##########
- # Derive the velocity of each stars using the round 1 transforms.
- ##########
- calc_polyfit_all_stars(d, poly_deg, init_fig_idx=0)
-
- calc_mag_avg_all_stars(d)
-
- tdx = np.where((d['name_0'] == 'OB120169') | (d['name_0'] == 'OB120169_L'))[0]
- print(d[tdx]['name_0', 't0', 'mag', 'x0', 'vx', 'x0e', 'vxe', 'chi2x', 'y0', 'vy', 'y0e', 'vye', 'chi2y', 'dof'])
-
- ##########
- # Second iteration -- align everything to reference positions derived from iteration 1
- ##########
- print('ALIGN_EPOCHS: run_align_iter() -- PASS 2')
- target_name = 'OB120169'
-
- trans2, used2 = calc_transform_ref_poly(d, target_name, poly_deg, ref_mag_lim, ref_radius_lim)
-
- ##########
- # Derive the velocity of each stars using the round 1 transforms.
- ##########
- calc_polyfit_all_stars(d, poly_deg, init_fig_idx=4)
-
- ##########
- # Save output
- ##########
- d.write(catalog.replace('.fits', '_aln.fits'), overwrite=True)
-
- return
-
-def calc_transform_ref_epoch(d, target_name, ee_ref, ref_mag_lim, ref_radius_lim):
- # Determine how many epochs there are.
- N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
-
- # output array
- trans = []
- used = []
-
- # Find the target
- tdx = np.where(d['name_0'] == 'OB120169')[0][0]
-
- # Reference values
- t_ref = d['t_{0:d}'.format(ee_ref)]
- m_ref = d['m_{0:d}'.format(ee_ref)]
- x_ref = d['x_{0:d}'.format(ee_ref)]
- y_ref = d['y_{0:d}'.format(ee_ref)]
- xe_ref = d['xe_{0:d}'.format(ee_ref)]
- ye_ref = d['ye_{0:d}'.format(ee_ref)]
-
- # Calculate some quanitites we use for selecting reference stars.
- r_ref = np.hypot(x_ref - x_ref[tdx], y_ref - y_ref[tdx])
-
- # Loop through and align each epoch to the reference epoch.
- for ee in range(N_epochs):
- # Pull out the X, Y positions (and errors) for the two
- # starlists we are going to align.
- x_epo = d['x_{0:d}'.format(ee)]
- y_epo = d['y_{0:d}'.format(ee)]
- t_epo = d['t_{0:d}'.format(ee)]
- xe_epo = d['xe_{0:d}'.format(ee)]
- ye_epo = d['ye_{0:d}'.format(ee)]
-
- # Figure out the set of stars detected in both epochs.
- idx = np.where((t_ref != 0) & (t_epo != 0) & (xe_ref != 0) & (xe_epo != 0))[0]
-
- # Find those in both epochs AND reference stars. This is [idx][rdx]
- rdx = np.where((r_ref[idx] < ref_radius_lim) & (m_ref[idx] < ref_mag_lim))[0]
-
- # Average the positional errors together to get one weight per star.
- xye_ref = (xe_ref + ye_ref) / 2.0
- xye_epo = (xe_epo + ye_epo) / 2.0
- xye_wgt = (xye_ref**2 + xye_epo**2)**0.5
-
- # Calculate transform based on the matched stars
- trans_tmp = transforms.PolyTransform(x_epo[idx][rdx], y_epo[idx][rdx], x_ref[idx][rdx], y_ref[idx][rdx],
- weights=xye_wgt[idx][rdx], order=2)
-
- trans.append(trans_tmp)
-
-
- # Apply thte transformation to the stars positions and errors:
- xt_epo = np.zeros(len(d), dtype=float)
- yt_epo = np.zeros(len(d), dtype=float)
- xet_epo = np.zeros(len(d), dtype=float)
- yet_epo = np.zeros(len(d), dtype=float)
-
- xt_epo[idx], xet_epo[idx], yt_epo[idx], yet_epo[idx] = trans_tmp.evaluate_errors(x_epo[idx], xe_epo[idx],
- y_epo[idx], ye_epo[idx],
- nsim=100)
-
- d['xt_{0:d}'.format(ee)] = xt_epo
- d['yt_{0:d}'.format(ee)] = yt_epo
- d['xet_{0:d}'.format(ee)] = xet_epo
- d['yet_{0:d}'.format(ee)] = yet_epo
-
- # Record which stars we used in the transform.
- used_tmp = np.zeros(len(d), dtype=bool)
- used_tmp[idx[rdx]] = True
-
- used.append(used_tmp)
-
- if True:
- plot_quiver_residuals(xt_epo, yt_epo, x_ref, y_ref, idx, rdx, 'Epoch: ' + str(ee))
-
- used = np.array(used)
-
- return trans, used
-
-
-def calc_transform_ref_poly(d, target_name, poly_deg, ref_mag_lim, ref_radius_lim):
- # Determine how many epochs there are.
- N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
-
- # output array
- trans = []
- used = []
-
- # Find the target
- tdx = np.where(d['name_0'] == 'OB120169')[0][0]
-
- # Temporary Reference values
- t_ref = d['t0']
- m_ref = d['mag']
- x_ref = d['x0']
- y_ref = d['y0']
- xe_ref = d['x0e']
- ye_ref = d['y0e']
-
- # Calculate some quanitites we use for selecting reference stars.
- r_ref = np.hypot(x_ref - x_ref[tdx], y_ref - y_ref[tdx])
-
- for ee in range(N_epochs):
- # Pull out the X, Y positions (and errors) for the two
- # starlists we are going to align.
- x_epo = d['x_{0:d}'.format(ee)]
- y_epo = d['y_{0:d}'.format(ee)]
- t_epo = d['t_{0:d}'.format(ee)]
- xe_epo = d['xe_{0:d}'.format(ee)]
- ye_epo = d['ye_{0:d}'.format(ee)]
-
- # Shift the reference position by the polyfit for each star.
- dt = t_epo - t_ref
- if poly_deg >= 0:
- x_ref_ee = x_ref
- y_ref_ee = y_ref
- xe_ref_ee = x_ref
- ye_ref_ee = y_ref
-
- if poly_deg >= 1:
- x_ref_ee += d['vx'] * dt
- y_ref_ee += d['vy'] * dt
- xe_ref_ee = np.hypot(xe_ref_ee, d['vxe'] * dt)
- ye_ref_ee = np.hypot(ye_ref_ee, d['vye'] * dt)
-
- if poly_deg >= 2:
- x_ref_ee += d['ax'] * dt
- y_ref_ee += d['ay'] * dt
- xe_ref_ee = np.hypot(xe_ref_ee, d['axe'] * dt)
- ye_ref_ee = np.hypot(ye_ref_ee, d['aye'] * dt)
-
- # Figure out the set of stars detected in both.
- idx = np.where((t_ref != 0) & (t_epo != 0) & (xe_ref != 0) & (xe_epo != 0))[0]
-
- # Find those in both AND reference stars. This is [idx][rdx]
- rdx = np.where((r_ref[idx] < ref_radius_lim) & (m_ref[idx] < ref_mag_lim))[0]
-
- # Average the positional errors together to get one weight per star.
- xye_ref = (xe_ref_ee + ye_ref_ee) / 2.0
- xye_epo = (xe_epo + ye_epo) / 2.0
- xye_wgt = (xye_ref**2 + xye_epo**2)**0.5
-
- # Calculate transform based on the matched stars
- trans_tmp = transforms.PolyTransform(x_epo[idx][rdx], y_epo[idx][rdx], x_ref_ee[idx][rdx], y_ref_ee[idx][rdx],
- weights=xye_wgt[idx][rdx], order=2)
- trans.append(trans_tmp)
-
- # Apply thte transformation to the stars positions and errors:
- xt_epo = np.zeros(len(d), dtype=float)
- yt_epo = np.zeros(len(d), dtype=float)
- xet_epo = np.zeros(len(d), dtype=float)
- yet_epo = np.zeros(len(d), dtype=float)
-
- xt_epo[idx], xet_epo[idx], yt_epo[idx], yet_epo[idx] = trans_tmp.evaluate_errors(x_epo[idx], xe_epo[idx],
- y_epo[idx], ye_epo[idx],
- nsim=100)
- d['xt_{0:d}'.format(ee)] = xt_epo
- d['yt_{0:d}'.format(ee)] = yt_epo
- d['xet_{0:d}'.format(ee)] = xet_epo
- d['yet_{0:d}'.format(ee)] = yet_epo
-
- # Record which stars we used in the transform.
- used_tmp = np.zeros(len(d), dtype=bool)
- used_tmp[idx[rdx]] = True
-
- used.append(used_tmp)
-
- if True:
- plot_quiver_residuals(xt_epo, yt_epo, x_ref_ee, y_ref_ee, idx, rdx, 'Epoch: ' + str(ee))
-
- used = np.array(used)
-
- return trans, used
-
-def calc_polyfit_all_stars(d, poly_deg, init_fig_idx=0):
- # Determine how many stars there are.
- N_stars = len(d)
-
- # Determine how many epochs there are.
- N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
-
- # Setup some variables to save the results
- t0_all = []
- px_all = []
- py_all = []
- pxe_all = []
- pye_all = []
- chi2x_all = []
- chi2y_all = []
- dof_all = []
-
- # Get the time array, which is the same for all stars.
- # Also, sort the time indices.
- t = np.array([d['t_{0:d}'.format(ee)][0] for ee in range(N_epochs)])
- tdx = t.argsort()
- t_sorted = t[tdx]
-
- # Run polyfit on each star.
- for ss in range(N_stars):
- # Get the x, y, xe, ye, and t arrays for this star.
- xt = np.array([d['xt_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
- yt = np.array([d['yt_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
- xet = np.array([d['xet_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
- yet = np.array([d['yet_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
- t_tmp = np.array([d['t_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
-
- # Sort these arrays.
- xt_sorted = xt[tdx]
- yt_sorted = yt[tdx]
- xet_sorted = xet[tdx]
- yet_sorted = yet[tdx]
- t_tmp_sorted = t_tmp[tdx]
-
- # Get only the detected epochs.
- edx = np.where(t_tmp_sorted != 0)[0]
-
- # Calculate the weighted t0 (using the transformed errors).
- weight_for_t0 = 1.0 / np.hypot(xet_sorted, yet_sorted)
- t0 = np.average(t_sorted[edx], weights=weight_for_t0[edx])
-
- # for ee in edx:
- # print('{0:8.3f} {1:10.5f} {2:10.5f} {3:8.5f} {4:8.5f}'.format(t[ee], xt[ee], yt[ee], xet[ee], yet[ee]))
- # pdb.set_trace()
-
- # Run polyfit
- dt = t_sorted - t0
- px, covx = np.polyfit(dt[edx], xt_sorted[edx], poly_deg, w=1./xet_sorted[edx], cov=True)
- py, covy = np.polyfit(dt[edx], yt_sorted[edx], poly_deg, w=1./yet_sorted[edx], cov=True)
-
- pxe = np.sqrt(np.diag(covx))
- pye = np.sqrt(np.diag(covy))
-
-
- x_mod = np.polyval(px, dt[edx])
- y_mod = np.polyval(py, dt[edx])
- chi2x = np.sum( ((x_mod - xt_sorted[edx]) / xet_sorted[edx])**2 )
- chi2y = np.sum( ((y_mod - yt_sorted[edx]) / yet_sorted[edx])**2 )
- dof = len(edx) - (poly_deg + 1)
-
- # Save results:
- t0_all.append(t0)
- px_all.append(px)
- py_all.append(py)
- pxe_all.append(pxe)
- pye_all.append(pye)
- chi2x_all.append(chi2x)
- chi2y_all.append(chi2y)
- dof_all.append(dof)
-
- if d[ss]['name_0'] in ['OB120169', 'OB120169_L']:
- gs = GridSpec(3, 2) # 3 rows, 1 column
- fig = plt.figure(ss + 1 + init_fig_idx, figsize=(12, 8))
- a0 = fig.add_subplot(gs[0:2, 0])
- a1 = fig.add_subplot(gs[2, 0])
- a2 = fig.add_subplot(gs[0:2, 1])
- a3 = fig.add_subplot(gs[2, 1])
-
- a0.errorbar(t_sorted[edx], xt_sorted[edx], yerr=xet_sorted[edx], fmt='ro')
- a0.plot(t_sorted[edx], x_mod, 'k-')
- a0.set_title(d[ss]['name_0'] + ' X')
- a1.errorbar(t_sorted[edx], xt_sorted[edx] - x_mod, yerr=xet_sorted[edx], fmt='ro')
- a1.axhline(0, linestyle='--')
- a1.set_xlabel('Time (yrs)')
- a2.errorbar(t_sorted[edx], yt_sorted[edx], yerr=yet_sorted[edx], fmt='ro')
- a2.plot(t_sorted[edx], y_mod, 'k-')
- a2.set_title(d[ss]['name_0'] + ' Y')
- a3.errorbar(t_sorted[edx], yt_sorted[edx] - y_mod, yerr=yet_sorted[edx], fmt='ro')
- a3.axhline(0, linestyle='--')
- a3.set_xlabel('Time (yrs)')
-
-
-
- t0_all = np.array(t0_all)
- px_all = np.array(px_all)
- py_all = np.array(py_all)
- pxe_all = np.array(pxe_all)
- pye_all = np.array(pye_all)
- chi2x_all = np.array(chi2x_all)
- chi2y_all = np.array(chi2y_all)
- dof_all = np.array(dof_all)
-
- # Done with all the stars... recast as numpy arrays and save to output table.
- d['t0'] = t0_all
- d['chi2x'] = chi2x_all
- d['chi2y'] = chi2y_all
- d['dof'] = dof_all
- if poly_deg >= 0:
- d['x0'] = px_all[:, -1]
- d['y0'] = py_all[:, -1]
- d['x0e'] = pxe_all[:, -1]
- d['y0e'] = pye_all[:, -1]
-
- if poly_deg >= 1:
- d['vx'] = px_all[:, -2]
- d['vy'] = py_all[:, -2]
- d['vxe'] = pxe_all[:, -2]
- d['vye'] = pye_all[:, -2]
-
- if poly_deg >= 2:
- d['ax'] = px_all[:, -3]
- d['ay'] = py_all[:, -3]
- d['axe'] = pxe_all[:, -3]
- d['aye'] = pye_all[:, -3]
-
- pdb.set_trace()
-
- return
+"""
+Functions specific to OB120169 were moved to align_old_functions.py
+"""
def calc_mag_avg_all_stars(d):
# Determine how many stars there are.
@@ -2383,7 +2139,6 @@ def transform_and_match(table1, table2, transform, dr_tol=1.0, dm_tol=None, verb
y2 = table2['y']
m2 = table2['m']
-
# Transform x, y coordinates from starlist 1 into starlist 2
x1t, y1t = transform.evaluate(x1, y1)
@@ -2464,7 +2219,7 @@ def find_transform(table1, table1_trans, table2, transModel=transforms.PolyTrans
# calculate weights from *transformed* coords. This is where we use the
# transformation object
- if (table1_trans != None) and ('xe' in table1_trans.colnames):
+ if (table1_trans is not None) and ('xe' in table1_trans.colnames):
x1e = table1_trans['xe']
y1e = table1_trans['ye']
@@ -2707,6 +2462,8 @@ def write_transform(transform, starlist, reference, N_trans, deltaMag=0, restric
return
+# Transform_from_file original version moved to align_old_functions.py
+# This version makes the transFile an object and uses transform_from_object
def transform_from_file(starlist, transFile):
"""
Apply transformation from transFile to starlist. Returns astropy table with
@@ -2714,8 +2471,7 @@ def transform_from_file(starlist, transFile):
positions/position errors, plus velocities and velocity errors if they
are present in starlist.
- WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
- TRANSFORMS
+ WARNING: THIS CODE ONLY WORKS FOR POLYTRANSFORM TRANSFORMS
Parameters:
----------
@@ -2731,153 +2487,31 @@ def transform_from_file(starlist, transFile):
------
Copy of starlist astropy table with transformed coordinates.
"""
- # Make a copy of starlist. This is what we will eventually modify with
- # the transformed coordinates
- starlist_f = copy.deepcopy(starlist)
-
- # Check to see if velocities are present in starlist. If so, we will
- # need to transform these as well as positions
- vel = False
- keys = list(starlist.keys())
- if 'vx' in keys:
- vel = True
-
- # Extract needed information from starlist
- x_orig = starlist['x']
- y_orig = starlist['y']
- xe_orig = starlist['xe']
- ye_orig = starlist['ye']
-
- if vel:
- x0_orig = starlist['x0']
- y0_orig = starlist['y0']
- x0e_orig = starlist['x0e']
- y0e_orig = starlist['y0e']
-
- vx_orig = starlist['vx']
- vy_orig = starlist['vy']
- vxe_orig = starlist['vxe']
- vye_orig = starlist['vye']
-
- # Read transFile
- trans = Table.read(transFile, format='ascii.commented_header', header_start=-1)
- Xcoeff = trans['Xcoeff']
- Ycoeff = trans['Ycoeff']
-
- #-----------------------------------------------#
- # General equation for applying the transform
- #-----------------------------------------------#
- #"""
+ # Make transform object
+ trans_table = Table.read(transFile, format='ascii.commented_header', header_start=-1)
+ Xcoeff = trans_table['Xcoeff']
+ Ycoeff = trans_table['Ycoeff']
# First determine the order based on the number of terms
# Comes from Nterms = (N+1)*(N+2) / 2.
order = (np.sqrt(1 + 8*len(Xcoeff)) - 3) / 2.
-
if order%1 != 0:
print( 'Incorrect number of coefficients for polynomial')
print( 'Stopping')
return
order = int(order)
-
- # Position transformation
- x_new, y_new = transform_pos_from_file(Xcoeff, Ycoeff, order, x_orig,
- y_orig)
+ # Do transform
+ transform = transforms.PolyTransform(order, Xcoeff, Ycoeff)
+ return transform_from_object(starlist, transform)
- if vel:
- x0_new, y0_new = transform_pos_from_file(Xcoeff, Ycoeff, order, x0_orig,
- y0_orig)
-
- # Position error transformation
- xe_new, ye_new = transform_poserr_from_file(Xcoeff, Ycoeff, order, xe_orig,
- ye_orig, x_orig, y_orig)
-
- if vel:
- x0e_new, y0e_new = transform_poserr_from_file(Xcoeff, Ycoeff, order, x0e_orig,
- y0e_orig, x0_orig, y0_orig)
-
- if vel:
- # Velocity transformation
- vx_new, vy_new = transform_vel_from_file(Xcoeff, Ycoeff, order, vx_orig,
- vy_orig, x_orig, y_orig)
-
- # Velocity error transformation
- vxe_new, vye_new = transform_velerr_from_file(Xcoeff, Ycoeff, order,
- vxe_orig, vye_orig,
- vx_orig, vy_orig,
- xe_orig, ye_orig,
- x_orig, y_orig)
-
- #----------------------------------------#
- # Hard coded example: old but functional
- #----------------------------------------#
- """
- # How the transformation is applied depends on the type of transform.
- # This can be determined by the length of Xcoeff, Ycoeff
- if len(Xcoeff) == 3:
- x_new = Xcoeff[0] + Xcoeff[1] * x_orig + Xcoeff[2] * y_orig
- y_new = Ycoeff[0] + Ycoeff[1] * x_orig + Ycoeff[2] * y_orig
- xe_new = np.sqrt( (Xcoeff[1] * xe_orig)**2 + (Xcoeff[2] * ye_orig)**2 )
- ye_new = np.sqrt( (Ycoeff[1] * xe_orig)**2 + (Ycoeff[2] * ye_orig)**2 )
-
- if vel:
- vx_new = Xcoeff[1] * vx_orig + Xcoeff[2] * vy_orig
- vy_new = Ycoeff[1] * vx_orig + Ycoeff[2] * vy_orig
- vxe_new = np.sqrt( (Xcoeff[1] * vxe_orig)**2 + (Xcoeff[2] * vye_orig)**2 )
- vye_new = np.sqrt( (Ycoeff[1] * vxe_orig)**2 + (Ycoeff[2] * vye_orig)**2 )
-
- elif len(Xcoeff) == 6:
- x_new = Xcoeff[0] + Xcoeff[1]*x_orig + Xcoeff[3]*x_orig**2 + Xcoeff[2]*y_orig + \
- Xcoeff[5]*y_orig**2. + Xcoeff[4]*x_orig*y_orig
-
- y_new = Ycoeff[0] + Ycoeff[1]*x_orig + Ycoeff[3]*x_orig**2 + Ycoeff[2]*y_orig + \
- Ycoeff[5]*y_orig**2. + Ycoeff[4]*x_orig*y_orig
-
- xe_new = np.sqrt( (Xcoeff[1] + 2*Xcoeff[3]*x_orig + Xcoeff[4]*y_orig)**2 * xe_orig**2 + \
- (Xcoeff[2] + 2*Xcoeff[5]*y_orig + Xcoeff[4]*x_orig)**2 * ye_orig**2 )
-
- ye_new = np.sqrt( (Ycoeff[1] + 2*Ycoeff[3]*x_orig + Ycoeff[4]*y_orig)**2 * xe_orig**2 + \
- (Ycoeff[2] + 2*Ycoeff[5]*y_orig + Ycoeff[4]*x_orig)**2 * ye_orig**2 )
-
- if vel:
- vx_new = Xcoeff[1]*vx_orig + 2*Xcoeff[3]*x_orig*vx_orig + Xcoeff[2]*vy_orig + \
- 2.*Xcoeff[5]*y_orig*vy_orig + Xcoeff[4]*(x_orig*vy_orig + vx_orig*y_orig)
-
- vy_new = Ycoeff[1]*vx_orig + 2*Ycoeff[3]*x_orig*vx_orig + Ycoeff[2]*vy_orig + \
- 2.*Ycoeff[5]*y_orig*vy_orig + Ycoeff[4]*(x_orig*vy_orig + vx_orig*y_orig)
-
- vxe_new = np.sqrt( (Xcoeff[1] + 2*Xcoeff[3]*x_orig + Xcoeff[4]*y_orig)**2 * vxe_orig**2 + \
- (Xcoeff[2] + 2*Xcoeff[5]*y_orig + Xcoeff[4]*x_orig)**2 * vye_orig**2 + \
- (2*Xcoeff[3]*vx_orig + Xcoeff[4]*vy_orig)**2 * xe_orig**2 + \
- (2*Xcoeff[5]*vy_orig + Xcoeff[4]*vx_orig)**2 * ye_orig**2 )
-
- vye_new = np.sqrt( (Ycoeff[1] + 2*Ycoeff[3]*x_orig + Ycoeff[4]*y_orig)**2 * vxe_orig**2 + \
- (Ycoeff[2] + 2*Ycoeff[5]*y_orig + Ycoeff[4]*x_orig)**2 * vye_orig**2 + \
- (2*Ycoeff[3]*vx_orig + Ycoeff[4]*vy_orig)**2 * xe_orig**2 + \
- (2*Ycoeff[5]*vy_orig + Ycoeff[4]*vx_orig)**2 * ye_orig**2 )
- """
- #Update transformed coords to copy of astropy table
- starlist_f['x'] = x_new
- starlist_f['y'] = y_new
- starlist_f['xe'] = xe_new
- starlist_f['ye'] = ye_new
- if vel:
- starlist_f['x0'] = x0_new
- starlist_f['y0'] = y0_new
- starlist_f['x0e'] = x0e_new
- starlist_f['y0e'] = y0e_new
- starlist_f['vx'] = vx_new
- starlist_f['vy'] = vy_new
- starlist_f['vxe'] = vxe_new
- starlist_f['vye'] = vye_new
-
- return starlist_f
-
def transform_from_object(starlist, transform):
"""
Apply transformation to starlist. Returns astropy table with
transformed positions/position errors, velocities and velocity errors
- if they are present in starlits
+    if they are present in starlist. If a more complex motion_model is
+ implemented, the motion parameters are set to nan, as we need the full time
+ series to refit.
Parameters:
----------
@@ -2899,9 +2533,22 @@ def transform_from_object(starlist, transform):
starlist_f = copy.deepcopy(starlist)
keys = list(starlist.keys())
- # Check to see if velocities are present in starlist. If so, we will
- # need to transform these as well as positions
- vel = 'vx' in keys
+ # Check to see if velocities or motion_model are present in starlist.
+    vel = ('vx' in keys) and ("motion_model_input" not in keys)
+ mot = ("motion_model_input" in keys)
+ # If the only motion models used are Fixed and Linear, we can still transform velocities.
+ if mot:
+ motion_models_unique = list(np.unique(starlist_f['motion_model_input']))
+ if 'Linear' in motion_models_unique:
+ motion_models_unique.remove('Linear')
+ if 'Fixed' in motion_models_unique:
+ motion_models_unique.remove('Fixed')
+ if len(motion_models_unique)==0:
+ vel=True
+ mot=False
+
+ # Prior code before motion_model implementation
+ # Can still be used as shortcut for Linear+Fixed motion_model only
err = 'xe' in keys
# Extract needed information from starlist
@@ -2911,27 +2558,25 @@ def transform_from_object(starlist, transform):
if err:
xe = starlist_f['xe']
ye = starlist_f['ye']
+ else:
+ xe = np.zeros(len(starlist_f))
+ ye = np.zeros(len(starlist_f))
if vel:
x0 = starlist_f['x0']
y0 = starlist_f['y0']
- x0e = starlist_f['x0e']
- y0e = starlist_f['y0e']
+ x0e = starlist_f['x0_err']
+ y0e = starlist_f['y0_err']
vx = starlist_f['vx']
vy = starlist_f['vy']
- vxe = starlist_f['vxe']
- vye = starlist_f['vye']
-
+ vxe = starlist_f['vx_err']
+ vye = starlist_f['vy_err']
+
# calculate the transformed position and velocity
-
- # (x_new, y_new, xe_new, ye_new) in (x,y)
x_new, y_new, xe_new, ye_new = position_transform_from_object(x, y, xe, ye, transform)
-
if vel:
- # (x0_new, y0_new, x0e_new, y0e_new) in (x0, y0, x0e, y0e)
x0_new, y0_new, x0e_new, y0e_new = position_transform_from_object(x0, y0, x0e, y0e, transform)
- # (vx_new, vy_new, vxe_new, vye_new) in (x0, y0, x0e, y0e, vx, vy, vxe, vye)
vx_new, vy_new, vxe_new, vye_new = velocity_transform_from_object(x0, y0, x0e, y0e, vx, vy, vxe, vye, transform)
# update transformed coords to copy of astropy table
@@ -2943,19 +2588,24 @@ def transform_from_object(starlist, transform):
if vel:
starlist_f['x0'] = x0_new
starlist_f['y0'] = y0_new
- starlist_f['x0e'] = x0e_new
- starlist_f['y0e'] = y0e_new
+ starlist_f['x0_err'] = x0e_new
+ starlist_f['y0_err'] = y0e_new
starlist_f['vx'] = vx_new
starlist_f['vy'] = vy_new
- starlist_f['vxe'] = vxe_new
- starlist_f['vye'] = vye_new
+ starlist_f['vx_err'] = vxe_new
+ starlist_f['vy_err'] = vye_new
+
+ # For more complicated motion_models,
+ # we can't easily transform them, set the values to nans and refit later.
+ if mot:
+ motion_model_params = motion_model.get_all_motion_model_param_names()
+ for param in motion_model_params:
+ if param in keys:
+ starlist_f[param] = np.nan
return starlist_f
-
-
-
def position_transform_from_object(x, y, xe, ye, transform):
"""
given the orginal position and position error, calculate the transformed
@@ -3007,10 +2657,9 @@ def position_transform_from_object(x, y, xe, ye, transform):
for j in range(1, N+2-i):
sub = int(2*N + 2 + j + (2*N+2-i) * (i-1)/2.)
y_new += Ycoeff[sub] * (x**i) * (y**j)
-
"""
- THIS IS WRONG BELOW!
+    NOTE: an earlier warning flagged the error propagation below as wrong; on review it now appears correct.
Currently doing:
((A + B + C) * xe)**2
@@ -3155,7 +2804,6 @@ def velocity_transform_from_object(x0, y0, x0e, y0e, vx, vy, vxe, vye, transform
vxe_new = np.sqrt((temp1*x0e)**2 + (temp2*y0e)**2 + (temp3*vxe)**2 + (temp4*vye)**2)
-
vye_new = 0
temp1 = 0
temp2 = 0
@@ -3202,291 +2850,6 @@ def velocity_transform_from_object(x0, y0, x0e, y0e, vx, vy, vxe, vye, transform
return vx_new, vy_new, vxe_new, vye_new
-def transform_pos_from_file(Xcoeff, Ycoeff, order, x_orig, y_orig):
- """
- Given the read-in coefficients from transform_from_file, apply the
- transformation to the observed positions. This is generalized to
- work with any order polynomial transform.
-
- WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
- TRANSFORMS
-
- Parameters:
- ----------
- Xcoeff: Array
- Array with the coefficients of the X pos transformation
-
- Ycoeff: Array
- Array with the coefficients of the Y pos transformation
-
- order: int
- Order of transformation
-
- x_orig: array
- Array with the original X positions
-
- y_orig: array
- Array with the original Y positions
-
- Output:
- ------
- x_new: array
- Transformed X positions
-
- y_new: array
- Transformed Y positions
-
- """
- idx = 0 # coeff index
- x_new = 0.0
- y_new = 0.0
- for i in range(order+1):
- for j in range(i+1):
- x_new += Xcoeff[idx] * x_orig**(i-j) * y_orig**j
- y_new += Ycoeff[idx] * x_orig**(i-j) * y_orig**j
-
- idx += 1
-
- return x_new, y_new
-
-def transform_poserr_from_file(Xcoeff, Ycoeff, order, xe_orig, ye_orig, x_orig, y_orig):
- """
- Given the read-in coefficients from transform_from_file, apply the
- transformation to the observed position errors. This is generalized to
- work with any order transform.
-
- WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
- TRANSFORMS
-
- Parameters:
- ----------
- Xcoeff: Array
- Array with the coefficients of the X pos transformation
-
- Ycoeff: Array
- Array with the coefficients of the Y pos transformation
-
- order: int
- Order of transformation
-
- xe_orig: array
- Array with the original X position errs
-
- ye_orig: array
- Array with the original Y position errs
-
- x_orig: array
- Array with the original X positions
-
- y_orig: array
- Array with the original Y positions
-
- Output:
- ------
- xe_new: array
- Transformed X position errs
-
- ye_new: array
- Transformed Y position errs
- """
- idx = 0 # coeff index
- xe_new_tmp1 = 0.0
- ye_new_tmp1 = 0.0
- xe_new_tmp2 = 0.0
- ye_new_tmp2 = 0.0
-
- # First loop: dx'/dx
- for i in range(order+1):
- for j in range(i+1):
- xe_new_tmp1 += Xcoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j
- ye_new_tmp1 += Ycoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j
-
- idx += 1
-
- # Second loop: dy'/dy
- idx = 0 # coeff index
- for i in range(order+1):
- for j in range(i+1):
- xe_new_tmp2 += Xcoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
- ye_new_tmp2 += Ycoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
-
- idx += 1
- # Take square root for xe/ye_new
- xe_new = np.sqrt((xe_new_tmp1 * xe_orig)**2 + (xe_new_tmp2 * ye_orig)**2)
- ye_new = np.sqrt((ye_new_tmp1 * ye_orig)**2 + (ye_new_tmp2 * ye_orig)**2)
-
- return xe_new, ye_new
-
-def transform_vel_from_file(Xcoeff, Ycoeff, order, vx_orig, vy_orig, x_orig, y_orig):
- """
- Given the read-in coefficients from transform_from_file, apply the
- transformation to the observed proper motions. This is generalized to
- work with any order transform.
-
- WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
- TRANSFORMS
-
- Parameters:
- ----------
- Xcoeff: Array
- Array with the coefficients of the X pos transformation
-
- Ycoeff: Array
- Array with the coefficients of the Y pos transformation
-
- order: int
- Order of transformation
-
- vx_orig: array
- Array with the original X proper motions
-
- vy_orig: array
- Array with the original Y proper motions
-
- x_orig: array
- Array with the original X positions
-
- y_orig: array
- Array with the original Y positions
-
- Output:
- ------
- vx_new: array
- Transformed X proper motions
-
- vy_new: array
- Transformed Y proper motions
- """
- idx = 0 # coeff index
- vx_new = 0.0
- vy_new = 0.0
- # First loop: dx'/dx
- for i in range(order+1):
- for j in range(i+1):
- vx_new += Xcoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j * vx_orig
- vy_new += Ycoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j * vx_orig
-
- idx += 1
- # Second loop: dy'/dy
- idx = 0 # coeff index
- for i in range(order+1):
- for j in range(i+1):
- vx_new += Xcoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1) * vy_orig
- vy_new += Ycoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1) * vy_orig
-
- idx += 1
-
- return vx_new, vy_new
-
-def transform_velerr_from_file(Xcoeff, Ycoeff, order, vxe_orig, vye_orig, vx_orig,
- vy_orig, xe_orig, ye_orig, x_orig, y_orig):
- """
- Given the read-in coefficients from transform_from_file, apply the
- transformation to the observed proper motion errors. This is generalized to
- work with any order transform.
-
- WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
- TRANSFORMS
-
- Parameters:
- ----------
- Xcoeff: Array
- Array with the coefficients of the X pos transformation
-
- Ycoeff: Array
- Array with the coefficients of the Y pos transformation
-
- order: int
- Order of transformation
-
- vxe_orig: array
- Array with the original X proper motion errs
-
- vye_orig: array
- Array with the original Y proper motion errs
-
- vx_orig: array
- Array with the original X proper motions
-
- vy_orig: array
- Array with the original Y proper motions
-
- xe_orig: array
- Array with the original X position errs
-
- ye_orig: array
- Array with the original Y position errs
-
- x_orig: array
- Array with the original X positions
-
- y_orig: array
- Array with the original Y positions
-
- Output:
- ------
- vxe_new: array
- Transformed X proper motion errs
-
- vye_new: array
- Transformed Y proper motion errs
- """
- idx = 0
- vxe_new_tmp1 = 0.0
- vye_new_tmp1 = 0.0
- vxe_new_tmp2 = 0.0
- vye_new_tmp2 = 0.0
- vxe_new_tmp3 = 0.0
- vye_new_tmp3 = 0.0
- vxe_new_tmp4 = 0.0
- vye_new_tmp4 = 0.0
-
-
- # First loop: dvx' / dx
- for i in range(order+1):
- for j in range(i+1):
- vxe_new_tmp1 += Xcoeff[idx] * (i-j) * (i-j-1) * x_orig**(i-j-2) * y_orig**j * vx_orig
- vxe_new_tmp1 += Xcoeff[idx] * (j) * (i-j) * x_orig**(i-j-1) * y_orig**(j-1) * vy_orig
- vye_new_tmp1 += Ycoeff[idx] * (i-j) * (i-j-1) * x_orig**(i-j-2) * y_orig**j * vx_orig
- vye_new_tmp1 += Ycoeff[idx] * (j) * (i-j) * x_orig**(i-j-1) * y_orig**(j-1) * vy_orig
-
- idx += 1
-
- # Second loop: dvx' / dy
- idx = 0
- for i in range(order+1):
- for j in range(i+1):
- vxe_new_tmp2 += Xcoeff[idx] * (i-j) * (j) * x_orig**(i-j-1) * y_orig**(j-1) * vx_orig
- vxe_new_tmp2 += Xcoeff[idx] * (j) * (j-1) * x_orig**(i-j-1) * y_orig**(j-2) * vy_orig
- vye_new_tmp2 += Ycoeff[idx] * (i-j) * (j) * x_orig**(i-j-1) * y_orig**(j-1) * vx_orig
- vye_new_tmp2 += Ycoeff[idx] * (j) * (j-1) * x_orig**(i-j-1) * y_orig**(j-2) * vy_orig
-
- idx += 1
-
- # Third loop: dvx' / dvx
- idx = 0
- for i in range(order+1):
- for j in range(i+1):
- vxe_new_tmp3 += Xcoeff[idx] * (i-j) * x_orig**(i-j-1) * y_orig**j
- vye_new_tmp3 += Ycoeff[idx] * (i-j) * x_orig**(i-j-1) * y_orig**j
-
- idx += 1
-
- # Fourth loop: dvx' / dvy
- idx = 0
- for i in range(order+1):
- for j in range(i+1):
- vxe_new_tmp4 += Xcoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
- vye_new_tmp4 += Ycoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
-
- idx += 1
-
- vxe_new = np.sqrt((vxe_new_tmp1 * xe_orig)**2 + (vxe_new_tmp2 * ye_orig)**2 + \
- (vxe_new_tmp3 * vxe_orig)**2 + (vxe_new_tmp4 * vye_orig)**2)
- vye_new = np.sqrt((vye_new_tmp1 * xe_orig)**2 + (vye_new_tmp2 * ye_orig)**2 + \
- (vye_new_tmp3 * vxe_orig)**2 + (vye_new_tmp4 * vye_orig)**2)
-
- return vxe_new, vye_new
def check_iter_tolerances(iters, dr_tol, dm_tol, outlier_tol):
@@ -3516,7 +2879,7 @@ def check_trans_input(list_of_starlists, trans_input, mag_trans):
return
-def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
+def trans_initial_guess(ref_list, star_list, trans_args, motion_model_dict, mode='miracle',
ignore_contains='star', verbose=True, n_req_match=3,
mag_trans=True, order=1):
"""
@@ -3554,7 +2917,7 @@ def trans_initial_guess(ref_list, star_list, trans_args, mode='miracle',
# If there are velocities in the reference list, use them.
# We assume velocities are in the same units as the positions.
- xref, yref = get_pos_at_time(star_list['t'][0], ref_list)
+ xref, yref = get_pos_at_time(star_list['t'][0], ref_list, motion_model_dict)
if 'm' in ref_list.colnames:
mref = ref_list['m']
else:
@@ -3637,13 +3000,13 @@ def copy_and_rename_for_ref(star_list):
if 'xe' in star_list.colnames:
old_cols += ['xe']
- new_cols += ['x0e']
+ new_cols += ['x0_err']
if 'ye' in star_list.colnames:
old_cols += ['ye']
- new_cols += ['y0e']
+ new_cols += ['y0_err']
if 'me' in star_list.colnames:
old_cols += ['me']
- new_cols += ['m0e']
+ new_cols += ['m0_err']
if 'w' in star_list.colnames:
old_cols += ['w']
new_cols += ['w']
@@ -3792,11 +3155,12 @@ def get_weighting_scheme(weights, ref_list, star_list):
return weight
-def get_pos_at_time(t, starlist, use_vel=True):
+# TODO: This is sometimes run on a StarTable, not a StarList, at least as currently used
+def get_pos_at_time(t, starlist, motion_model_dict):
"""
- Take a starlist, check to see if it has velocity columns.
+ Take a starlist, check to see if it has motion/velocity columns.
If it does, then propogate the positions forward in time
- to the desired epoch. If no velocities exist, then just
+ to the desired epoch. If no motion/velocities exist, then just
use ['x0', 'y0'] or ['x', 'y']
Inputs
@@ -3806,17 +3170,21 @@ def get_pos_at_time(t, starlist, use_vel=True):
but it should be in the same units
as the 't0' column in starlist.
"""
- if use_vel and ('vx' in starlist.colnames) and ('vy' in starlist.colnames):
- dt = t - starlist['t0']
- x = starlist['x0'] + (starlist['vx'] * dt)
- y = starlist['y0'] + (starlist['vy'] * dt)
+ # Check for motion model
+ if 'motion_model_used' in starlist.colnames:
+ x,y,xe,ye = starlist.get_star_positions_at_time(t, motion_model_dict, allow_alt_models=True)
+ # If no motion model, check for velocities
+ elif ('vx' in starlist.colnames) and ('vy' in starlist.colnames):
+ x = starlist['x0'] + starlist['vx']*(t-starlist['t0'])
+ y = starlist['y0'] + starlist['vy']*(t-starlist['t0'])
+    # If no velocities, try fitted position
+ elif ('x0' in starlist.colnames) and ('y0' in starlist.colnames):
+ x = starlist['x0']
+ y = starlist['y0']
+ # Otherwise, use measured position
else:
- if ('x0' in starlist.colnames) and ('y0' in starlist.colnames):
- x = starlist['x0']
- y = starlist['y0']
- else:
- x = starlist['x']
- y = starlist['y']
+ x = starlist['x']
+ y = starlist['y']
return (x, y)
diff --git a/flystar/align_old_functions.py b/flystar/align_old_functions.py
new file mode 100755
index 0000000..9bae670
--- /dev/null
+++ b/flystar/align_old_functions.py
@@ -0,0 +1,828 @@
+"""
+Old functions that are only referenced in examples and template
+"""
+def transform_from_file(starlist, transFile):
+ """
+ Apply transformation from transFile to starlist. Returns astropy table with
+ added columns with the transformed coordinates. NOTE: Transforms
+ positions/position errors, plus velocities and velocity errors if they
+ are present in starlist.
+
+ WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
+ TRANSFORMS
+
+ Parameters:
+ ----------
+ starlist: astropy table
+ Starlist we want to apply the transformation too. Must already
+ have standard column headers
+
+ transFile: ascii file
+ File with the transformation coefficients. Assumed to be output of
+ write_transform, with coefficients specified as code documents
+
+ Output:
+ ------
+ Copy of starlist astropy table with transformed coordinates.
+ """
+ # Make a copy of starlist. This is what we will eventually modify with
+ # the transformed coordinates
+ starlist_f = copy.deepcopy(starlist)
+
+ # Check to see if velocities are present in starlist. If so, we will
+ # need to transform these as well as positions
+ vel = False
+ keys = list(starlist.keys())
+ if 'vx' in keys:
+ vel = True
+
+ # Extract needed information from starlist
+ x_orig = starlist['x']
+ y_orig = starlist['y']
+ xe_orig = starlist['xe']
+ ye_orig = starlist['ye']
+
+ if vel:
+ x0_orig = starlist['x0']
+ y0_orig = starlist['y0']
+ x0e_orig = starlist['x0_err']
+ y0e_orig = starlist['y0_err']
+
+ vx_orig = starlist['vx']
+ vy_orig = starlist['vy']
+ vxe_orig = starlist['vx_err']
+ vye_orig = starlist['vy_err']
+
+ # Read transFile
+ trans = Table.read(transFile, format='ascii.commented_header', header_start=-1)
+ Xcoeff = trans['Xcoeff']
+ Ycoeff = trans['Ycoeff']
+
+ #-----------------------------------------------#
+ # General equation for applying the transform
+ #-----------------------------------------------#
+ #"""
+ # First determine the order based on the number of terms
+ # Comes from Nterms = (N+1)*(N+2) / 2.
+ order = (np.sqrt(1 + 8*len(Xcoeff)) - 3) / 2.
+
+ if order%1 != 0:
+ print( 'Incorrect number of coefficients for polynomial')
+ print( 'Stopping')
+ return
+ order = int(order)
+
+ # Position transformation
+ x_new, y_new = transform_pos_from_file(Xcoeff, Ycoeff, order, x_orig,
+ y_orig)
+
+ if vel:
+ x0_new, y0_new = transform_pos_from_file(Xcoeff, Ycoeff, order, x0_orig,
+ y0_orig)
+
+ # Position error transformation
+ xe_new, ye_new = transform_poserr_from_file(Xcoeff, Ycoeff, order, xe_orig,
+ ye_orig, x_orig, y_orig)
+
+ if vel:
+ x0e_new, y0e_new = transform_poserr_from_file(Xcoeff, Ycoeff, order, x0e_orig,
+ y0e_orig, x0_orig, y0_orig)
+
+ if vel:
+ # Velocity transformation
+ vx_new, vy_new = transform_vel_from_file(Xcoeff, Ycoeff, order, vx_orig,
+ vy_orig, x_orig, y_orig)
+
+ # Velocity error transformation
+ vxe_new, vye_new = transform_velerr_from_file(Xcoeff, Ycoeff, order,
+ vxe_orig, vye_orig,
+ vx_orig, vy_orig,
+ xe_orig, ye_orig,
+ x_orig, y_orig)
+
+ #----------------------------------------#
+ # Hard coded example: old but functional
+ #----------------------------------------#
+ """
+ # How the transformation is applied depends on the type of transform.
+ # This can be determined by the length of Xcoeff, Ycoeff
+ if len(Xcoeff) == 3:
+ x_new = Xcoeff[0] + Xcoeff[1] * x_orig + Xcoeff[2] * y_orig
+ y_new = Ycoeff[0] + Ycoeff[1] * x_orig + Ycoeff[2] * y_orig
+ xe_new = np.sqrt( (Xcoeff[1] * xe_orig)**2 + (Xcoeff[2] * ye_orig)**2 )
+ ye_new = np.sqrt( (Ycoeff[1] * xe_orig)**2 + (Ycoeff[2] * ye_orig)**2 )
+
+ if vel:
+ vx_new = Xcoeff[1] * vx_orig + Xcoeff[2] * vy_orig
+ vy_new = Ycoeff[1] * vx_orig + Ycoeff[2] * vy_orig
+ vxe_new = np.sqrt( (Xcoeff[1] * vxe_orig)**2 + (Xcoeff[2] * vye_orig)**2 )
+ vye_new = np.sqrt( (Ycoeff[1] * vxe_orig)**2 + (Ycoeff[2] * vye_orig)**2 )
+
+ elif len(Xcoeff) == 6:
+ x_new = Xcoeff[0] + Xcoeff[1]*x_orig + Xcoeff[3]*x_orig**2 + Xcoeff[2]*y_orig + \
+ Xcoeff[5]*y_orig**2. + Xcoeff[4]*x_orig*y_orig
+
+ y_new = Ycoeff[0] + Ycoeff[1]*x_orig + Ycoeff[3]*x_orig**2 + Ycoeff[2]*y_orig + \
+ Ycoeff[5]*y_orig**2. + Ycoeff[4]*x_orig*y_orig
+
+ xe_new = np.sqrt( (Xcoeff[1] + 2*Xcoeff[3]*x_orig + Xcoeff[4]*y_orig)**2 * xe_orig**2 + \
+ (Xcoeff[2] + 2*Xcoeff[5]*y_orig + Xcoeff[4]*x_orig)**2 * ye_orig**2 )
+
+ ye_new = np.sqrt( (Ycoeff[1] + 2*Ycoeff[3]*x_orig + Ycoeff[4]*y_orig)**2 * xe_orig**2 + \
+ (Ycoeff[2] + 2*Ycoeff[5]*y_orig + Ycoeff[4]*x_orig)**2 * ye_orig**2 )
+
+ if vel:
+ vx_new = Xcoeff[1]*vx_orig + 2*Xcoeff[3]*x_orig*vx_orig + Xcoeff[2]*vy_orig + \
+ 2.*Xcoeff[5]*y_orig*vy_orig + Xcoeff[4]*(x_orig*vy_orig + vx_orig*y_orig)
+
+ vy_new = Ycoeff[1]*vx_orig + 2*Ycoeff[3]*x_orig*vx_orig + Ycoeff[2]*vy_orig + \
+ 2.*Ycoeff[5]*y_orig*vy_orig + Ycoeff[4]*(x_orig*vy_orig + vx_orig*y_orig)
+
+ vxe_new = np.sqrt( (Xcoeff[1] + 2*Xcoeff[3]*x_orig + Xcoeff[4]*y_orig)**2 * vxe_orig**2 + \
+ (Xcoeff[2] + 2*Xcoeff[5]*y_orig + Xcoeff[4]*x_orig)**2 * vye_orig**2 + \
+ (2*Xcoeff[3]*vx_orig + Xcoeff[4]*vy_orig)**2 * xe_orig**2 + \
+ (2*Xcoeff[5]*vy_orig + Xcoeff[4]*vx_orig)**2 * ye_orig**2 )
+
+ vye_new = np.sqrt( (Ycoeff[1] + 2*Ycoeff[3]*x_orig + Ycoeff[4]*y_orig)**2 * vxe_orig**2 + \
+ (Ycoeff[2] + 2*Ycoeff[5]*y_orig + Ycoeff[4]*x_orig)**2 * vye_orig**2 + \
+ (2*Ycoeff[3]*vx_orig + Ycoeff[4]*vy_orig)**2 * xe_orig**2 + \
+ (2*Ycoeff[5]*vy_orig + Ycoeff[4]*vx_orig)**2 * ye_orig**2 )
+ """
+ #Update transformed coords to copy of astropy table
+ starlist_f['x'] = x_new
+ starlist_f['y'] = y_new
+ starlist_f['xe'] = xe_new
+ starlist_f['ye'] = ye_new
+
+ if vel:
+ starlist_f['x0'] = x0_new
+ starlist_f['y0'] = y0_new
+ starlist_f['x0_err'] = x0e_new
+ starlist_f['y0_err'] = y0e_new
+ starlist_f['vx'] = vx_new
+ starlist_f['vy'] = vy_new
+ starlist_f['vx_err'] = vxe_new
+ starlist_f['vy_err'] = vye_new
+
+ return starlist_f
+
+def transform_pos_from_file(Xcoeff, Ycoeff, order, x_orig, y_orig):
+ """
+ Given the read-in coefficients from transform_from_file, apply the
+ transformation to the observed positions. This is generalized to
+ work with any order polynomial transform.
+
+ WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
+ TRANSFORMS
+
+ Parameters:
+ ----------
+ Xcoeff: Array
+ Array with the coefficients of the X pos transformation
+
+ Ycoeff: Array
+ Array with the coefficients of the Y pos transformation
+
+ order: int
+ Order of transformation
+
+ x_orig: array
+ Array with the original X positions
+
+ y_orig: array
+ Array with the original Y positions
+
+ Output:
+ ------
+ x_new: array
+ Transformed X positions
+
+ y_new: array
+ Transformed Y positions
+
+ """
+ idx = 0 # coeff index
+ x_new = 0.0
+ y_new = 0.0
+ for i in range(order+1):
+ for j in range(i+1):
+ x_new += Xcoeff[idx] * x_orig**(i-j) * y_orig**j
+ y_new += Ycoeff[idx] * x_orig**(i-j) * y_orig**j
+
+ idx += 1
+
+ return x_new, y_new
+
+def transform_poserr_from_file(Xcoeff, Ycoeff, order, xe_orig, ye_orig, x_orig, y_orig):
+ """
+ Given the read-in coefficients from transform_from_file, apply the
+ transformation to the observed position errors. This is generalized to
+ work with any order transform.
+
+ WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
+ TRANSFORMS
+
+ Parameters:
+ ----------
+ Xcoeff: Array
+ Array with the coefficients of the X pos transformation
+
+ Ycoeff: Array
+ Array with the coefficients of the Y pos transformation
+
+ order: int
+ Order of transformation
+
+ xe_orig: array
+ Array with the original X position errs
+
+ ye_orig: array
+ Array with the original Y position errs
+
+ x_orig: array
+ Array with the original X positions
+
+ y_orig: array
+ Array with the original Y positions
+
+ Output:
+ ------
+ xe_new: array
+ Transformed X position errs
+
+ ye_new: array
+ Transformed Y position errs
+ """
+ idx = 0 # coeff index
+ xe_new_tmp1 = 0.0
+ ye_new_tmp1 = 0.0
+ xe_new_tmp2 = 0.0
+ ye_new_tmp2 = 0.0
+
+ # First loop: dx'/dx
+ for i in range(order+1):
+ for j in range(i+1):
+ xe_new_tmp1 += Xcoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j
+ ye_new_tmp1 += Ycoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j
+
+ idx += 1
+
+ # Second loop: dy'/dy
+ idx = 0 # coeff index
+ for i in range(order+1):
+ for j in range(i+1):
+ xe_new_tmp2 += Xcoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
+ ye_new_tmp2 += Ycoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
+
+ idx += 1
+ # Take square root for xe/ye_new
+ xe_new = np.sqrt((xe_new_tmp1 * xe_orig)**2 + (xe_new_tmp2 * ye_orig)**2)
+    ye_new = np.sqrt((ye_new_tmp1 * xe_orig)**2 + (ye_new_tmp2 * ye_orig)**2)
+
+ return xe_new, ye_new
+
+def transform_vel_from_file(Xcoeff, Ycoeff, order, vx_orig, vy_orig, x_orig, y_orig):
+ """
+ Given the read-in coefficients from transform_from_file, apply the
+ transformation to the observed proper motions. This is generalized to
+ work with any order transform.
+
+ WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
+ TRANSFORMS
+
+ Parameters:
+ ----------
+ Xcoeff: Array
+ Array with the coefficients of the X pos transformation
+
+ Ycoeff: Array
+ Array with the coefficients of the Y pos transformation
+
+ order: int
+ Order of transformation
+
+ vx_orig: array
+ Array with the original X proper motions
+
+ vy_orig: array
+ Array with the original Y proper motions
+
+ x_orig: array
+ Array with the original X positions
+
+ y_orig: array
+ Array with the original Y positions
+
+ Output:
+ ------
+ vx_new: array
+ Transformed X proper motions
+
+ vy_new: array
+ Transformed Y proper motions
+ """
+ idx = 0 # coeff index
+ vx_new = 0.0
+ vy_new = 0.0
+ # First loop: dx'/dx
+ for i in range(order+1):
+ for j in range(i+1):
+ vx_new += Xcoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j * vx_orig
+ vy_new += Ycoeff[idx] * (i - j) * x_orig**(i-j-1) * y_orig**j * vx_orig
+
+ idx += 1
+ # Second loop: dy'/dy
+ idx = 0 # coeff index
+ for i in range(order+1):
+ for j in range(i+1):
+ vx_new += Xcoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1) * vy_orig
+ vy_new += Ycoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1) * vy_orig
+
+ idx += 1
+
+ return vx_new, vy_new
+
+def transform_velerr_from_file(Xcoeff, Ycoeff, order, vxe_orig, vye_orig, vx_orig,
+ vy_orig, xe_orig, ye_orig, x_orig, y_orig):
+ """
+ Given the read-in coefficients from transform_from_file, apply the
+ transformation to the observed proper motion errors. This is generalized to
+ work with any order transform.
+
+ WARNING: THIS CODE WILL NOT WORK FOR LEGENDRE POLYNOMIAL
+ TRANSFORMS
+
+ Parameters:
+ ----------
+ Xcoeff: Array
+ Array with the coefficients of the X pos transformation
+
+ Ycoeff: Array
+ Array with the coefficients of the Y pos transformation
+
+ order: int
+ Order of transformation
+
+ vxe_orig: array
+ Array with the original X proper motion errs
+
+ vye_orig: array
+ Array with the original Y proper motion errs
+
+ vx_orig: array
+ Array with the original X proper motions
+
+ vy_orig: array
+ Array with the original Y proper motions
+
+ xe_orig: array
+ Array with the original X position errs
+
+ ye_orig: array
+ Array with the original Y position errs
+
+ x_orig: array
+ Array with the original X positions
+
+ y_orig: array
+ Array with the original Y positions
+
+ Output:
+ ------
+ vxe_new: array
+ Transformed X proper motion errs
+
+ vye_new: array
+ Transformed Y proper motion errs
+ """
+ idx = 0
+ vxe_new_tmp1 = 0.0
+ vye_new_tmp1 = 0.0
+ vxe_new_tmp2 = 0.0
+ vye_new_tmp2 = 0.0
+ vxe_new_tmp3 = 0.0
+ vye_new_tmp3 = 0.0
+ vxe_new_tmp4 = 0.0
+ vye_new_tmp4 = 0.0
+
+
+ # First loop: dvx' / dx
+ for i in range(order+1):
+ for j in range(i+1):
+ vxe_new_tmp1 += Xcoeff[idx] * (i-j) * (i-j-1) * x_orig**(i-j-2) * y_orig**j * vx_orig
+ vxe_new_tmp1 += Xcoeff[idx] * (j) * (i-j) * x_orig**(i-j-1) * y_orig**(j-1) * vy_orig
+ vye_new_tmp1 += Ycoeff[idx] * (i-j) * (i-j-1) * x_orig**(i-j-2) * y_orig**j * vx_orig
+ vye_new_tmp1 += Ycoeff[idx] * (j) * (i-j) * x_orig**(i-j-1) * y_orig**(j-1) * vy_orig
+
+ idx += 1
+
+ # Second loop: dvx' / dy
+ idx = 0
+ for i in range(order+1):
+ for j in range(i+1):
+ vxe_new_tmp2 += Xcoeff[idx] * (i-j) * (j) * x_orig**(i-j-1) * y_orig**(j-1) * vx_orig
+ vxe_new_tmp2 += Xcoeff[idx] * (j) * (j-1) * x_orig**(i-j-1) * y_orig**(j-2) * vy_orig
+ vye_new_tmp2 += Ycoeff[idx] * (i-j) * (j) * x_orig**(i-j-1) * y_orig**(j-1) * vx_orig
+ vye_new_tmp2 += Ycoeff[idx] * (j) * (j-1) * x_orig**(i-j-1) * y_orig**(j-2) * vy_orig
+
+ idx += 1
+
+ # Third loop: dvx' / dvx
+ idx = 0
+ for i in range(order+1):
+ for j in range(i+1):
+ vxe_new_tmp3 += Xcoeff[idx] * (i-j) * x_orig**(i-j-1) * y_orig**j
+ vye_new_tmp3 += Ycoeff[idx] * (i-j) * x_orig**(i-j-1) * y_orig**j
+
+ idx += 1
+
+ # Fourth loop: dvx' / dvy
+ idx = 0
+ for i in range(order+1):
+ for j in range(i+1):
+ vxe_new_tmp4 += Xcoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
+ vye_new_tmp4 += Ycoeff[idx] * (j) * x_orig**(i-j) * y_orig**(j-1)
+
+ idx += 1
+
+ vxe_new = np.sqrt((vxe_new_tmp1 * xe_orig)**2 + (vxe_new_tmp2 * ye_orig)**2 + \
+ (vxe_new_tmp3 * vxe_orig)**2 + (vxe_new_tmp4 * vye_orig)**2)
+ vye_new = np.sqrt((vye_new_tmp1 * xe_orig)**2 + (vye_new_tmp2 * ye_orig)**2 + \
+ (vye_new_tmp3 * vxe_orig)**2 + (vye_new_tmp4 * vye_orig)**2)
+
+ return vxe_new, vye_new
+
+
+
+
+
+
+"""
+Old functions with things hard-coded for OB120169
+"""
+
+def run_align_iter(catalog, trans_order=1, poly_deg=1, ref_mag_lim=19, ref_radius_lim=300):
+ # Load up data with matched stars.
+ d = Table.read(catalog)
+
+ # Determine how many epochs there are.
+ N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
+
+ # Determine how many stars there are.
+ N_stars = len(d)
+
+ # Determine the reference epoch
+ ref = d.meta['L_REF']
+
+ # Figure out the number of free parameters for the specified
+ # poly2d order.
+ poly2d = models.Polynomial2D(trans_order)
+ N_par_trans_per_epoch = 2.0 * poly2d.get_num_coeff(2) # one poly2d for each dimension (X, Y)
+ N_par_trans = N_par_trans_per_epoch * N_epochs
+
+ ##########
+ # First iteration -- align everything to REF epoch with zero velocities.
+ ##########
+ print('ALIGN_EPOCHS: run_align_iter() -- PASS 1')
+ ee_ref = d.meta['L_REF']
+
+ target_name = 'OB120169'
+
+ trans1, used1 = calc_transform_ref_epoch(d, target_name, ee_ref, ref_mag_lim, ref_radius_lim)
+
+ ##########
+    # Derive the velocity of each star using the round 1 transforms.
+ ##########
+ calc_polyfit_all_stars(d, poly_deg, init_fig_idx=0)
+
+ calc_mag_avg_all_stars(d)
+
+ tdx = np.where((d['name_0'] == 'OB120169') | (d['name_0'] == 'OB120169_L'))[0]
+ print(d[tdx]['name_0', 't0', 'mag', 'x0', 'vx', 'x0_err', 'vx_err', 'chi2x', 'y0', 'vy', 'y0_err', 'vy_err', 'chi2y', 'dof'])
+
+ ##########
+ # Second iteration -- align everything to reference positions derived from iteration 1
+ ##########
+ print('ALIGN_EPOCHS: run_align_iter() -- PASS 2')
+ target_name = 'OB120169'
+
+ trans2, used2 = calc_transform_ref_poly(d, target_name, poly_deg, ref_mag_lim, ref_radius_lim)
+
+ ##########
+    # Derive the velocity of each star using the round 2 transforms.
+ ##########
+ calc_polyfit_all_stars(d, poly_deg, init_fig_idx=4)
+
+ ##########
+ # Save output
+ ##########
+ d.write(catalog.replace('.fits', '_aln.fits'), overwrite=True)
+
+ return
+
+def calc_transform_ref_epoch(d, target_name, ee_ref, ref_mag_lim, ref_radius_lim):
+ # Determine how many epochs there are.
+ N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
+
+ # output array
+ trans = []
+ used = []
+
+ # Find the target
+ tdx = np.where(d['name_0'] == 'OB120169')[0][0]
+
+ # Reference values
+ t_ref = d['t_{0:d}'.format(ee_ref)]
+ m_ref = d['m_{0:d}'.format(ee_ref)]
+ x_ref = d['x_{0:d}'.format(ee_ref)]
+ y_ref = d['y_{0:d}'.format(ee_ref)]
+ xe_ref = d['xe_{0:d}'.format(ee_ref)]
+ ye_ref = d['ye_{0:d}'.format(ee_ref)]
+
+    # Calculate some quantities we use for selecting reference stars.
+ r_ref = np.hypot(x_ref - x_ref[tdx], y_ref - y_ref[tdx])
+
+ # Loop through and align each epoch to the reference epoch.
+ for ee in range(N_epochs):
+ # Pull out the X, Y positions (and errors) for the two
+ # starlists we are going to align.
+ x_epo = d['x_{0:d}'.format(ee)]
+ y_epo = d['y_{0:d}'.format(ee)]
+ t_epo = d['t_{0:d}'.format(ee)]
+ xe_epo = d['xe_{0:d}'.format(ee)]
+ ye_epo = d['ye_{0:d}'.format(ee)]
+
+ # Figure out the set of stars detected in both epochs.
+ idx = np.where((t_ref != 0) & (t_epo != 0) & (xe_ref != 0) & (xe_epo != 0))[0]
+
+ # Find those in both epochs AND reference stars. This is [idx][rdx]
+ rdx = np.where((r_ref[idx] < ref_radius_lim) & (m_ref[idx] < ref_mag_lim))[0]
+
+ # Average the positional errors together to get one weight per star.
+ xye_ref = (xe_ref + ye_ref) / 2.0
+ xye_epo = (xe_epo + ye_epo) / 2.0
+ xye_wgt = (xye_ref**2 + xye_epo**2)**0.5
+
+ # Calculate transform based on the matched stars
+ trans_tmp = transforms.PolyTransform(x_epo[idx][rdx], y_epo[idx][rdx], x_ref[idx][rdx], y_ref[idx][rdx],
+ weights=xye_wgt[idx][rdx], order=2)
+
+ trans.append(trans_tmp)
+
+
+        # Apply the transformation to the star positions and errors:
+ xt_epo = np.zeros(len(d), dtype=float)
+ yt_epo = np.zeros(len(d), dtype=float)
+ xet_epo = np.zeros(len(d), dtype=float)
+ yet_epo = np.zeros(len(d), dtype=float)
+
+ xt_epo[idx], xet_epo[idx], yt_epo[idx], yet_epo[idx] = trans_tmp.evaluate_errors(x_epo[idx], xe_epo[idx],
+ y_epo[idx], ye_epo[idx],
+ nsim=100)
+
+ d['xt_{0:d}'.format(ee)] = xt_epo
+ d['yt_{0:d}'.format(ee)] = yt_epo
+ d['xet_{0:d}'.format(ee)] = xet_epo
+ d['yet_{0:d}'.format(ee)] = yet_epo
+
+ # Record which stars we used in the transform.
+ used_tmp = np.zeros(len(d), dtype=bool)
+ used_tmp[idx[rdx]] = True
+
+ used.append(used_tmp)
+
+ if True:
+ plot_quiver_residuals(xt_epo, yt_epo, x_ref, y_ref, idx, rdx, 'Epoch: ' + str(ee))
+
+ used = np.array(used)
+
+ return trans, used
+
+def calc_transform_ref_poly(d, target_name, poly_deg, ref_mag_lim, ref_radius_lim):
+ # Determine how many epochs there are.
+ N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
+
+ # output array
+ trans = []
+ used = []
+
+ # Find the target
+ tdx = np.where(d['name_0'] == 'OB120169')[0][0]
+
+ # Temporary Reference values
+ t_ref = d['t0']
+ m_ref = d['mag']
+ x_ref = d['x0']
+ y_ref = d['y0']
+ xe_ref = d['x0_err']
+ ye_ref = d['y0_err']
+
+    # Calculate some quantities we use for selecting reference stars.
+ r_ref = np.hypot(x_ref - x_ref[tdx], y_ref - y_ref[tdx])
+
+ for ee in range(N_epochs):
+ # Pull out the X, Y positions (and errors) for the two
+ # starlists we are going to align.
+ x_epo = d['x_{0:d}'.format(ee)]
+ y_epo = d['y_{0:d}'.format(ee)]
+ t_epo = d['t_{0:d}'.format(ee)]
+ xe_epo = d['xe_{0:d}'.format(ee)]
+ ye_epo = d['ye_{0:d}'.format(ee)]
+
+ # Shift the reference position by the polyfit for each star.
+ dt = t_epo - t_ref
+ if poly_deg >= 0:
+ x_ref_ee = x_ref
+ y_ref_ee = y_ref
+ xe_ref_ee = x_ref
+ ye_ref_ee = y_ref
+
+ if poly_deg >= 1:
+ x_ref_ee += d['vx'] * dt
+ y_ref_ee += d['vy'] * dt
+ xe_ref_ee = np.hypot(xe_ref_ee, d['vx_err'] * dt)
+ ye_ref_ee = np.hypot(ye_ref_ee, d['vy_err'] * dt)
+
+ if poly_deg >= 2:
+ x_ref_ee += d['ax'] * dt
+ y_ref_ee += d['ay'] * dt
+ xe_ref_ee = np.hypot(xe_ref_ee, d['axe'] * dt)
+ ye_ref_ee = np.hypot(ye_ref_ee, d['aye'] * dt)
+
+ # Figure out the set of stars detected in both.
+ idx = np.where((t_ref != 0) & (t_epo != 0) & (xe_ref != 0) & (xe_epo != 0))[0]
+
+ # Find those in both AND reference stars. This is [idx][rdx]
+ rdx = np.where((r_ref[idx] < ref_radius_lim) & (m_ref[idx] < ref_mag_lim))[0]
+
+ # Average the positional errors together to get one weight per star.
+ xye_ref = (xe_ref_ee + ye_ref_ee) / 2.0
+ xye_epo = (xe_epo + ye_epo) / 2.0
+ xye_wgt = (xye_ref**2 + xye_epo**2)**0.5
+
+ # Calculate transform based on the matched stars
+ trans_tmp = transforms.PolyTransform(x_epo[idx][rdx], y_epo[idx][rdx], x_ref_ee[idx][rdx], y_ref_ee[idx][rdx],
+ weights=xye_wgt[idx][rdx], order=2)
+ trans.append(trans_tmp)
+
+        # Apply the transformation to the star positions and errors:
+ xt_epo = np.zeros(len(d), dtype=float)
+ yt_epo = np.zeros(len(d), dtype=float)
+ xet_epo = np.zeros(len(d), dtype=float)
+ yet_epo = np.zeros(len(d), dtype=float)
+
+ xt_epo[idx], xet_epo[idx], yt_epo[idx], yet_epo[idx] = trans_tmp.evaluate_errors(x_epo[idx], xe_epo[idx],
+ y_epo[idx], ye_epo[idx],
+ nsim=100)
+ d['xt_{0:d}'.format(ee)] = xt_epo
+ d['yt_{0:d}'.format(ee)] = yt_epo
+ d['xet_{0:d}'.format(ee)] = xet_epo
+ d['yet_{0:d}'.format(ee)] = yet_epo
+
+ # Record which stars we used in the transform.
+ used_tmp = np.zeros(len(d), dtype=bool)
+ used_tmp[idx[rdx]] = True
+
+ used.append(used_tmp)
+
+ if True:
+ plot_quiver_residuals(xt_epo, yt_epo, x_ref_ee, y_ref_ee, idx, rdx, 'Epoch: ' + str(ee))
+
+ used = np.array(used)
+
+ return trans, used
+
+def calc_polyfit_all_stars(d, poly_deg, init_fig_idx=0):
+ # Determine how many stars there are.
+ N_stars = len(d)
+
+ # Determine how many epochs there are.
+ N_epochs = len([n for n, c in enumerate(d.colnames) if c.startswith('name')])
+
+ # Setup some variables to save the results
+ t0_all = []
+ px_all = []
+ py_all = []
+ pxe_all = []
+ pye_all = []
+ chi2x_all = []
+ chi2y_all = []
+ dof_all = []
+
+ # Get the time array, which is the same for all stars.
+ # Also, sort the time indices.
+ t = np.array([d['t_{0:d}'.format(ee)][0] for ee in range(N_epochs)])
+ tdx = t.argsort()
+ t_sorted = t[tdx]
+
+ # Run polyfit on each star.
+ for ss in range(N_stars):
+ # Get the x, y, xe, ye, and t arrays for this star.
+ xt = np.array([d['xt_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
+ yt = np.array([d['yt_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
+ xet = np.array([d['xet_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
+ yet = np.array([d['yet_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
+ t_tmp = np.array([d['t_{0:d}'.format(ee)][ss] for ee in range(N_epochs)])
+
+ # Sort these arrays.
+ xt_sorted = xt[tdx]
+ yt_sorted = yt[tdx]
+ xet_sorted = xet[tdx]
+ yet_sorted = yet[tdx]
+ t_tmp_sorted = t_tmp[tdx]
+
+ # Get only the detected epochs.
+ edx = np.where(t_tmp_sorted != 0)[0]
+
+ # Calculate the weighted t0 (using the transformed errors).
+ weight_for_t0 = 1.0 / np.hypot(xet_sorted, yet_sorted)
+ t0 = np.average(t_sorted[edx], weights=weight_for_t0[edx])
+
+ # for ee in edx:
+ # print('{0:8.3f} {1:10.5f} {2:10.5f} {3:8.5f} {4:8.5f}'.format(t[ee], xt[ee], yt[ee], xet[ee], yet[ee]))
+ # pdb.set_trace()
+
+ # Run polyfit
+ dt = t_sorted - t0
+ px, covx = np.polyfit(dt[edx], xt_sorted[edx], poly_deg, w=1./xet_sorted[edx], cov=True)
+ py, covy = np.polyfit(dt[edx], yt_sorted[edx], poly_deg, w=1./yet_sorted[edx], cov=True)
+
+ pxe = np.sqrt(np.diag(covx))
+ pye = np.sqrt(np.diag(covy))
+
+
+ x_mod = np.polyval(px, dt[edx])
+ y_mod = np.polyval(py, dt[edx])
+ chi2x = np.sum( ((x_mod - xt_sorted[edx]) / xet_sorted[edx])**2 )
+ chi2y = np.sum( ((y_mod - yt_sorted[edx]) / yet_sorted[edx])**2 )
+ dof = len(edx) - (poly_deg + 1)
+
+ # Save results:
+ t0_all.append(t0)
+ px_all.append(px)
+ py_all.append(py)
+ pxe_all.append(pxe)
+ pye_all.append(pye)
+ chi2x_all.append(chi2x)
+ chi2y_all.append(chi2y)
+ dof_all.append(dof)
+
+ if d[ss]['name_0'] in ['OB120169', 'OB120169_L']:
+ gs = GridSpec(3, 2) # 3 rows, 1 column
+ fig = plt.figure(ss + 1 + init_fig_idx, figsize=(12, 8))
+ a0 = fig.add_subplot(gs[0:2, 0])
+ a1 = fig.add_subplot(gs[2, 0])
+ a2 = fig.add_subplot(gs[0:2, 1])
+ a3 = fig.add_subplot(gs[2, 1])
+
+ a0.errorbar(t_sorted[edx], xt_sorted[edx], yerr=xet_sorted[edx], fmt='ro')
+ a0.plot(t_sorted[edx], x_mod, 'k-')
+ a0.set_title(d[ss]['name_0'] + ' X')
+ a1.errorbar(t_sorted[edx], xt_sorted[edx] - x_mod, yerr=xet_sorted[edx], fmt='ro')
+ a1.axhline(0, linestyle='--')
+ a1.set_xlabel('Time (yrs)')
+ a2.errorbar(t_sorted[edx], yt_sorted[edx], yerr=yet_sorted[edx], fmt='ro')
+ a2.plot(t_sorted[edx], y_mod, 'k-')
+ a2.set_title(d[ss]['name_0'] + ' Y')
+ a3.errorbar(t_sorted[edx], yt_sorted[edx] - y_mod, yerr=yet_sorted[edx], fmt='ro')
+ a3.axhline(0, linestyle='--')
+ a3.set_xlabel('Time (yrs)')
+
+
+
+ t0_all = np.array(t0_all)
+ px_all = np.array(px_all)
+ py_all = np.array(py_all)
+ pxe_all = np.array(pxe_all)
+ pye_all = np.array(pye_all)
+ chi2x_all = np.array(chi2x_all)
+ chi2y_all = np.array(chi2y_all)
+ dof_all = np.array(dof_all)
+
+ # Done with all the stars... recast as numpy arrays and save to output table.
+ d['t0'] = t0_all
+ d['chi2x'] = chi2x_all
+ d['chi2y'] = chi2y_all
+ d['dof'] = dof_all
+ if poly_deg >= 0:
+ d['x0'] = px_all[:, -1]
+ d['y0'] = py_all[:, -1]
+ d['x0_err'] = pxe_all[:, -1]
+ d['y0_err'] = pye_all[:, -1]
+
+ if poly_deg >= 1:
+ d['vx'] = px_all[:, -2]
+ d['vy'] = py_all[:, -2]
+ d['vx_err'] = pxe_all[:, -2]
+ d['vy_err'] = pye_all[:, -2]
+
+ if poly_deg >= 2:
+ d['ax'] = px_all[:, -3]
+ d['ay'] = py_all[:, -3]
+ d['axe'] = pxe_all[:, -3]
+ d['aye'] = pye_all[:, -3]
+
+ pdb.set_trace()
+
+ return
+
diff --git a/flystar/analysis.py b/flystar/analysis.py
index 953461b..3121458 100644
--- a/flystar/analysis.py
+++ b/flystar/analysis.py
@@ -18,10 +18,10 @@
##################################################
# New codes for velocity support in FlyStar and using
-# the new StarTable and StarList format.
+# the new StarTable and StarList format.
##################################################
-def query_gaia(ra, dec, search_radius=30.0, table_name='gaiadr2'):
+def query_gaia(ra, dec, search_radius=30.0, table_name='gaiadr3'):
"""
Query the Gaia database at the specified location
and with the specified search radius
@@ -57,8 +57,59 @@ def query_gaia(ra, dec, search_radius=30.0, table_name='gaiadr2'):
return gaia
+def check_gaia_parallaxes(ra,dec,search_radius=10.0,table_name='gaiadr3',target='(unnamed)',
+ file_ext=''):
+ """
+ Query the Gaia database at the specified location
+ and with the specified search radius, and plot
+ parallaxes.
+
+ Input
+ ----------
+ ra : string
+ R.A. in hours in the format such as '17:45:40.3'
+
+ dec : string
+ Dec. in degrees in the format such as '-29:00:28.0'
+
+ search_radius : float
+ The search radius in arcseconds.
-def prepare_gaia_for_flystar(gaia, ra, dec, targets_dict=None, match_dr_max=0.2):
+ Optional Input
+ --------------
+ table_name : string
+ Options are 'gaiadr2' or 'gaiadr3'
+ """
+ # Query Gaia
+ gaia = query_gaia(ra,dec,search_radius=search_radius,table_name=table_name)
+ # Set up reasonable histogram bins
+ plim0,plim1 = np.min(gaia['parallax']),np.max(gaia['parallax'])
+ pplim0,pplim1 = np.min(gaia['parallax']/gaia['parallax_error']),np.max(gaia['parallax']/gaia['parallax_error'])
+ binwidth = 1
+ pbins = np.arange(np.floor(plim0),np.ceil(plim1)+binwidth,binwidth)
+ ppbins = np.arange(np.floor(pplim0),np.ceil(pplim1)+binwidth,binwidth)
+ # Find number where plx/plx_err>3
+ p_perr = (gaia['parallax']/gaia['parallax_error']).compressed()
+ nppe3 = sum((p_perr>3).astype(int))
+ nppen3 = sum((p_perr<-3).astype(int))
+ print(table_name,'stars within',search_radius,'\" with plx/plx_err>3: ', nppe3, ' of ', len(gaia['parallax']))
+ print(table_name,'stars within',search_radius,'\" with plx/plx_err<-3: ', nppen3, ' of ', len(gaia['parallax']))
+ # Plot
+ plt.subplots(nrows=1,ncols=2,figsize=(12,6))
+ plt.subplot(121)
+ plt.xlabel('parallax (mas)'); plt.ylabel('N stars')
+ plt.hist(gaia['parallax'],bins=pbins)
+ plt.yscale('log')
+ plt.title(table_name+' parallax histograms, '+str(search_radius)+'\" radius around '+target, loc='left')
+ plt.subplot(122)
+ plt.xlabel('parallax/parallax_error')
+ plt.hist(gaia['parallax']/gaia['parallax_error'],bins=ppbins)
+ plt.yscale('log')
+ plt.tight_layout()
+ plt.savefig('gaiaplx'+file_ext+'.png')
+
+
+def prepare_gaia_for_flystar(gaia, ra, dec, targets_dict=None, match_dr_max=0.2, pi_err_limit=0.4, default_motion_model='Linear'):
"""
Take a Gaia table (from astroquery) and produce a new table with a tangential projection
and shift such that the origin is centered on the target of interest.
@@ -90,14 +141,14 @@ def prepare_gaia_for_flystar(gaia, ra, dec, targets_dict=None, match_dr_max=0.2)
gaia_new['x0'] = x * -1.0
gaia_new['y0'] = y
- gaia_new['x0e'] = xe
- gaia_new['y0e'] = ye
+ gaia_new['x0_err'] = xe
+ gaia_new['y0_err'] = ye
# Also convert the velocities. Note that Gaia PM are already * cos(dec)
gaia_new['vx'] = gaia['pmra'].data * -1.0 / 1e3 # asec/yr
gaia_new['vy'] = gaia['pmdec'].data / 1e3
- gaia_new['vxe'] = gaia['pmra_error'].data / 1e3
- gaia_new['vye'] = gaia['pmdec_error'].data / 1e3
+ gaia_new['vx_err'] = gaia['pmra_error'].data / 1e3
+ gaia_new['vy_err'] = gaia['pmdec_error'].data / 1e3
gaia_new['t0'] = gaia['ref_epoch'].data
gaia_new['source_id'] = gaia['source_id'].data.astype('S19')
@@ -106,38 +157,78 @@ def prepare_gaia_for_flystar(gaia, ra, dec, targets_dict=None, match_dr_max=0.2)
idx = np.where(gaia['pmdec'].mask == True)[0]
gaia_new['vx'][idx] = 0.0
gaia_new['vy'][idx] = 0.0
- gaia_new['vxe'][idx] = 0.0
- gaia_new['vye'][idx] = 0.0
+ gaia_new['vx_err'][idx] = 0.0
+ gaia_new['vy_err'][idx] = 0.0
gaia_new['m'] = gaia['phot_g_mean_mag']
gaia_new['me'] = 1.09/gaia['phot_g_mean_flux_over_error']
- gaia_new['parallax'] = gaia['parallax']
- gaia_new['parallax_error'] = gaia['parallax_error']
+ gaia_new['pi'] = gaia['parallax'].data*1e-3
+ gaia_new['pi_err'] = gaia['parallax_error'].data*1e-3
# Set the velocities (and uncertainties) to zero if they aren't measured.
idx = np.where(np.isnan(gaia_new['vx']) == True)[0]
gaia_new['vx'][idx] = 0.0
- gaia_new['vxe'][idx] = 0.0
+ gaia_new['vx_err'][idx] = 0.0
gaia_new['vy'][idx] = 0.0
- gaia_new['vye'][idx] = 0.0
+ gaia_new['vy_err'][idx] = 0.0
+
+ # Cut out stars with high plx error and set motion models
+ idx = np.where((gaia_new['pi_err']>(pi_err_limit/1e3)) | (gaia['parallax'].mask == True))[0]
+ gaia_new['pi'][idx] = 0.0
+ gaia_new['pi_err'][idx] = 0.0
+ if default_motion_model=='Parallax':
+ gaia_new['motion_model_input'] = 'Parallax'
+ gaia_new['motion_model_used'] = 'Parallax'
+ gaia_new['motion_model_used'][idx] = 'Linear'
+ gaia_new['n_params'] = 3
+ gaia_new['n_params'][idx] = 2
+ elif default_motion_model=='Linear':
+ gaia_new['motion_model_input'] = 'Linear'
+ gaia_new['motion_model_used'] = 'Linear'
+ gaia_new['n_params'] = 2
+ elif default_motion_model=='Fixed':
+ gaia_new['motion_model_input'] = 'Fixed'
+ gaia_new['motion_model_used'] = 'Fixed'
+ gaia_new['n_params'] = 1
+ else:
+ print("Invalid motion model",default_motion_model,"- none assigned")
+
+ #macy additions to try to fix wild magnitude values
+ #gaia_new['ruwe'] = gaia['ruwe']
+ #try:
+ # gaia_new = gaia_new[~gaia_new['m'].mask]
+ #except:
+ # print('no invalig mags')
gaia_new = gaia_new.filled() #convert masked colunms to regular columns
if targets_dict != None:
- for targ_name, targ_coo in targets_dict.items():
- dx = gaia_new['x0'] - (targ_coo[0] * -1.0)
- dy = gaia_new['y0'] - targ_coo[1]
+# for targ_name, targ_coo in targets_dict.items():
+# dx = gaia_new['x0'] - (targ_coo[0] * -1.0)
+# dy = gaia_new['y0'] - targ_coo[1]
+# dr = np.hypot(dx, dy)
+#
+# idx = dr.argmin()
+#
+# if dr[idx] < match_dr_max:
+# gaia_new['name'][idx] = targ_name
+# print('Found match for: ', targ_name, ' - ',gaia_new['source_id'][idx])
+ targ_names = [x for x in targets_dict]
+ targ_xs = np.array([targets_dict[x][0] for x in targets_dict])
+ targ_ys = np.array([targets_dict[x][1] for x in targets_dict])
+ for i_gaia in range(len(gaia_new)):
+ dx = gaia_new['x0'][i_gaia] - (targ_xs * -1.0)
+ dy = gaia_new['y0'][i_gaia] - targ_ys
dr = np.hypot(dx, dy)
idx = dr.argmin()
if dr[idx] < match_dr_max:
- gaia_new['name'][idx] = targ_name
- print('Found match for: ', targ_name)
+ gaia_new['name'][i_gaia] = targ_names[idx]
+ print('Found match for: ', targ_names[idx], ' - ',gaia_new['source_id'][i_gaia])
return gaia_new
-
def run_flystar():
test_file = '/u/jlu/work/microlens/OB150211/a_2018_10_19/a_ob150211_2018_10_19/lis/stars_matched2.fits'
@@ -279,6 +370,9 @@ def rename_after_flystar(star_tab, label_dat_file, new_copy=True, dr_tol=0.05, d
idx_lab, idx_star, dr, dm = match.match(x_lab, y_lab, m_lab,
star_tab['x0'], star_tab['y0'], star_tab['m0'],
dr_tol=dr_tol, dm_tol=dm_tol, verbose=verbose)
+ #print('idx_lab:')
+ #for iii in range(len(idx_lab)):
+ # print(label_tab["name"][idx_lab[iii]], star_tab["name"][idx_star[iii]])
print('Renaming {0:d} out of {1:d} stars'.format(len(idx_lab), len(star_tab)))
@@ -317,12 +411,12 @@ def pick_good_ref_stars(star_tab, r_cut=None, m_cut=None, p_err_cut=None, pm_err
print('pick_good_ref_stars: Use {0:d} stars after m<{1:.2f}.'.format(use.sum(), m_cut))
if p_err_cut is not None:
- p_err = np.mean((star_tab['x0e'], star_tab['y0e']), axis=0)
+ p_err = np.mean((star_tab['x0_err'], star_tab['y0_err']), axis=0)
use = use & (p_err < p_err_cut)
print('pick_good_ref_stars: Use {0:d} stars after p_err<{1:.5f}.'.format(use.sum(), p_err_cut))
if pm_err_cut is not None:
- pm_err = np.mean((star_tab['vxe'], star_tab['vye']), axis=0)
+ pm_err = np.mean((star_tab['vx_err'], star_tab['vy_err']), axis=0)
use = use & (pm_err < pm_err_cut)
print('pick_good_ref_stars: Use {0:d} stars after pm_err<{1:.5f}.'.format(use.sum(), pm_err_cut))
@@ -344,38 +438,18 @@ def startable_subset(tab, idx, mag_trans=True, mag_trans_orig=False):
combined astrometry + uncombined photometry table.
"""
# Multiples: ['x', 'y', 'm', 'name_in_list', 'xe', 'ye', 'me', 't',
- # 'x_orig', 'y_orig', 'm_orig', 'xe_orig', 'ye_orig', 'me_orig', 'used_in_trans']
- # Single: ['name', 'm0', 'm0e', 'use_in_trans', 'ref_orig', 'n_detect',
- # 'x0', 'vx', 'y0', 'vy', 'x0e', 'vxe', 'y0e', 'vye', 't0']
+ # 'x_orig', 'y_orig', 'm_orig', 'xe_orig', 'ye_orig', 'me_orig', 'used_in_trans',
+ # 'xe_boot','ye_boot','me_boot']
+ # Single: ['name', 'm0', 'm0_err', 'use_in_trans', 'ref_orig', 'n_detect',
+ # 'x0', 'vx', 'y0', 'vy', 'x0_err', 'vx_err', 'y0_err', 'vy_err', 't0']
# Don't include n_vfit
- new_tab = startables.StarTable(name=tab['name'].data,
- x=tab['x'][:,idx].data,
- y=tab['y'][:,idx].data,
- m=tab['m'][:,idx].data,
- xe=tab['xe'][:,idx].data,
- ye=tab['ye'][:,idx].data,
- me=tab['me'][:,idx].data,
- t=tab['t'][:,idx].data,
- x_orig=tab['x_orig'][:,idx].data,
- y_orig=tab['y_orig'][:,idx].data,
- m_orig=tab['m_orig'][:,idx].data,
- xe_orig=tab['xe_orig'][:,idx].data,
- ye_orig=tab['ye_orig'][:,idx].data,
- me_orig=tab['me_orig'][:,idx].data,
- used_in_trans=tab['used_in_trans'][:,idx].data,
- m0=tab['m0'].data,
- m0e=tab['m0e'].data,
- use_in_trans=tab['use_in_trans'].data,
- x0=tab['x0'].data,
- vx=tab['vx'].data,
- y0=tab['y0'].data,
- vy=tab['vy'].data,
- x0e=tab['x0e'].data,
- vxe=tab['vxe'].data,
- y0e=tab['y0e'].data,
- vye=tab['vye'].data,
- t0=tab['t0'].data)
+ new_tab = copy.deepcopy(tab)
+ #new_tab.remove_column('n_fit')
+ new_tab.remove_column('n_detect')
+ for col in ['x','y','m','name_in_list','xe','ye','me','t','x_orig','y_orig','m_orig',
+ 'xe_orig','ye_orig','me_orig','used_in_trans','xe_boot','ye_boot','me_boot']:
+ new_tab[col] = tab[col][:,idx]
new_tab.combine_lists('m', weights_col='me', sigma=3, ismag=True)
diff --git a/flystar/conftest.py b/flystar/conftest.py
index 672b273..da164b5 100644
--- a/flystar/conftest.py
+++ b/flystar/conftest.py
@@ -31,9 +31,9 @@ def pytest_configure(config):
PYTEST_HEADER_MODULES.pop('Pandas', None)
PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
- from . import __version__
+ #from . import __version__
packagename = os.path.basename(os.path.dirname(__file__))
- TESTED_VERSIONS[packagename] = __version__
+ #TESTED_VERSIONS[packagename] = __version__
# Uncomment the last two lines in this block to treat all DeprecationWarnings as
# exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
diff --git a/flystar/fit_velocity.py b/flystar/fit_velocity.py
deleted file mode 100755
index 0317322..0000000
--- a/flystar/fit_velocity.py
+++ /dev/null
@@ -1,205 +0,0 @@
-from tqdm import tqdm
-import numpy as np
-import pandas as pd
-
-def linear(x, k, b):
- return k*x + b
-
-def linear_fit(x, y, sigma=None, absolute_sigma=True):
- """Weighted linear regression (See https://en.wikipedia.org/wiki/Weighted_least_squares#Solution). Recommended for low-dimension, non-degenerate data. Otherwise, please use scipy.curve_fit.
-
- Parameters
- ----------
- x : array-like
- x data
- y : array-like
- y data
- sigma : array-like, optional
- Weighted by 1/sigma**2. If not provided, weight = 1, by default None
- absolute_sigma : bool, optional
- If True (default), sigma is used in an absolute sense and the estimated parameter uncertainty reflects these absolute values. If False, only the relative magnitudes of the sigma values matter, by default True
-
- Returns
- -------
- result : dictionary
- Dictionary with keys 'slope', 'e_slope', 'intercept', 'e_intercept', and 'chi2' if return_chi2=True.
- """
- x = np.array(x)
- y = np.array(y)
- if sigma is None:
- sigma = np.ones_like(x)
- else:
- sigma = np.array(sigma)
-
- X = np.vander(x, 2)
- W = np.diag(1/sigma**2)
- XTWX = X.T @ W @ X
- pcov = np.linalg.inv(XTWX) # Covariance Matrix
- popt = pcov @ X.T @ W @ y # Linear Solution
- perr = np.sqrt(np.diag(pcov)) # Uncertainty of Linear Solution
-
- residual = y - X @ popt
- chi2 = residual.T @ W @ residual
-
- if not absolute_sigma:
- reduced_chi2 = chi2/(len(x) - 2)
- perr *= reduced_chi2**0.5
-
- result = {
- 'slope': popt[0],
- 'intercept': popt[1],
- 'e_slope': perr[0],
- 'e_intercept': perr[1],
- 'chi2': chi2
- }
-
- return result
-
-
-def calc_chi2(x, y, sigma, slope, intercept):
- popt = np.array([slope, intercept])
- X = np.vander(x, 2)
- W = np.diag(1/sigma**2)
- residual = y - X @ popt
- return residual.T @ W @ residual
-
-
-def fit_velocity(startable, weighting='var', use_scipy=False, absolute_sigma=True, epoch_cols='all', art_star=False):
- """Fit proper motion with weighted linear regression equations (see https://en.wikipedia.org/wiki/Weighted_least_squares#Solution).
- Assumes that all data are valid.
-
- Parameters
- ----------
- startable : StarTable
- StarTable object
- weighting : str, optional
- Weighting by variance (1/ye**2) or standard deviation (1/ye), by default 'var'
- use_scipy : bool, optional
- Use scipy.curve_fit or flystar.fit_velocity.linear_fit, by default False
- absolute_sigma : bool, optional
- If True (default), sigma is used in an absolute sense and the estimated parameter uncertainty reflects these absolute values. If False, only the relative magnitudes of the sigma values matter, by default True
- epoch_cols : str or list of intergers, optional
- List of indicies of columns to use. If 'all', use all columns, by default 'all'
- art_star : bool, optional
- Artificial star catalog or not. If True, use startable['x'][:, epoch_ols, 1] as the location, by default False.
-
- Returns
- -------
- result : pd.DataFrame
- Proper motion dataframe with keys vx, vxe, vy, vye, x0, x0e, y0, y0e
-
- Raises
- ------
- ValueError
- If weighting is neither 'std' nor 'var'
- """
- if weighting not in ['std', 'var']:
- raise ValueError(f"Weighting must be either 'std' or 'var', not '{weighting}'.")
- if epoch_cols is None:
- epoch_cols = np.arange(len(startable.meta['YEARS'])) # use all cols if not specified
-
- N = len(startable)
- vx = np.zeros(N)
- vy = np.zeros(N)
- vxe = np.zeros(N)
- vye = np.zeros(N)
- x0 = np.zeros(N)
- y0 = np.zeros(N)
- x0e = np.zeros(N)
- y0e = np.zeros(N)
- chi2_vx = np.zeros(N)
- chi2_vy = np.zeros(N)
- t0 = np.zeros(N)
-
- time = np.array(startable.meta['YEARS'])[epoch_cols]
-
- if not art_star:
- x_arr = startable['x'][:, epoch_cols]
- y_arr = startable['y'][:, epoch_cols]
- else:
- x_arr = startable['x'][:, epoch_cols, 1]
- y_arr = startable['y'][:, epoch_cols, 1]
-
- xe_arr = startable['xe'][:, epoch_cols]
- ye_arr = startable['ye'][:, epoch_cols]
-
- if weighting=='std':
- sigma_x_arr = np.abs(xe_arr)**0.5
- sigma_y_arr = np.abs(ye_arr)**0.5
- elif weighting=='var':
- sigma_x_arr = xe_arr
- sigma_y_arr = ye_arr
-
- # For each star
- for i in tqdm(range(len(startable))):
- x = x_arr[i]
- y = y_arr[i]
- xe = xe_arr[i]
- ye = ye_arr[i]
- sigma_x = sigma_x_arr[i]
- sigma_y = sigma_y_arr[i]
-
- t_weight = 1. / np.hypot(xe, ye)
- t0[i] = np.average(time, weights=t_weight)
- dt = time - t0[i]
-
- if use_scipy:
- p0x = np.array([0., x.mean()])
- p0y = np.array([0., y.mean()])
-
- # Use scipy.curve_fit to fit for velocity
- vx_opt, vx_cov = curve_fit(linear, dt, x, p0=p0x, sigma=sigma_x, absolute_sigma=absolute_sigma)
- vy_opt, vy_cov = curve_fit(linear, dt, y, p0=p0y, sigma=sigma_y, absolute_sigma=absolute_sigma)
-
- vx[i] = vx_opt[0]
- vy[i] = vy_opt[0]
- x0[i] = vx_opt[1]
- y0[i] = vy_opt[1]
- vxe[i], x0e[i] = np.sqrt(vx_cov.diagonal())
- vye[i], y0e[i] = np.sqrt(vy_cov.diagonal())
- chi2_vx[i] = calc_chi2(dt, x, sigma_x, *vx_opt)
- chi2_vy[i] = calc_chi2(dt, y, sigma_y, *vy_opt)
-
- else:
- vx_result = linear_fit(dt, x, sigma=sigma_x, absolute_sigma=absolute_sigma)
- vy_result = linear_fit(dt, y, sigma=sigma_y, absolute_sigma=absolute_sigma)
-
- vx[i] = vx_result['slope']
- vxe[i] = vx_result['e_slope']
- x0[i] = vx_result['intercept']
- x0e[i] = vx_result['e_intercept']
- chi2_vx[i] = vx_result['chi2']
-
- vy[i] = vy_result['slope']
- vye[i] = vy_result['e_slope']
- y0[i] = vy_result['intercept']
- y0e[i] = vy_result['e_intercept']
- chi2_vy[i] = vy_result['chi2']
-
- result = pd.DataFrame({
- 'vx': vx, 'vy': vy,
- 'vxe': vxe, 'vye': vye,
- 'x0': x0, 'y0': y0,
- 'x0e': x0e, 'y0e': y0e,
- 'chi2_vx': chi2_vx,
- 'chi2_vy': chi2_vy,
- 't0': t0
- })
- return result
-
-
-# Test
-if __name__=='__main__':
- from scipy.optimize import curve_fit
-
- x = np.array([1,2,3,4])
- y = np.array([1,2,5,6])
- sigma = np.array([.4,.2,.1,.3])
-
- for absolute_sigma in [True, False]:
- result = linear_fit(x, y, sigma=sigma, absolute_sigma=absolute_sigma)
- popt, pcov = curve_fit(linear, x, y, sigma=sigma, absolute_sigma=absolute_sigma)
- perr = np.sqrt(np.diag(pcov))
- print(f'Absolute Sigma = {absolute_sigma}')
- print(f"linear_fit: slope = {result['slope']:.3f} ± {result['e_slope']:.3f}, intercept = {result['intercept']:.3f} ± {result['e_intercept']:.3f}, chi2={result['chi2']:.3f}")
- print(f'curve_fit: slope = {popt[0]:.3f} ± {perr[0]:.3f}, intercept = {popt[1]:.3f} ± {perr[1]:.3f}, chi2={calc_chi2(x, y, sigma, *popt):.3f}\n')
\ No newline at end of file
diff --git a/flystar/match.py b/flystar/match.py
index d40ccdb..d7c391e 100644
--- a/flystar/match.py
+++ b/flystar/match.py
@@ -15,7 +15,7 @@ def miracle_match_briteN(xin1, yin1, min1, xin2, yin2, min2, Nbrite,
Nbins_vmax=200, Nbins_angle=360,verbose=False):
"""
Take two input starlists and select the brightest stars from
- each. Then performa a triangle matching algorithm along the lines of
+ each. Then perform a triangle matching algorithm along the lines of
Groth 1986.
For every possible triangle (combination of 3 stars) in a starlist,
@@ -279,8 +279,11 @@ def match(x1, y1, m1, x2, y2, m2, dr_tol, dm_tol=None, verbose=True):
idxs2 = np.ones(x1.size, dtype=int) * -1
# The matching will be done using a KDTree.
- kdt = KDT(coords2, balanced_tree=False)
-
+ #kdt = KDT(coords2, balanced_tree=False)
+ #KDTree handling of NaNs throws error in scipy v1.10.1 and newer.
+ #Replace NaNs in coords2 with zero (0). -SKT
+ kdt = KDT(np.where(np.isfinite(coords2), coords2, 0), balanced_tree=False)
+
# This returns the number of neighbors within the specified
# radius. We will use this to find those stars that have no or one
# match and deal with them easily. The more complicated conflict
@@ -342,7 +345,7 @@ def match(x1, y1, m1, x2, y2, m2, dr_tol, dm_tol=None, verbose=True):
# Double check that "min" choice is still within our
# detla-mag tolerence.
- dm_tmp = np.array([dm.T[dm_min[I]][I] for I in np.lib.index_tricks.ndindex(dm_min.shape)])
+ dm_tmp = np.array([dm.T[dm_min[I]][I] for I in np.ndindex(dm_min.shape)])
keep = (dm_min == dr_min) & (dm_tmp < dm_tol)
else:
@@ -389,7 +392,7 @@ def match(x1, y1, m1, x2, y2, m2, dr_tol, dm_tol=None, verbose=True):
keep[dups[dm_min]] = True
else:
if verbose:
- print(' confused, dropping')
+ print(' confused, dropping star at',x2[idxs2[dups]][0],y2[idxs2[dups]][0])
# Clean up the duplicates
diff --git a/flystar/motion_model.py b/flystar/motion_model.py
new file mode 100644
index 0000000..0b86d07
--- /dev/null
+++ b/flystar/motion_model.py
@@ -0,0 +1,561 @@
+import numpy as np
+from abc import ABC
+import pdb
+from flystar import parallax
+from astropy.time import Time
+from scipy.optimize import curve_fit
+import warnings
+
+class MotionModel(ABC):
+ # Number of data points required to fit model
+ n_pts_req = 0
+ # Degrees of freedom for model
+ n_params = 0
+
+    # Fit parameters: Shared fit parameters
+ fitter_param_names = []
+
+ # Fixed parameters: These are parameters that are required for the model, but are not
+ # fit quantities. For example, RA and Dec in a parallax model.
+ fixed_param_names = []
+ fixed_meta_data = []
+
+    # Non-fit parameters: Custom parameters that will not be fit.
+ # These parameters should be derived from the fit parameters and
+ # they must exist as a variable on the model object
+ optional_param_names = []
+
+ def __init__(self, *args, **kwargs):
+ """
+ Make a motion model object. This object defines the fitter and fixed parameters,
+ and if needed stores metadata such as RA and Dec for Parallax,
+ for the given motion model and contains functions to fit these values to data
+ and apply the values to compute expected positions at given times. Each instance
+ corresponds to a given motion model, not an individual star, and thus the fit
+ values are only input/returned in functions and not stored in the object.
+ """
+ return
+
+ def get_pos_at_time(self, params, t):
+ """
+ Position calculator for a single star using a given motion model and input
+ model parameters and times.
+ """
+ #return x, y
+ pass
+
+ def get_batch_pos_at_time(self, t):
+ """
+ Position calculator for a set of stars using a given motion model and input
+ model parameters and times.
+ """
+ #return x, y, x_err, y_err
+ pass
+
+ def run_fit(self, t, x, y, xe, ye, t0, weighting='var',
+ use_scipy=True, absolute_sigma=True):
+ """
+ Run a single fit of the data to the motion model and return the best parameters.
+ This function is used by the overall fit_motion_model function once for a basic fit
+ or several times for a bootstrap fit.
+ """
+ # Run a single fit (used both for overall fit + bootstrap iterations)
+ pass
+
+ def get_weights(self, xe, ye, weighting='var'):
+ """
+ Get the weights for each data point for fitting. Options are 'var' (default)
+ and 'std'.
+ """
+ if weighting=='std':
+ return 1./xe, 1./ye
+ elif weighting=='var':
+ return 1./xe**2, 1./ye**2
+ else:
+ warnings.warn("Invalid weighting, using default weighting scheme var.", UserWarning)
+ return 1./xe**2, 1./ye**2
+
+ def scale_errors(self, errs, weighting='var'):
+ """
+ Rescale the fit result errors as needed, according to the weighting scheme used.
+ """
+ if weighting=='std':
+ return np.array(errs)**2
+ elif weighting=='var':
+ return errs
+ else:
+ warnings.warn("Invalid weighting, using default weighting scheme var.", UserWarning)
+ return errs
+
+ def fit_motion_model(self, t, x, y, xe, ye, t0, bootstrap=0, weighting='var',
+ use_scipy=True, absolute_sigma=True):
+ """
+ Fit the input positions on the sky and errors
+ to determine new parameters for this motion model (MM).
+ Best-fit parameters will be returned along with uncertainties.
+ Optionally, bootstrap error estimation can be performed.
+ """
+ params, param_errs = self.run_fit(t, x, y, xe, ye, t0=t0, weighting=weighting,
+ use_scipy=use_scipy, absolute_sigma=absolute_sigma)
+
+ if bootstrap>0 and len(x)>(self.n_pts_req):
+ edx = np.arange(len(x), dtype=int)
+ bb_params = []
+ bb_params_errs = []
+ for bb in range(bootstrap):
+ bdx = np.random.choice(edx, len(x))
+ while len(np.unique(bdx))2: # Catch case where bootstrap sends only 2 unique epochs
+ _,idx=np.unique(dt, return_index=True)
+ dt = dt[idx]
+ x = x[idx]
+ y = y[idx]
+ xe = xe[idx]
+ ye = ye[idx]
+ dx = np.diff(x)[0]
+ dy = np.diff(y)[0]
+ dt_diff = np.diff(dt)[0]
+ vx = dx / dt_diff
+ vy = dy / dt_diff
+ # TODO: still not sure about the error handling here
+ x0 = x[0] - dt[0]*vx # np.average(x, weights=x_wt) #
+ y0 = y[0] - dt[0]*vy # np.average(y, weights=y_wt) #
+ x0e = np.abs(dx) / 2**0.5 # np.sqrt(np.sum(xe**2)/2) #
+ y0e = np.abs(dy) / 2**0.5 # np.sqrt(np.sum(ye**2)/2) #
+ vxe = 0.0 #np.abs(vx) * np.sqrt(np.sum(xe**2/x**2))
+ vye = 0.0 #np.abs(vy) * np.sqrt(np.sum(ye**2/y**2))
+
+ else:
+ if use_scipy:
+ def linear(t, c0, c1):
+ return c0 + c1*t
+ x_opt, x_cov = curve_fit(linear, dt, x, p0=np.array(params_guess[:2]), sigma=1/np.sqrt(x_wt), absolute_sigma=absolute_sigma)
+ y_opt, y_cov = curve_fit(linear, dt, y, p0=np.array(params_guess[2:]), sigma=1/np.sqrt(y_wt), absolute_sigma=absolute_sigma)
+ x0, vx = x_opt
+ y0, vy = y_opt
+ x0e, vxe = np.sqrt(x_cov.diagonal())
+ y0e, vye = np.sqrt(y_cov.diagonal())
+ x0e, vxe, y0e, vye = self.scale_errors([x0e, vxe, y0e, vye], weighting=weighting)
+ else:
+ # Use https://en.wikipedia.org/wiki/Weighted_least_squares#Solution scheme
+ x = np.array(x)
+ y = np.array(y)
+ dt = np.array(dt)
+ X_mat_t = np.vander(dt, 2)
+ # x calculation
+ W_mat_x = np.diag(x_wt)
+ XTWX_mat_x = X_mat_t.T @ W_mat_x @ X_mat_t
+ pcov_x = np.linalg.inv(XTWX_mat_x) # Covariance Matrix
+ popt_x = pcov_x @ X_mat_t.T @ W_mat_x @ x # Linear Solution
+ perr_x = np.sqrt(np.diag(pcov_x)) # Uncertainty of Linear Solution
+ # y calculation
+ W_mat_y = np.diag(y_wt)
+ XTWX_mat_y = X_mat_t.T @ W_mat_y @ X_mat_t
+ pcov_y = np.linalg.inv(XTWX_mat_y) # Covariance Matrix
+ popt_y = pcov_y @ X_mat_t.T @ W_mat_y @ y # Linear Solution
+ perr_y = np.sqrt(np.diag(pcov_y)) # Uncertainty of Linear Solution
+ # prepare values to return
+ x0, vx = popt_x[1], popt_x[0]
+ y0, vy = popt_y[1], popt_y[0]
+ x0e, vxe = perr_x[1], perr_x[0]
+ y0e, vye = perr_y[1], perr_y[0]
+ x0e, vxe, y0e, vye = self.scale_errors([x0e, vxe, y0e, vye], weighting=weighting)
+
+ params = [x0, vx, y0, vy]
+ param_errors = [x0e, vxe, y0e, vye]
+ return params, param_errors
+
+
+class Acceleration(MotionModel):
+ """
+ A 2D accelerating motion model for a star on the sky.
+ """
+ n_pts_req = 4 # TODO: consider special case for 3 pts
+ n_params=3
+ fitter_param_names = ['x0', 'vx0', 'ax', 'y0', 'vy0', 'ay']
+ fixed_param_names = ['t0']
+
+ def __init__(self, x0=0, vx0=0, ax=0, y0=0, vy0=0, ay=0, t0=None,
+ x0_err=0, vx0_err=0, ax_err=0, y0_err=0, vy0_err=0, ay_err=0, **kwargs):
+ # Must call after setting parameters.
+ # This checks for proper parameter formatting.
+ super().__init__()
+ return
+
+ def get_pos_at_time(self, fit_params, fixed_params, t):
+ fit_params_dict = dict(zip(self.fitter_param_names, fit_params))
+ fixed_params_dict = dict(zip(self.fixed_param_names, fixed_params))
+ dt = t-fixed_params_dict['t0']
+ x = fit_params_dict['x0'] + fit_params_dict['vx0']*dt + 0.5*fit_params_dict['ax']*dt**2
+ y = fit_params_dict['y0'] + fit_params_dict['vy0']*dt + 0.5*fit_params_dict['ay']*dt**2
+ return x, y
+
+ def get_batch_pos_at_time(self,t,
+ x0=[],vx0=[],ax=[], y0=[],vy0=[],ay=[], t0=[],
+ x0_err=[],vx0_err=[],ax_err=[], y0_err=[],vy0_err=[],ay_err=[], **kwargs):
+ if hasattr(t, "__len__"):
+ dt = t-t0[:,np.newaxis]
+ x = x0[:,np.newaxis] + dt*vx0[:,np.newaxis] + 0.5*dt**2*ax[:,np.newaxis]
+ y = y0[:,np.newaxis] + dt*vy0[:,np.newaxis] + 0.5*dt**2*ay[:,np.newaxis]
+ x_err = np.sqrt(x0_err[:,np.newaxis]**2 + (vx0_err[:,np.newaxis]*dt)**2 + (0.5*ax_err[:,np.newaxis]*dt**2)**2)
+ y_err = np.sqrt(y0_err[:,np.newaxis]**2 + (vy0_err[:,np.newaxis]*dt)**2 + (0.5*ay_err[:,np.newaxis]*dt**2)**2)
+ else:
+ dt = t-t0
+ x = x0 + dt*vx0 + 0.5*dt**2*ax
+ y = y0 + dt*vy0 + 0.5*dt**2*ay
+ x_err = np.sqrt(x0_err**2 + (vx0_err*dt)**2 + (0.5*ax_err*dt**2)**2)
+ y_err = np.sqrt(y0_err**2 + (vy0_err*dt)**2 + (0.5*ay_err*dt**2)**2)
+ return x,y,x_err,y_err
+
+ def run_fit(self, t, x, y, xe, ye, t0, weighting='var', params_guess=None,
+ use_scipy=True, absolute_sigma=True):
+ if not use_scipy:
+            warnings.warn("Acceleration model has no non-scipy fitter option. Running with scipy.", UserWarning)
+ dt = t-t0
+ x_wt, y_wt = self.get_weights(xe,ye, weighting=weighting)
+ if params_guess is None:
+ params_guess = [x.mean(),0.0,0.0,y.mean(),0.0,0.0]
+
+ def accel(t, c0,c1,c2):
+ return c0 + c1*t + 0.5*c2*t**2
+
+        x_opt, x_cov = curve_fit(accel, dt, x, p0=np.array(params_guess[:3]), sigma=1/x_wt**0.5, absolute_sigma=absolute_sigma)
+        y_opt, y_cov = curve_fit(accel, dt, y, p0=np.array(params_guess[3:]), sigma=1/y_wt**0.5, absolute_sigma=absolute_sigma)
+ x0 = x_opt[0]
+ y0 = y_opt[0]
+ vx0 = x_opt[1]
+ vy0 = y_opt[1]
+ ax = x_opt[2]
+ ay = y_opt[2]
+
+ x0e, vx0e, axe = np.sqrt(x_cov.diagonal())
+ y0e, vy0e, aye = np.sqrt(y_cov.diagonal())
+ x0e, vx0e, axe, y0e, vy0e, aye = self.scale_errors([x0e, vx0e, axe, y0e, vy0e, aye], weighting=weighting)
+
+ params = [x0, vx0, ax, y0, vy0, ay]
+ param_errors = [x0e, vx0e, axe, y0e, vy0e, aye]
+
+ return params, param_errors
+
+class Parallax(MotionModel):
+ """
+ Motion model for linear proper motion + parallax
+
+ Requires RA & Dec (J2000) for parallax calculation.
+ Optional PA is counterclockwise offset of the image y-axis from North.
+ Optional obs parameter describes observer location, default is 'earth'.
+ """
+ n_pts_req = 4
+ n_params=3
+ fitter_param_names = ['x0', 'vx', 'y0', 'vy', 'pi']
+ fixed_param_names = ['t0']
+ fixed_meta_data = ['RA','Dec','PA','obs']
+
+ def __init__(self, RA, Dec, PA=0.0, obs='earth', **kwargs):
+ self.RA = RA
+ self.Dec = Dec
+ self.PA = PA
+ self.obs = obs
+ self.plx_vector_cached = None
+ return
+
+ def get_parallax_vector(self, t_mjd):
+ recalc_plx = True
+ if self.plx_vector_cached is not None:
+ if hasattr(t_mjd, "__len__"):
+ if list(t_mjd) == list(self.plx_vector_cached[0]):
+ pvec = self.plx_vector_cached[1:]
+ recalc_plx = False
+ elif all([t_mjd_i in self.plx_vector_cached[0] for t_mjd_i in t_mjd]):
+ pvec_idxs = [np.argwhere(self.plx_vector_cached[0]==t_mjd_i)[0][0] for t_mjd_i in t_mjd]
+ pvec = [self.plx_vector_cached[1][pvec_idxs], self.plx_vector_cached[2][pvec_idxs]]
+ recalc_plx = False
+ elif t_mjd in self.plx_vector_cached[0]:
+ idx = np.where(t_mjd==self.plx_vector_cached[0])[0][0]
+ pvec = np.array([self.plx_vector_cached[1][idx], self.plx_vector_cached[2][idx]])
+ recalc_plx = False
+ if recalc_plx:
+ pvec = parallax.parallax_in_direction(self.RA, self.Dec, t_mjd, obsLocation=self.obs, PA=self.PA).T
+ if hasattr(t_mjd, "__len__"):
+ self.plx_vector_cached = [t_mjd, pvec[0], pvec[1]]
+ return pvec
+
+ def get_pos_at_time(self, fit_params, fixed_params, t):
+ fit_params_dict = dict(zip(self.fitter_param_names, fit_params))
+ fixed_params_dict = dict(zip(self.fixed_param_names, fixed_params))
+ dt = t-fixed_params_dict['t0']
+
+ t_mjd = Time(t, format='decimalyear', scale='utc').mjd
+ pvec = self.get_parallax_vector(t_mjd)
+ pvec_x = np.reshape(pvec[0], t.shape)
+ pvec_y = np.reshape(pvec[1], t.shape)
+ x = fit_params_dict['x0'] + fit_params_dict['vx']*dt + fit_params_dict['pi']*pvec_x
+ y = fit_params_dict['y0'] + fit_params_dict['vy']*dt + fit_params_dict['pi']*pvec_y
+ return x, y
+
+ def get_batch_pos_at_time(self, t,
+ x0=[],vx=[], y0=[],vy=[], pi=[], t0=[],
+ x0_err=[],vx_err=[], y0_err=[],vy_err=[], pi_err=[], **kwargs):
+ t_mjd = Time(t, format='decimalyear', scale='utc').mjd
+ pvec = self.get_parallax_vector(t_mjd)
+ if hasattr(t, "__len__"):
+ dt = t-t0[:,np.newaxis]
+ x = x0[:,np.newaxis] + dt*vx[:,np.newaxis] + pi[:,np.newaxis]*pvec[0].T
+ y = y0[:,np.newaxis] + dt*vy[:,np.newaxis] + pi[:,np.newaxis]*pvec[1].T
+ try:
+ x_err = np.sqrt(x0_err[:,np.newaxis]**2 + (vx_err[:,np.newaxis]*dt)**2 + (pi_err[:,np.newaxis]*pvec[0].T)**2)
+ y_err = np.sqrt(y0_err[:,np.newaxis]**2 + (vy_err[:,np.newaxis]*dt)**2 + (pi_err[:,np.newaxis]*pvec[1].T)**2)
+            except Exception:
+ x_err,y_err = [],[]
+ else:
+ dt = t-t0
+ x = x0 + dt*vx + pi*pvec[0]
+ y = y0 + dt*vy + pi*pvec[1]
+ try:
+ x_err = np.sqrt(x0_err**2 + (vx_err*dt)**2 + (pi_err*pvec[0])**2)
+ y_err = np.sqrt(y0_err**2 + (vy_err*dt)**2 + (pi_err*pvec[1])**2)
+            except Exception:
+ x_err,y_err = [],[]
+ return x,y,x_err,y_err
+
+ def run_fit(self, t, x, y, xe, ye, t0, weighting='var', params_guess=None,
+ use_scipy=True, absolute_sigma=True):
+ if not use_scipy:
+            warnings.warn("Parallax model has no non-scipy fitter option. Running with scipy.", UserWarning)
+ t_mjd = Time(t, format='decimalyear', scale='utc').mjd
+ pvec = self.get_parallax_vector(t_mjd)
+ x_wt, y_wt = self.get_weights(xe,ye, weighting=weighting)
+ def fit_func(use_t, x0,vx, y0,vy, pi):
+ x_res = x0 + vx*(use_t-t0) + pi*pvec[0]
+ y_res = y0 + vy*(use_t-t0) + pi*pvec[1]
+ return np.hstack([x_res, y_res])
+ # Initial guesses, x0,y0 as x,y averages;
+ # vx,vy as average velocity if first and last points are perfectly measured;
+    # pi for 10 pc distance
+ if params_guess is None:
+ idx_first, idx_last = np.argmin(t), np.argmax(t)
+ params_guess = [x.mean(),(x[idx_last]-x[idx_first])/(t[idx_last]-t[idx_first]),
+ y.mean(),(y[idx_last]-y[idx_first])/(t[idx_last]-t[idx_first]), 0.1]
+        res = curve_fit(fit_func, t, np.hstack([x,y]), p0=params_guess,
+                        sigma=1.0/np.sqrt(np.hstack([x_wt,y_wt])), absolute_sigma=absolute_sigma)
+ x0,vx,y0,vy,pi = res[0]
+ x0_err,vx_err,y0_err,vy_err,pi_err = self.scale_errors(np.sqrt(np.diag(res[1])), weighting=weighting)
+
+ params = [x0, vx, y0, vy, pi]
+ param_errors = [x0_err, vx_err, y0_err, vy_err, pi_err]
+ return params, param_errors
+
+
+def validate_motion_model_dict(motion_model_dict, startable, default_motion_model):
+ """
+ Check that everything is set up properly for motion models to run and their
+ required metadata.
+ """
+
+ # Collect names of all motion models that might get used.
+ all_motion_model_names = ['Fixed']
+ if default_motion_model is not None:
+ all_motion_model_names.append(default_motion_model)
+ if 'motion_model_input' in startable.columns:
+ all_motion_model_names += np.unique(startable['motion_model_input']).tolist()
+ if 'motion_model_used' in startable.columns:
+ all_motion_model_names += np.unique(startable['motion_model_used']).tolist()
+ all_motion_model_names = np.unique(all_motion_model_names)
+
+ # Check whether all motion models are in the dict, and if not, try to add them
+ # here or raise an error.
+ for mm in all_motion_model_names:
+ if mm not in motion_model_dict:
+ mm_obj = eval(mm)
+ if len(mm_obj.fixed_meta_data)>0:
+ raise ValueError(f"Cannot use {mm} motion model without required metadata. Please initialize with required metadata and provide in motion_model_dict.")
+ else:
+ motion_model_dict[mm] = mm_obj()
+ warnings.warn(f"Using default model/fitter for {mm}.", UserWarning)
+
+ return motion_model_dict
+
+
+def get_one_motion_model_param_names(motion_model_name, with_errors=True, with_fixed=True):
+ """
+ Get all the motion model parameters for a given motion_model_name.
+ Optionally, include fixed and error parameters (included by default).
+ """
+ mod = eval(motion_model_name)
+ list_of_parameters = []
+ list_of_parameters += getattr(mod, 'fitter_param_names')
+ if with_fixed:
+ list_of_parameters += getattr(mod, 'fixed_param_names')
+ if with_errors:
+ list_of_parameters += [par+'_err' for par in getattr(mod, 'fitter_param_names')]
+ return list_of_parameters
+
+
+def get_list_motion_model_param_names(motion_model_list, with_errors=True, with_fixed=True):
+ """
+ Get all the motion model parameters for all models given in motion_model_list.
+ Optionally, include fixed and error parameters (included by default).
+ """
+ list_of_parameters = []
+ all_motion_models = [eval(mm) for mm in np.unique(motion_model_list).tolist()]
+ for aa in range(len(all_motion_models)):
+ param_names = getattr(all_motion_models[aa], 'fitter_param_names')
+ param_fixed_names = getattr(all_motion_models[aa], 'fixed_param_names')
+ param_err_names = [par+'_err' for par in param_names]
+
+ list_of_parameters += param_names
+ if with_fixed:
+ list_of_parameters += param_fixed_names
+ if with_errors:
+ list_of_parameters += param_err_names
+
+ return np.unique(list_of_parameters).tolist()
+
+
+def get_all_motion_model_param_names(with_errors=True, with_fixed=True):
+ """
+ Get all the motion model parameters for all models defined in this module.
+ Optionally, include fixed and error parameters (included by default).
+ """
+ list_of_parameters = []
+ all_motion_models = MotionModel.__subclasses__()
+ for aa in range(len(all_motion_models)):
+ param_names = getattr(all_motion_models[aa], 'fitter_param_names')
+ param_fixed_names = getattr(all_motion_models[aa], 'fixed_param_names')
+ param_err_names = [par+'_err' for par in param_names]
+
+ list_of_parameters += param_names
+ if with_fixed:
+ list_of_parameters += param_fixed_names
+ if with_errors:
+ list_of_parameters += param_err_names
+
+ return np.unique(list_of_parameters).tolist()
+
diff --git a/flystar/parallax.py b/flystar/parallax.py
new file mode 100755
index 0000000..4792ec6
--- /dev/null
+++ b/flystar/parallax.py
@@ -0,0 +1,149 @@
+# Parallax calculation module for motion models involving parallax
+# Adapted from BAGLE's parallax.py
+
+import math
+
+import numpy as np
+from joblib import Memory
+import os
+from astropy import units, units as u
+from astropy.coordinates import SkyCoord, get_body_barycentric, get_body_barycentric_posvel, solar_system_ephemeris, \
+ CartesianRepresentation
+from astropy.time import Time
+
+# Use the JPL ephemerides.
+solar_system_ephemeris.set('jpl')
+
+# Setup a parallax cache
+try:
+ cache_dir = os.environ['PARALLAX_CACHE_DIR']
+except:
+ cache_dir = os.path.dirname(__file__) + '/parallax_cache/'
+cache_memory = Memory(cache_dir, verbose=0)
+# Default cache size is 1 GB
+cache_memory.reduce_size()
+
+@cache_memory.cache()
+def parallax_in_direction(RA, Dec, mjd, obsLocation='earth', PA=0):
+ """
+ | R.A. in degrees. (J2000)
+ | Dec. in degrees. (J2000)
+ | MJD
+ | PA in degrees. (counterclockwise offset of the image y-axis from North)
+
+ Equations following MulensModel.
+ """
+ #print('parallax_in_direction: len(t) = ', len(mjd))
+
+ # Munge inputs into astropy format.
+ times = Time(mjd + 2400000.5, format='jd', scale='tdb')
+ coord = SkyCoord(RA, Dec, unit=(units.deg, units.deg))
+
+ direction = coord.cartesian.xyz.value
+ north = np.array([0., 0., 1.])
+ _east_projected = np.cross(north, direction) / np.linalg.norm(np.cross(north, direction))
+ _north_projected = np.cross(direction, _east_projected) / np.linalg.norm(np.cross(direction, _east_projected))
+
+ obs_pos = get_observer_barycentric(obsLocation, times)
+ sun_pos = get_body_barycentric(body='sun', time=times)
+
+ sun_obs_pos = sun_pos - obs_pos
+
+ pos = sun_obs_pos.xyz.T.to(units.au)
+
+ e = np.dot(pos, _east_projected)
+ n = np.dot(pos, _north_projected)
+
+ # Rotate frame e,n->x,y accounting for PA
+ PA_rad = np.pi/180.0 * PA
+ x = -e.value*np.cos(PA_rad) + n.value*np.sin(PA_rad)
+ y = e.value*np.sin(PA_rad) + n.value*np.cos(PA_rad)
+
+ pvec = np.array([x, y]).T
+
+ return pvec
+
+
+def get_observer_barycentric(body, times, min_ephem_step=1, velocity=False):
+ """
+ Get the barycentric position of a satellite or other Solar System body
+ using JPL emphemerides through the Horizon app.
+
+ The ephemeris is queried at a decimated time step set by min_ephem_step
+ (def=1 day) that must be 1 day or larger. The positions
+ (and optionally velocities) are then interpolated onto the desired
+ time array.
+
+ Inputs
+ ------
+ body : str
+ The name of the Solar System body. Must use the JPL Horizon
+ naming scheme.
+
+ times : astropy.time.Time array
+ Array of times (astropy.time.core.Time) objects at which to
+ fetch the position of the specified Solar System body.
+
+ Optional Inputs
+ ---------------
+ min_ephem_step : int
+ Minimum time step to query JPL in days. Must not be <1 and must
+ be in integer days.
+
+    velocity : bool
+ If true, return both position and velocity vectors over time.
+
+ Return
+ ------
+ coord : astropy.coordinates.CartesianRepresentation
+ The xyz coordinates in the plane of the Solar System at the
+ input times.
+ """
+
+ if body in solar_system_ephemeris.bodies:
+ if velocity:
+ obs_pos, obs_vel = get_body_barycentric_posvel(body=body, time=times)
+ else:
+ obs_pos = get_body_barycentric(body=body, time=times)
+ else:
+ # Figure out a cadence for the ephemerides, not smaller than 1 day.
+ dt = np.median(np.diff(times)).jd
+ if dt < min_ephem_step:
+ dt = min_ephem_step
+
+ # Get the date range, add some padding on each side.
+ t_min = times.min()
+ t_max = times.max()
+ t_min.format = 'iso'
+ t_max.format = 'iso'
+ t_min = str(t_min - dt*u.day).split()[0]
+ t_max = str(t_max + dt*u.day).split()[0]
+ step = f'{dt:.0f}d'
+
+ # Fetch the Horizons ephemeris.
+ from astroquery.jplhorizons import Horizons
+ obj = Horizons(id=body, epochs={'start':t_min, 'stop':t_max, 'step':step})
+ obj_data = obj.vectors()
+
+ ephem_jd = obj_data['datetime_jd']
+
+ # Interpolate to the actual time array.
+ obj_x_at_t = np.interp(times.jd, ephem_jd, obj_data['x'].to('km')) * u.km
+ obj_y_at_t = np.interp(times.jd, ephem_jd, obj_data['y'].to('km')) * u.km
+ obj_z_at_t = np.interp(times.jd, ephem_jd, obj_data['z'].to('km')) * u.km
+
+ if velocity:
+ obj_vx_at_t = np.interp(times.jd, ephem_jd, obj_data['vx'].to('km/s')) * u.km / u.s
+ obj_vy_at_t = np.interp(times.jd, ephem_jd, obj_data['vy'].to('km/s')) * u.km / u.s
+ obj_vz_at_t = np.interp(times.jd, ephem_jd, obj_data['vz'].to('km/s')) * u.km / u.s
+
+ obs_vel = CartesianRepresentation(obj_vx_at_t, obj_vy_at_t, obj_vz_at_t)
+
+ obs_pos = CartesianRepresentation(obj_x_at_t, obj_y_at_t, obj_z_at_t)
+
+ if velocity:
+ return (obs_pos, obs_vel)
+ else:
+ return obs_pos
+
+
diff --git a/flystar/plots.py b/flystar/plots.py
index c675170..2d65b2c 100755
--- a/flystar/plots.py
+++ b/flystar/plots.py
@@ -1,4 +1,4 @@
-from flystar import analysis
+from flystar import analysis, motion_model, startables
import pylab as py
import pylab as plt
import numpy as np
@@ -14,6 +14,8 @@
import astropy
from astropy.table import Table
from astropy.io import ascii
+from astropy.coordinates import SkyCoord
+from astropy import units as u
####################################################
# Code for making diagnostic plots for astrometry
@@ -518,8 +520,8 @@ def vel_diff_err_hist(ref_mat, starlist_mat, nbins=25, bin_width=None, vxlim=Non
diff_vx = ref_mat['vx'] - starlist_mat['vx']
diff_vy = ref_mat['vy'] - starlist_mat['vy']
- vx_err = np.hypot(ref_mat['vxe'], starlist_mat['vxe'])
- vy_err = np.hypot(ref_mat['vye'], starlist_mat['vye'])
+ vx_err = np.hypot(ref_mat['vx_err'], starlist_mat['vx_err'])
+ vy_err = np.hypot(ref_mat['vy_err'], starlist_mat['vy_err'])
ratio_vx = diff_vx / vx_err
ratio_vy = diff_vy / vy_err
@@ -589,10 +591,10 @@ def residual_vpd(ref_mat, starlist_trans_mat, pscale=None):
# Error calculation depends on if we are converting to mas/yr
if pscale != None:
- xerr_frac = np.hypot((ref_mat['vxe'] / ref_mat['vx']),
- (starlist_trans_mat['vxe'] / starlist_trans_mat['vx']))
- yerr_frac = np.hypot((ref_mat['vye'] / ref_mat['vy']),
- (starlist_trans_mat['vye'] / starlist_trans_mat['vy']))
+ xerr_frac = np.hypot((ref_mat['vx_err'] / ref_mat['vx']),
+ (starlist_trans_mat['vx_err'] / starlist_trans_mat['vx']))
+ yerr_frac = np.hypot((ref_mat['vy_err'] / ref_mat['vy']),
+ (starlist_trans_mat['vy_err'] / starlist_trans_mat['vy']))
# Now apply the plate scale to convert to mas/yr
diff_x *= pscale
@@ -600,8 +602,8 @@ def residual_vpd(ref_mat, starlist_trans_mat, pscale=None):
xerr = diff_x * xerr_frac
yerr = diff_y * yerr_frac
else:
- xerr = np.hypot(ref_mat['vxe'], starlist_trans_mat['vxe'])
- yerr = np.hypot(ref_mat['vye'], starlist_trans_mat['vye'])
+ xerr = np.hypot(ref_mat['vx_err'], starlist_trans_mat['vx_err'])
+ yerr = np.hypot(ref_mat['vy_err'], starlist_trans_mat['vy_err'])
# Plotting
py.figure(figsize=(10,10))
@@ -1044,8 +1046,8 @@ def plot_gaia(gaia):
def plot_pm_error(tab):
plt.figure(figsize=(6,6))
plt.clf()
- plt.semilogy(tab['m0'], tab['vxe']*1e3, 'r.', label=r'$\sigma_{\mu_{\alpha *}}$', alpha=0.4)
- plt.semilogy(tab['m0'], tab['vye']*1e3, 'b.', label=r'$\sigma_{\mu_{\delta}}$', alpha=0.4)
+ plt.semilogy(tab['m0'], tab['vx_err']*1e3, 'r.', label=r'$\sigma_{\mu_{\alpha *}}$', alpha=0.4)
+ plt.semilogy(tab['m0'], tab['vy_err']*1e3, 'b.', label=r'$\sigma_{\mu_{\delta}}$', alpha=0.4)
plt.legend()
plt.xlabel('Mag')
plt.ylabel('PM Error (mas/yr)')
@@ -1055,24 +1057,24 @@ def plot_pm_error(tab):
def plot_mag_error(tab):
plt.figure(figsize=(6,6))
plt.clf()
- plt.semilogy(tab['m0'], tab['m0e'], 'r.', alpha=0.4)
+ plt.semilogy(tab['m0'], tab['m0_err'], 'r.', alpha=0.4)
plt.legend()
plt.xlabel('Mag')
plt.ylabel('Mag Error (mag)')
return
-def plot_mean_residuals_by_epoch(tab):
+def plot_mean_residuals_by_epoch(tab, motion_model_dict={}):
"""
Plot mean position and magnitude residuals vs. epoch.
Note we are plotting the mean( |dx} ) to see
the size of the mean residual.
"""
# Predicted model positions at each epoch
- dt = tab['t'] - tab['t0'][:, np.newaxis]
- xt_mod = tab['x0'][:, np.newaxis] + tab['vx'][:, np.newaxis] * dt
- yt_mod = tab['y0'][:, np.newaxis] + tab['vy'][:, np.newaxis] * dt
-
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod, yt_mod, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+
# Residuals
dx = tab['x'] - xt_mod
dy = tab['y'] - yt_mod
@@ -1120,18 +1122,21 @@ def plot_mean_residuals_by_epoch(tab):
return
-def plot_quiver_residuals_all_epochs(tab, unit='arcsec', scale=None, plotlim=None):
+def plot_quiver_residuals_all_epochs(tab, motion_model_dict={}, unit='arcsec', scale=None, plotlim=None):
# Keep track of the residuals for averaging.
dr_good = np.zeros(len(tab), dtype=float)
n_good = np.zeros(len(tab), dtype=int)
dr_ref = np.zeros(len(tab), dtype=float)
n_ref = np.zeros(len(tab), dtype=int)
+
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ complete_times = np.array([np.unique(col[~np.isnan(col)])[0] for col in tab['t'].T])
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(complete_times, motion_model_dict, allow_alt_models=True)
for ee in range(tab['x'].shape[1]):
- dt = tab['t'][:, ee] - tab['t0']
- xt_mod = tab['x0'] + tab['vx'] * dt
- yt_mod = tab['y0'] + tab['vy'] * dt
+ xt_mod = xt_mod_all[:,ee]
+ yt_mod = yt_mod_all[:,ee]
good_idx = np.where(np.isfinite(tab['x'][:, ee]) == True)[0]
ref_idx = np.where(tab[good_idx]['used_in_trans'][:, ee] == True)[0]
@@ -1184,18 +1189,22 @@ def plot_quiver_residuals_all_epochs(tab, unit='arcsec', scale=None, plotlim=Non
return
-def plot_quiver_residuals_with_orig_all_epochs(tab, trans_list, unit='arcsec', scale=None, plotlim=None, scale_orig=None, cte_fit=None, mlim=15):
+def plot_quiver_residuals_with_orig_all_epochs(tab, trans_list, motion_model_dict={}, unit='arcsec', scale=None, plotlim=None, scale_orig=None, cte_fit=None, mlim=15):
# Keep track of the residuals for averaging.
dr_good = np.zeros(len(tab), dtype=float)
n_good = np.zeros(len(tab), dtype=int)
dr_ref = np.zeros(len(tab), dtype=float)
n_ref = np.zeros(len(tab), dtype=int)
-
+
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+
for ee in range(tab['x'].shape[1]):
dt = tab['t'][:, ee] - tab['t0']
- xt_mod = tab['x0'] + tab['vx'] * dt
- yt_mod = tab['y0'] + tab['vy'] * dt
+        xt_mod = xt_mod_all[:, ee]
+        yt_mod = yt_mod_all[:, ee]
good_idx = np.where(np.isfinite(tab['x'][:, ee]) == True)[0]
ref_idx = np.where(tab[good_idx]['used_in_trans'][:, ee] == True)[0]
@@ -1216,7 +1225,7 @@ def plot_quiver_residuals_with_orig_all_epochs(tab, trans_list, unit='arcsec', s
scale=scale_orig, plotlim=plotlim)
plot_mag_scatter(tab['m'][:, ee],
- tab['m0'], tab['m0e'],
+ tab['m0'], tab['m0_err'],
tab['x'][:, ee], tab['y'][:, ee],
tab['xe'][:, ee], tab['ye'][:, ee],
xt_mod, yt_mod,
@@ -1226,7 +1235,7 @@ def plot_quiver_residuals_with_orig_all_epochs(tab, trans_list, unit='arcsec', s
cte_fit=cte_fit, mlim=mlim)
plot_y_scatter(tab['m'][:, ee],
- tab['m0'], tab['m0e'],
+ tab['m0'], tab['m0_err'],
tab['x'][:, ee], tab['y'][:, ee],
tab['xe'][:, ee], tab['ye'][:, ee],
xt_mod, yt_mod,
@@ -1287,7 +1296,7 @@ def plot_quiver_residuals_with_orig_all_epochs(tab, trans_list, unit='arcsec', s
return
-def plot_mag_scatter_multi_trans_all_epochs(tab_list, trans_list_list, unit='arcsec', scale=None, plotlim=None, scale_orig=None):
+def plot_mag_scatter_multi_trans_all_epochs(tab_list, trans_list_list, motion_model_dict={}, unit='arcsec', scale=None, plotlim=None, scale_orig=None):
m_t_list = []
x_t_list = []
y_t_list = []
@@ -1300,14 +1309,17 @@ def plot_mag_scatter_multi_trans_all_epochs(tab_list, trans_list_list, unit='arc
da_list = []
ntrans = len(tab_list)
-
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+
for mm in range(ntrans):
tab = tab_list[mm]
trans_list = trans_list_list[mm]
for ee in range(tab['x'].shape[1]):
dt = tab['t'][:, ee] - tab['t0']
- xt_mod = tab['x0'] + tab['vx'] * dt
- yt_mod = tab['y0'] + tab['vy'] * dt
+        xt_mod = xt_mod_all[:, ee]
+        yt_mod = yt_mod_all[:, ee]
good_idx = np.where(np.isfinite(tab['x'][:, ee]) == True)[0]
ref_idx = np.where(tab[good_idx]['used_in_trans'][:, ee] == True)[0]
@@ -1842,7 +1854,7 @@ def plot_quiver_residuals(x_t, y_t, x_ref, y_ref, good_idx, ref_idx, title,
return (dx, dy)
-def plot_quiver_residuals_magcolor_all_epochs(tab, unit='arcsec', scale=None, plotlim=None, lower_mag=18, upper_mag=13):
+def plot_quiver_residuals_magcolor_all_epochs(tab, motion_model_dict={}, unit='arcsec', scale=None, plotlim=None, lower_mag=18, upper_mag=13):
# Keep track of the residuals for averaging.
dr_good = np.zeros(len(tab), dtype=float)
n_good = np.zeros(len(tab), dtype=int)
@@ -1852,11 +1864,14 @@ def plot_quiver_residuals_magcolor_all_epochs(tab, unit='arcsec', scale=None, pl
idx = np.where((tab['m0'] < lower_mag) &
(tab['m0'] > upper_mag))[0]
tab = tab[idx]
-
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+
for ee in range(tab['x'].shape[1]):
dt = tab['t'][:, ee] - tab['t0']
- xt_mod = tab['x0'] + tab['vx'] * dt
- yt_mod = tab['y0'] + tab['vy'] * dt
+        xt_mod = xt_mod_all[:, ee]
+        yt_mod = yt_mod_all[:, ee]
mag = tab['m0']
good_idx = np.where(np.isfinite(tab['x'][:, ee]) == True)[0]
@@ -2149,7 +2164,7 @@ def plot_quiver_residuals_orig_angle_xy(x_t, y_t, x_ref, y_ref, good_idx, ref_id
return
-def plot_chi2_dist(tab, Ndetect, xlim=40, n_bins=50):
+def plot_chi2_dist(tab, Ndetect, motion_model_dict={}, xlim=40, n_bins=50, boot_err=False):
"""
tab = flystar table
Ndetect = Number of epochs star detected in
@@ -2157,21 +2172,27 @@ def plot_chi2_dist(tab, Ndetect, xlim=40, n_bins=50):
chi2_x_list = []
chi2_y_list = []
fnd_list = [] # Number of non-NaN error measurements
+
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
for ii in range(len(tab)):
# Ignore the NaNs
fnd = np.argwhere(~np.isnan(tab['xe'][ii,:]))
-# fnd = np.where(tab['xe'][ii, :] > 0)[0]
fnd_list.append(len(fnd))
x = tab['x'][ii, fnd]
y = tab['y'][ii, fnd]
- xerr = tab['xe'][ii, fnd]
- yerr = tab['ye'][ii, fnd]
+ if boot_err:
+ xerr = np.hypot(tab['xe_boot'][ii, fnd], tab['xe'][ii, fnd])
+ yerr = np.hypot(tab['ye_boot'][ii, fnd], tab['ye'][ii, fnd])
+ else:
+ xerr = tab['xe'][ii, fnd]
+ yerr = tab['ye'][ii, fnd]
- dt = tab['t'][ii, fnd] - tab['t0'][ii]
- fitLineX = tab['x0'][ii] + (tab['vx'][ii] * dt)
- fitLineY = tab['y0'][ii] + (tab['vy'][ii] * dt)
+ fitLineX = xt_mod_all[ii, fnd]
+ fitLineY = yt_mod_all[ii,fnd]
diffX = x - fitLineX
diffY = y - fitLineY
@@ -2189,7 +2210,14 @@ def plot_chi2_dist(tab, Ndetect, xlim=40, n_bins=50):
idx = np.where(fnd == Ndetect)[0]
# Fitting position and velocity... so subtract 2 to get Ndof
- Ndof = Ndetect - 2
+ n_params = np.nanmean(tab['n_params'][idx])
+ Ndof = Ndetect - n_params
+ if len(np.unique(tab['n_params'][idx]))>1:
+ print("** Warning: using average Ndof for multiple motion models. **")
+ print("** Consider using plot_chi2_reduced_dist. **")
+ print(f"Ndof={Ndof:.2f}, Ndetect={Ndetect}, Nparams={n_params:.2f}")
+ else:
+ print(f"Ndof={Ndof}, Ndetect={Ndetect}, Nparams={n_params}")
chi2_xaxis = np.linspace(0, xlim, xlim*3)
chi2_bins = np.linspace(0, xlim, n_bins)
@@ -2197,15 +2225,93 @@ def plot_chi2_dist(tab, Ndetect, xlim=40, n_bins=50):
plt.clf()
plt.hist(x[idx], bins=chi2_bins, histtype='step', label='X', density=True)
plt.hist(y[idx], bins=chi2_bins, histtype='step', label='Y', density=True)
- plt.plot(chi2_xaxis, chi2.pdf(chi2_xaxis, Ndof), 'r-', alpha=0.6,
- label='$\chi^2$ ' + str(Ndof) + ' dof')
- plt.title('$N_{epoch} = $' + str(Ndetect) + ', $N_{dof} = $' + str(Ndof))
+ plt.plot(chi2_xaxis, chi2.pdf(chi2_xaxis, Ndof), 'r-', alpha=0.6,
+ label='$\chi^2$ ' + str(round(Ndof,2)) + ' dof')
+ plt.title('$N_{epoch} = $' + str(Ndetect) + ', $N_{dof} = $' + str(round(Ndof,2)))
+ plt.xlim(0, xlim)
+ plt.legend()
+
+ chi2red_x = x / Ndof
+ chi2red_y = y / Ndof
+ chi2red_t = (x + y) / (2.0 * Ndof)
+
+ print('Mean reduced chi^2: (Ndetect = {0:d} of {1:d})'.format(len(idx), len(tab)))
+ fmt = ' {0:s} = {1:.1f} for N_detect and {2:.1f} for all'
+ med_chi2red_x_f = np.median(chi2red_x[idx])
+ med_chi2red_x_a = np.median(chi2red_x)
+ med_chi2red_y_f = np.median(chi2red_y[idx])
+ med_chi2red_y_a = np.median(chi2red_y)
+ med_chi2red_t_f = np.median(chi2red_t[idx])
+ med_chi2red_t_a = np.median(chi2red_t)
+ print(fmt.format(' X', med_chi2red_x_f, med_chi2red_x_a))
+ print(fmt.format(' Y', med_chi2red_y_f, med_chi2red_y_a))
+ print(fmt.format('Tot', med_chi2red_t_f, med_chi2red_t_a))
+
+ return
+
+def plot_chi2_reduced_dist(tab, Ndetect, motion_model_dict={}, xlim=8, n_bins=50, boot_err=False):
+ """
+ tab = flystar table
+ Ndetect = Number of epochs star detected in
+ """
+ chi2_x_list = []
+ chi2_y_list = []
+ fnd_list = [] # Number of non-NaN error measurements
+
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+
+ for ii in range(len(tab)):
+ # Ignore the NaNs
+ fnd = np.argwhere(~np.isnan(tab['xe'][ii,:]))
+ fnd_list.append(len(fnd))
+
+ x = tab['x'][ii, fnd]
+ y = tab['y'][ii, fnd]
+ if boot_err:
+ xerr = np.hypot(tab['xe_boot'][ii, fnd], tab['xe'][ii, fnd])
+ yerr = np.hypot(tab['ye_boot'][ii, fnd], tab['ye'][ii, fnd])
+ else:
+ xerr = tab['xe'][ii, fnd]
+ yerr = tab['ye'][ii, fnd]
+
+ fitLineX = xt_mod_all[ii, fnd]
+ fitLineY = yt_mod_all[ii,fnd]
+
+ diffX = x - fitLineX
+ diffY = y - fitLineY
+ sigX = diffX / xerr
+ sigY = diffY / yerr
+
+ chi2_x = np.sum(sigX**2)
+ chi2_y = np.sum(sigY**2)
+ chi2_x_list.append(chi2_x)
+ chi2_y_list.append(chi2_y)
+
+ x = np.array(chi2_x_list)
+ y = np.array(chi2_y_list)
+ fnd = np.array(fnd_list)
+
+ idx = np.where(fnd == Ndetect)[0]
+ n_params = tab['n_params']
+ Ndof = Ndetect - n_params
+ print("Reduced chi2 for Ndetect="+str(Ndetect))
+ chi2_bins = np.linspace(0, xlim, n_bins)
+
+ plt.figure(figsize=(6,4))
+ plt.clf()
+ plt.hist(x[idx]/Ndof[idx], bins=chi2_bins, histtype='step', label='X', density=True)
+ plt.hist(y[idx]/Ndof[idx], bins=chi2_bins, histtype='step', label='Y', density=True)
+ plt.axvline(np.median(x[idx]/Ndof[idx]), color='C0', linestyle='--', label='X median')
+ plt.axvline(np.median(y[idx]/Ndof[idx]), color='C1', linestyle='--', label='Y median')
+ plt.title('Reduced chi2, $N_{epoch} = $' + str(Ndetect))
plt.xlim(0, xlim)
plt.legend()
- chi2red_x = x / (fnd - 2)
- chi2red_y = y / (fnd - 2)
- chi2red_t = (x + y) / (2.0 * (fnd - 2))
+ chi2red_x = x / Ndof
+ chi2red_y = y / Ndof
+ chi2red_t = (x + y) / (2.0 * Ndof + 1*(tab['motion_model_used']=='Parallax'))
print('Mean reduced chi^2: (Ndetect = {0:d} of {1:d})'.format(len(idx), len(tab)))
fmt = ' {0:s} = {1:.1f} for N_detect and {2:.1f} for all'
@@ -2222,34 +2328,133 @@ def plot_chi2_dist(tab, Ndetect, xlim=40, n_bins=50):
return
-def plot_chi2_dist_per_epoch(tab, Ndetect, xlim, ylim = [-1, 1], target_idx = 0):
def plot_chi2_dist_per_filter(tab, Ndetect, motion_model_dict=None, xlim=40, n_bins=50, filter=None, boot_err=False):
    """
    Plot the per-star chi^2 distributions of the astrometric residuals
    (data - motion-model prediction) in X (RA) and Y (DEC) for stars
    detected in exactly Ndetect epochs, overlay the expected chi^2 PDF,
    and save the figure as '<filter>_chi2_dist.png'.

    Parameters
    ----------
    tab : flystar StarTable
        Table with per-epoch 'x', 'y', 'xe', 'ye', 't' columns and the
        fitted motion-model columns (e.g. 'n_params').
    Ndetect : int
        Number of epochs a star must be detected in to be included in
        the histogram.
    motion_model_dict : dict or None, optional
        Mapping passed through motion_model.validate_motion_model_dict();
        None (default) is treated as an empty dict.
    xlim : int, optional
        Upper limit of the chi^2 axis.
    n_bins : int, optional
        Number of histogram bins.
    filter : str, optional
        Filter name used in the plot title and output file name.
        (Shadows the builtin `filter`; name kept for API compatibility.)
    boot_err : bool, optional
        If True, add bootstrap errors ('xe_boot', 'ye_boot') in
        quadrature with the formal errors.
    """
    # Avoid the mutable-default-argument pitfall.
    if motion_model_dict is None:
        motion_model_dict = {}

    chi2_x_list = []
    chi2_y_list = []
    fnd_list = []  # Number of non-NaN error measurements per star

    motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
    # Use the first star detected in every epoch to obtain a complete
    # (NaN-free) time array for evaluating the motion models.
    i_all_detected = np.where(~np.any(np.isnan(tab['t']), axis=1))[0][0]
    xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(
        tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)

    for ii in range(len(tab)):
        # Ignore the NaNs (epochs where the star was not detected).
        fnd = np.argwhere(~np.isnan(tab['xe'][ii, :]))
        fnd_list.append(len(fnd))

        x = tab['x'][ii, fnd]
        y = tab['y'][ii, fnd]
        if boot_err:
            xerr = np.hypot(tab['xe_boot'][ii, fnd], tab['xe'][ii, fnd])
            yerr = np.hypot(tab['ye_boot'][ii, fnd], tab['ye'][ii, fnd])
        else:
            xerr = tab['xe'][ii, fnd]
            yerr = tab['ye'][ii, fnd]

        # Motion-model prediction at each detected epoch.
        fitLineX = xt_mod_all[ii, fnd]
        fitLineY = yt_mod_all[ii, fnd]
        diffX = x - fitLineX
        diffY = y - fitLineY
        sigX = diffX / xerr
        sigY = diffY / yerr

        chi2_x_list.append(np.sum(sigX**2))
        chi2_y_list.append(np.sum(sigY**2))

    x = np.array(chi2_x_list)
    y = np.array(chi2_y_list)
    fnd = np.array(fnd_list)

    idx = np.where(fnd == Ndetect)[0]

    # Subtract the number of fitted motion-model parameters to get Ndof.
    # If stars use different motion models this is an average dof.
    n_params = np.nanmean(tab['n_params'][idx])
    Ndof = Ndetect - n_params
    print(f"Ndof={Ndof}, Ndetect={Ndetect}, Nparams={n_params}")
    chi2_xaxis = np.linspace(0, xlim, xlim*3)
    chi2_bins = np.linspace(0, xlim, n_bins)

    plt.figure(figsize=(6,4))
    plt.clf()
    plt.hist(x[idx], bins=chi2_bins, histtype='stepfilled', label='RA', density=True, color='skyblue', alpha=0.8, edgecolor='k')
    plt.hist(y[idx], bins=chi2_bins, histtype='stepfilled', label='DEC', density=True, color='orange', alpha=0.8, edgecolor='k')
    # Raw string: '\c' in a non-raw literal is an invalid escape sequence.
    plt.plot(chi2_xaxis, chi2.pdf(chi2_xaxis, Ndof), 'r-', alpha=0.6,
             label=r'$\chi^2$ ' + str(Ndof) + ' dof')
    plt.title(str(filter)+' (N = '+str(len(chi2_x_list))+')', fontsize=22)
    plt.xlim(0, xlim)
    plt.ylabel(r'PDF', fontsize=28)
    plt.legend(fontsize=20)

    plt.tick_params(labelsize=20, direction='in', right=True, top=True)

    plt.savefig(str(filter)+'_chi2_dist.png', dpi=400)

    chi2red_x = x / Ndof
    chi2red_y = y / Ndof
    chi2red_t = (x + y) / (2.0 * Ndof)

    print('Mean reduced chi^2: (Ndetect = {0:d} of {1:d})'.format(len(idx), len(tab)))
    fmt = ' {0:s} = {1:.1f} for N_detect and {2:.1f} for all'
    med_chi2red_x_f = np.median(chi2red_x[idx])
    med_chi2red_x_a = np.median(chi2red_x)
    med_chi2red_y_f = np.median(chi2red_y[idx])
    med_chi2red_y_a = np.median(chi2red_y)
    med_chi2red_t_f = np.median(chi2red_t[idx])
    med_chi2red_t_a = np.median(chi2red_t)
    print(fmt.format(' X', med_chi2red_x_f, med_chi2red_x_a))
    print(fmt.format(' Y', med_chi2red_y_f, med_chi2red_y_a))
    print(fmt.format('Tot', med_chi2red_t_f, med_chi2red_t_a))

    return
+
+
+def plot_chi2_dist_per_epoch(tab, Ndetect, mlim=[14,21], ylim = [-1, 1], target_idx = 0, motion_model_dict={}, boot_err=False):
+ """
+ tab = flystar table
+ Ndetect = Number of epochs star detected in
+ """
+ diffX_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+ diffY_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+ errX_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+ errY_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+ sigX_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+ sigY_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+ m_arr = np.nan * np.ones((len(tab['xe']), Ndetect))
+
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+
for ii in range(len(tab['xe'])):
# Ignore the NaNs
fnd = np.argwhere(~np.isnan(tab['xe'][ii,:]))
- if len(fnd) == Ndetect:
+ if len(fnd) == Ndetect and tab['use_in_trans'][ii]:
time = tab['t'][ii, fnd]
x = tab['x'][ii, fnd]
y = tab['y'][ii, fnd]
m = tab['m'][ii, fnd]
- xerr = tab['xe'][ii, fnd]
- yerr = tab['ye'][ii, fnd]
+ if boot_err:
+ xerr = np.hypot(tab['xe_boot'][ii, fnd], tab['xe'][ii, fnd])
+ yerr = np.hypot(tab['ye_boot'][ii, fnd], tab['ye'][ii, fnd])
+ else:
+ xerr = tab['xe'][ii, fnd]
+ yerr = tab['ye'][ii, fnd]
- dt = tab['t'][ii, fnd] - tab['t0'][ii]
- fitLineX = tab['x0'][ii] + (tab['vx'][ii] * dt)
- fitLineY = tab['y0'][ii] + (tab['vy'][ii] * dt)
+ fitLineX = xt_mod_all[ii, fnd]
+ fitLineY = yt_mod_all[ii, fnd]
diffX = x - fitLineX
diffY = y - fitLineY
@@ -2281,14 +2486,17 @@ def plot_chi2_dist_per_epoch(tab, Ndetect, xlim, ylim = [-1, 1], target_idx = 0)
if target_idx is not None:
ax2.plot(m_arr[target_idx, ii], sigX_arr[target_idx, ii], 's', color='black', ms=5)
ax2.plot(m_arr[target_idx, ii], sigY_arr[target_idx, ii], 'o', color='black', ms=5)
- ax2.set_xlim(xlim[0], xlim[1])
+ ax2.set_xlim(mlim[0], mlim[1])
ax2.set_ylim(-5, 5)
ax2.axhline(y=0, color='black', alpha=0.9, zorder=1000)
+ ax2.axhline(y=np.nanmean(sigX_arr[:, ii]), color='tab:blue', alpha=0.9,linestyle='dotted', zorder=1001)
+ ax2.axhline(y=np.nanmean(sigY_arr[:, ii]), color='tab:orange', alpha=0.9,linestyle='dotted', zorder=1002)
ax2.set_xlabel('mag')
ax2.set_ylabel('sigma')
ax2.set_title('Epoch {0}'.format(ii))
ax2.legend()
+ #print(errX_arr[:, ii])
ax3.errorbar(m_arr[:, ii], diffX_arr[:, ii]*1E3, yerr=errX_arr[:, ii]*1E3,
marker='s', label = 'X', ls='none', color='tab:blue', alpha=0.4, ms=5)
ax3.errorbar(m_arr[:, ii], diffY_arr[:, ii]*1E3, yerr=errY_arr[:, ii]*1E3,
@@ -2298,15 +2506,137 @@ def plot_chi2_dist_per_epoch(tab, Ndetect, xlim, ylim = [-1, 1], target_idx = 0)
marker='s', ls='none', color='black', ms=5)
ax3.errorbar(m_arr[target_idx, ii], diffY_arr[target_idx, ii]*1E3, yerr=errY_arr[target_idx, ii]*1E3,
marker='o', ls='none', color='black', ms=5)
- ax3.set_xlim(xlim[0], xlim[1])
+ ax3.set_xlim(mlim[0], mlim[1])
ax3.set_ylim(ylim[0], ylim[1])
ax3.axhline(y=0, color='black', alpha=0.9, zorder=1000)
+ ax3.axhline(y=np.nanmean(diffX_arr[:, ii]*1E3), color='tab:blue', alpha=0.9,linestyle='dotted', zorder=1001)
+ ax3.axhline(y=np.nanmean(diffY_arr[:, ii]*1E3), color='tab:orange', alpha=0.9,linestyle='dotted', zorder=1002)
ax3.set_xlabel('mag')
ax3.set_ylabel('residual (mas)')
return
+
# TODO: update for motion model
def plot_chi2_ecliptic_per_epoch(tab, Ndetect,ra,dec, mlim=[14,21], ylim = [-1, 1], target_idx = 0):
    """
    Plot per-epoch astrometric residuals rotated into ecliptic
    coordinates (lambda, beta) for stars detected in exactly Ndetect
    epochs and flagged 'use_in_trans'.

    Residuals are computed against a linear proper-motion model
    (x0 + vx*dt, y0 + vy*dt) — see the TODO above about generalizing to
    the configurable motion models used elsewhere in this module.

    Parameters
    ----------
    tab : flystar table
        Table with per-epoch 'x', 'y', 'xe', 'ye', 'm', 't' columns and
        fitted 'x0', 'y0', 'vx', 'vy', 't0' columns.
    Ndetect : int
        Number of epochs a star must be detected in to be included.
    ra, dec :
        Field-center coordinates, parsed by SkyCoord with
        unit=(hourangle, deg).
    mlim : list, optional
        [min, max] magnitude range for the x-axes.
        NOTE(review): mutable default argument; not mutated here, but
        consider mlim=None with an in-body default.
    ylim : list, optional
        [min, max] range of the residual axes (in mas, see plotting).
    target_idx : int or None, optional
        Row index of a target star to highlight in black; None to skip.
    """
    # Accumulators over (star, epoch); -99 (or +99 for errors) marks
    # rows that fail the Ndetect / use_in_trans cut below.
    diffX_arr = -99 * np.ones((len(tab['xe']), Ndetect))
    diffY_arr = -99 * np.ones((len(tab['xe']), Ndetect))
    errX_arr = 99 * np.ones((len(tab['xe']), Ndetect))
    errY_arr = 99 * np.ones((len(tab['xe']), Ndetect))
    sigX_arr = -99 * np.ones((len(tab['xe']), Ndetect))
    sigY_arr = -99 * np.ones((len(tab['xe']), Ndetect))
    m_arr = -99 * np.ones((len(tab['xe']), Ndetect))

    # Unit-conversion factors to arcsec.
    rad_to_as = 180/np.pi * 60 * 60   # NOTE(review): unused below
    deg_to_as = 60 * 60
    def eq_to_ec(ra,dec):
        # Rotate equatorial (ra, dec) in radians to ecliptic
        # (lambda, beta); returns both in arcsec.
        # e = obliquity of the ecliptic.
        e = 23.446 * np.pi/180
        sinb = np.sin(dec)*np.cos(e) - np.cos(dec)*np.sin(e)*np.sin(ra)
        cosb = np.cos(np.arcsin(sinb))
        cosg = np.cos(ra)*np.cos(dec)/cosb
        sing = (np.sin(dec)*np.sin(e) + np.cos(dec)*np.cos(e)*np.sin(ra))/cosb
        g,b = np.arctan2(sing,cosg)*180/np.pi,np.arcsin(sinb)*180/np.pi
        # NOTE(review): unconditionally adds 360 deg to the longitude;
        # assumes arctan2 returned a negative branch — confirm for
        # fields at arbitrary ecliptic longitude.
        g = 360+g
        return g*deg_to_as,b*deg_to_as
    coord0 = SkyCoord(ra=ra,dec=dec,unit=(u.hourangle, u.deg),frame='icrs')

    for ii in range(len(tab['xe'])):
        # Ignore the NaNs
        fnd = np.argwhere(~np.isnan(tab['xe'][ii,:]))
        if len(fnd) == Ndetect and tab['use_in_trans'][ii]:
            time = tab['t'][ii, fnd]
            x = tab['x'][ii, fnd]
            y = tab['y'][ii, fnd]
            m = tab['m'][ii, fnd]
            vx = tab['vx'][ii]
            vy = tab['vy'][ii]
            # Ecliptic coordinates of the fitted reference position.
            # NOTE(review): lambda_0/beta_0 are not used below.
            lambda_0,beta_0 = eq_to_ec((coord0.ra - tab['x0'][ii]*u.arcsec).radian,
                                       (coord0.dec + tab['y0'][ii]*u.arcsec).radian)
            # Observed sky positions: x is an E-W offset subtracted from
            # RA, y a N-S offset added to Dec (arcsec offsets from the
            # field center).
            x1 = coord0.ra - u.arcsec*x
            y1 = coord0.dec + u.arcsec*y
            ra_rad,dec_rad = x1.radian, y1.radian
            lambda_obs,beta_obs = eq_to_ec(ra_rad,dec_rad)
            # Linear proper-motion model position at each epoch.
            x2 = coord0.ra - tab['x0'][ii]*u.arcsec - (time-tab['t0'][ii])*vx*u.arcsec
            y2 = coord0.dec + tab['y0'][ii]*u.arcsec + (time-tab['t0'][ii])*vy*u.arcsec
            ra_rad,dec_rad = x2.radian, y2.radian
            lambda_pm,beta_pm = eq_to_ec(ra_rad,dec_rad)

            xerr = tab['xe'][ii, fnd]
            yerr = tab['ye'][ii, fnd]

            dt = tab['t'][ii, fnd] - tab['t0'][ii]   # NOTE(review): unused
            fitLineX = lambda_pm
            fitLineY = beta_pm

            # Residuals in ecliptic longitude/latitude (arcsec), and the
            # same scaled by the (equatorial) measurement errors.
            diffX = lambda_obs - fitLineX
            diffY = beta_obs - fitLineY
            sigX = diffX / xerr
            sigY = diffY / yerr

            diffX_arr[ii] = diffX.reshape(Ndetect,)
            diffY_arr[ii] = diffY.reshape(Ndetect,)
            errX_arr[ii] = xerr.reshape(Ndetect,)
            errY_arr[ii] = yerr.reshape(Ndetect,)
            sigX_arr[ii] = sigX.reshape(Ndetect,)
            sigY_arr[ii] = sigY.reshape(Ndetect,)
            m_arr[ii] = m.reshape(Ndetect,)

    # Plot epochs ordered by phase within a year (t mod 1), taken from
    # the first star's time stamps.
    ts_folded = tab['t'][0]%1
    i_sort = np.argsort(ts_folded)
    print(ts_folded,i_sort)
    for ii in i_sort:
        # (Earlier 3-panel layout kept for reference.)
#        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 4),
#                                            gridspec_kw={'width_ratios': [1, 2, 2]})
#        plt.subplots_adjust(wspace=0.5)
#        ax1.hist(sigX_arr[:, ii], label = 'X', histtype='step', bins=np.linspace(-10, 10))
#        ax1.hist(sigY_arr[:, ii], label = 'Y', histtype='step', bins=np.linspace(-10, 10))
#        ax1.set_xlabel('sigma')
#        ax1.legend()

        fig, (ax2, ax3) = plt.subplots(1, 2, figsize=(14, 4))
        plt.subplots_adjust(wspace=0.25)

        '''ax2.plot(m_arr[:, ii], sigX_arr[:, ii], 's', label = 'lambda', color='tab:blue', alpha=0.4, ms=5)
        ax2.plot(m_arr[:, ii], sigY_arr[:, ii], 'o', label = 'beta', color='tab:orange', alpha=0.4, ms=5)
        if target_idx is not None:
            ax2.plot(m_arr[target_idx, ii], sigX_arr[target_idx, ii], 's', color='black', ms=5)
            ax2.plot(m_arr[target_idx, ii], sigY_arr[target_idx, ii], 'o', color='black', ms=5)
        ax2.set_xlim(mlim[0], mlim[1])
        ax2.set_ylim(-5, 5)
        ax2.axhline(y=0, color='black', alpha=0.9, zorder=1000)
        ax2.set_xlabel('mag')
        ax2.set_ylabel('sigma')'''
        ax2.set_title('Epoch {0}'.format(ii)+', phase='+str(tab['t'][0][ii]%1)[:5])

        #print(errX_arr[:, ii])
        # Residuals converted to mas for display (1E3 factor).
        ax2.errorbar(m_arr[:, ii], diffX_arr[:, ii]*1E3, yerr=errX_arr[:, ii]*1E3,
                     marker='s', label = 'lambda', ls='none', color='tab:blue', alpha=0.4, ms=5)
        ax3.errorbar(m_arr[:, ii], diffY_arr[:, ii]*1E3, yerr=errY_arr[:, ii]*1E3,
                     marker='o', label = 'beta', ls='none', color='tab:orange', alpha=0.4, ms=5)
        if target_idx is not None:
            #print('target',m_arr[target_idx, ii],diffX_arr[target_idx, ii]*1E3,diffY_arr[target_idx, ii]*1E3)
            ax2.errorbar(m_arr[target_idx, ii], diffX_arr[target_idx, ii]*1E3, yerr=errX_arr[target_idx, ii]*1E3,
                         marker='s', ls='none', color='black', ms=5)
            ax3.errorbar(m_arr[target_idx, ii], diffY_arr[target_idx, ii]*1E3, yerr=errY_arr[target_idx, ii]*1E3,
                         marker='o', ls='none', color='black', ms=5)
        ax2.legend()
        ax3.legend()
        ax2.set_xlim(mlim[0], mlim[1])
        ax3.set_xlim(mlim[0], mlim[1])
        ax2.set_ylim(ylim[0], ylim[1])
        ax3.set_ylim(ylim[0], ylim[1])
        ax2.axhline(y=0, color='black', alpha=0.9, zorder=1000)
        ax3.axhline(y=0, color='black', alpha=0.9, zorder=1000)
        ax2.set_xlabel('mag')
        ax2.set_ylabel('residual (mas)')
        ax3.set_xlabel('mag')
        ax3.set_ylabel('residual (mas)')
    return
+
+def plot_chi2_dist_mag(tab, Ndetect, xlim=40, n_bins=30, boot_err=False):
"""
tab = flystar table
Ndetect = Number of epochs star detected in
@@ -2320,9 +2650,12 @@ def plot_chi2_dist_mag(tab, Ndetect, mlim=40, n_bins=30):
fnd_list.append(len(fnd))
m = tab['m'][ii, fnd]
- merr = tab['me'][ii, fnd]
+ if boot_err:
+ merr = np.hypot(tab['me_boot'][ii, fnd], tab['me'][ii, fnd])
+ else:
+ merr = tab['me'][ii, fnd]
m0 = tab['m0'][ii]
- m0err = tab['m0e'][ii]
+ m0err = tab['m0_err'][ii]
diff_m = m0 - m
sig_m = diff_m/merr
@@ -2337,16 +2670,16 @@ def plot_chi2_dist_mag(tab, Ndetect, mlim=40, n_bins=30):
# Fitting mean magnitude... so subtract 1 to get Ndof
Ndof = Ndetect - 1
- chi2_maxis = np.linspace(0, mlim, mlim*3)
- chi2_bins = np.linspace(0, mlim, n_bins)
+ chi2_maxis = np.linspace(0, xlim, xlim*3)
+ chi2_bins = np.linspace(0, xlim, n_bins)
plt.figure(figsize=(6,4))
plt.clf()
- plt.hist(chi2_m[idx], bins=np.arange(mlim*10), histtype='step', density=True)
+ plt.hist(chi2_m[idx], bins=np.arange(xlim*10), histtype='step', density=True)
plt.plot(chi2_maxis, chi2.pdf(chi2_maxis, Ndof), 'r-', alpha=0.6,
label='$\chi^2$ ' + str(Ndof) + ' dof')
plt.title('$N_{epoch} = $' + str(Ndetect) + ', $N_{dof} = $' + str(Ndof))
- plt.xlim(0, mlim)
+ plt.xlim(0, xlim)
plt.legend()
print('Mean reduced chi^2: (Ndetect = {0:d} of {1:d})'.format(len(idx), len(tab)))
@@ -2355,13 +2688,72 @@ def plot_chi2_dist_mag(tab, Ndetect, mlim=40, n_bins=30):
return
-def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25), color_time=False):
def plot_chi2_dist_mag_per_filter(tab, Ndetect, mlim=40, n_bins=30, xlim=40, file_name=None, filter=None):
    """
    Plot the per-star chi^2 distribution of the photometric residuals
    (m0 - m) for stars detected in exactly Ndetect epochs, overlay the
    expected chi^2 PDF, and save the figure as
    '<filter>_chi2_dist_mag.png'.

    Parameters
    ----------
    tab : flystar StarTable
        Table with per-epoch 'm', 'me' columns and fitted 'm0',
        'm0_err' columns.
    Ndetect : int
        Number of epochs a star must be detected in to be included.
    mlim, n_bins, file_name :
        Currently unused; kept for backward compatibility.
    xlim : int, optional
        Upper limit of the chi^2 axis.
    filter : str, optional
        Filter name used in the output file name. (Shadows the builtin
        `filter`; name kept for API compatibility.)
    """
    chi2_m_list = []
    fnd_list = []  # Number of non-NaN error measurements per star

    for ii in range(len(tab['me'])):
        # Ignore the NaNs (epochs where the star was not detected).
        fnd = np.argwhere(~np.isnan(tab['me'][ii,:]))
        fnd_list.append(len(fnd))

        m = tab['m'][ii, fnd]
        merr = tab['me'][ii, fnd]
        m0 = tab['m0'][ii]

        diff_m = m0 - m
        sig_m = diff_m/merr

        chi2_m_list.append(np.sum(sig_m**2))

    chi2_m = np.array(chi2_m_list)
    fnd = np.array(fnd_list)

    idx = np.where(fnd == Ndetect)[0]

    # Fitting only the mean magnitude (m0) ... so subtract 1 to get Ndof.
    Ndof = Ndetect - 1
    chi2_maxis = np.linspace(0, xlim, xlim*3)

    plt.figure(figsize=(6,4))
    plt.clf()
    plt.hist(chi2_m[idx], bins=np.arange(xlim*10), label='mag', histtype='stepfilled', density=True, color='green', alpha=0.7, edgecolor='k')
    # Raw string: '\c' in a non-raw literal is an invalid escape sequence.
    plt.plot(chi2_maxis, chi2.pdf(chi2_maxis, Ndof), 'r-', alpha=0.6,
             label=r'$\chi^2$ ' + str(Ndof) + ' dof')
    plt.xlim(0, xlim)
    plt.xlabel(r'$\chi^{2}$', fontsize=28)
    plt.ylabel(r'PDF', fontsize=28)
    plt.legend(fontsize=20)

    plt.tick_params(labelsize=20, direction='in', right=True, top=True)

    plt.savefig(str(filter)+'_chi2_dist_mag.png', dpi=400)

    print('Mean reduced chi^2: (Ndetect = {0:d} of {1:d})'.format(len(idx), len(tab)))
    fmt = ' {0:s} = {1:.1f} for N_detect and {2:.1f} for all'
    # One fitted parameter (m0) -> Ndof = N - 1. The previous "- 2" was
    # the position+velocity dof count copied from the astrometric
    # version and understated the reduced chi^2 denominator.
    print(fmt.format('M', np.median(chi2_m[idx] / (fnd[idx] - 1)), np.median(chi2_m / (fnd - 1))))

    return
+
+def plot_stars(tab, star_names, motion_model_dict={}, NcolMax=2, epoch_array = None, figsize=(15,25), color_time=False, boot_err=False):
"""
Plot a set of stars positions, flux and residuals over time.
epoch_array : None, array
Array of the epoch indicies to plot. If None, plots all epochs.
"""
+
+ def rs(x):
+ return x.reshape(len(x))
+
print( 'Creating residuals plots for star(s):' )
print( star_names )
@@ -2379,6 +2771,11 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
x = tab['x0']
y = tab['y0']
r = np.hypot(x, y)
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ cont_times = np.arange(np.min(tab['t'][i_all_detected]), np.max(tab['t'][i_all_detected]), 0.01)
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+ xt_cont_all, yt_cont_all, xt_cont_err, yt_cont_err = tab.get_star_positions_at_time(cont_times, motion_model_dict, allow_alt_models=True)
for i in range(Nstars):
starName = star_names[i]
@@ -2402,19 +2799,25 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
y = tab['y'][ii, fnd]
m = tab['m'][ii, fnd]
- xerr = tab['xe'][ii, fnd]
- yerr = tab['ye'][ii, fnd]
- merr = tab['me'][ii, fnd]
+ if boot_err:
+ xerr = np.hypot(tab['xe'][ii, fnd], tab['xe_boot'][ii, fnd])
+ yerr = np.hypot(tab['ye'][ii, fnd], tab['ye_boot'][ii, fnd])
+ merr = np.hypot(tab['me'][ii, fnd], tab['me_boot'][ii, fnd])
+ else:
+ xerr = tab['xe'][ii, fnd]
+ yerr = tab['ye'][ii, fnd]
+ merr = tab['me'][ii, fnd]
dt = tab['t'][ii, fnd] - tab['t0'][ii]
- fitLineX = tab['x0'][ii] + (tab['vx'][ii] * dt)
- fitLineY = tab['y0'][ii] + (tab['vy'][ii] * dt)
+
+ fitLineX = xt_mod_all[ii, fnd]
+ fitLineY = yt_mod_all[ii, fnd]
- fitSigX = np.hypot(tab['x0e'][ii], tab['vxe'][ii]*dt)
- fitSigY = np.hypot(tab['y0e'][ii], tab['vye'][ii]*dt)
+ fitSigX = xt_mod_err[ii, fnd]
+ fitSigY = yt_mod_err[ii, fnd]
fitLineM = np.repeat(tab['m0'][ii], len(dt)).reshape(len(dt),1)
- fitSigM = np.repeat(tab['m0e'][ii], len(dt)).reshape(len(dt),1)
+ fitSigM = np.repeat(tab['m0_err'][ii], len(dt)).reshape(len(dt),1)
diffX = x - fitLineX
diffY = y - fitLineY
@@ -2437,7 +2840,7 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
chi2_y = np.sum(sigY**2)
chi2_m = np.sum(sigM**2)
- dof = len(x) - 2
+ dof = (len(tab['x'][ii])-tab['n_params'][ii]).astype(int)
dofM = len(m) - 1
chi2_red_x = chi2_x / dof
@@ -2452,6 +2855,8 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
(chi2_red_y, chi2_y, dof))
print( '\tM Chi^2 = %5.2f (%6.2f for %2d dof)' %
(chi2_red_m, chi2_m, dofM))
+ if 'motion_model_used' in tab.keys():
+ print('\tMotion model:', tab['motion_model_used'][ii])
tmin = time.min()
tmax = time.max()
@@ -2502,11 +2907,16 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
ind = int((row-1)*Ncols + col)
paxes = plt.subplot(Nrows, Ncols, ind)
- plt.plot(time, fitLineX, 'b-')
- plt.plot(time, fitLineX + fitSigX, 'b--')
- plt.plot(time, fitLineX - fitSigX, 'b--')
+ plt.plot(cont_times, xt_cont_all[ii], 'b-')
+ plt.plot(cont_times, xt_cont_all[ii] + xt_cont_err[ii], 'b--')
+ plt.plot(cont_times, xt_cont_all[ii] - xt_cont_err[ii], 'b--')
if not color_time:
- plt.errorbar(time, x, yerr=xerr.reshape(len(xerr),), fmt='k.')
+ #print('x:',x)
+ #print('xerr:',xerr)
+ #print('xerr_reshaped:', xerr.reshape(len(xerr),))
+ #plt.errorbar(time, x, yerr=xerr.reshape(len(xerr)), fmt='k.')
+ plt.errorbar(rs(time), rs(x), yerr=rs(xerr), fmt='k.')
+ #plt.errorbar(time, x, yerr=xerr, fmt='k.')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2534,11 +2944,11 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
ind = int((row-1)*Ncols + col)
paxes = plt.subplot(Nrows, Ncols, ind)
- plt.plot(time, fitLineY, 'b-')
- plt.plot(time, fitLineY + fitSigY, 'b--')
- plt.plot(time, fitLineY - fitSigY, 'b--')
+ plt.plot(cont_times, yt_cont_all[ii], 'b-')
+ plt.plot(cont_times, yt_cont_all[ii] + yt_cont_err[ii], 'b--')
+ plt.plot(cont_times, yt_cont_all[ii] - yt_cont_err[ii], 'b--')
if not color_time:
- plt.errorbar(time, y, yerr=yerr.reshape(len(yerr),), fmt='k.')
+ plt.errorbar(rs(time), rs(y), yerr=rs(yerr), fmt='k.')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2568,7 +2978,7 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
plt.plot(time, fitLineM + fitSigM, 'g--')
plt.plot(time, fitLineM - fitSigM, 'g--')
if not color_time:
- plt.errorbar(time, m, yerr=merr.reshape(len(merr),), fmt='k.')
+ plt.errorbar(rs(time), rs(m), yerr=rs(merr), fmt='k.')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2597,10 +3007,10 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
paxes = plt.subplot(Nrows, Ncols, ind)
plt.plot(time, np.zeros(len(time)), 'b-')
- plt.plot(time, fitSigX*1e3, 'b--')
- plt.plot(time, -fitSigX*1e3, 'b--')
+ plt.plot(cont_times, xt_cont_err[ii]*1e3, 'b--')
+ plt.plot(cont_times, -xt_cont_err[ii]*1e3, 'b--')
if not color_time:
- plt.errorbar(time, (x - fitLineX)*1e3, yerr=xerr.reshape(len(xerr),)*1e3, fmt='k.')
+ plt.errorbar(rs(time), rs(x - fitLineX)*1e3, yerr=rs(xerr)*1e3, fmt='k.')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2625,10 +3035,10 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
paxes = plt.subplot(Nrows, Ncols, ind)
plt.plot(time, np.zeros(len(time)), 'b-')
- plt.plot(time, fitSigY*1e3, 'b--')
- plt.plot(time, -fitSigY*1e3, 'b--')
+ plt.plot(cont_times, yt_cont_err[ii]*1e3, 'b--')
+ plt.plot(cont_times, -yt_cont_err[ii]*1e3, 'b--')
if not color_time:
- plt.errorbar(time, (y - fitLineY)*1e3, yerr=yerr.reshape(len(yerr),)*1e3, fmt='k.')
+ plt.errorbar(rs(time), rs(y - fitLineY)*1e3, yerr=rs(yerr)*1e3, fmt='k.')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2656,7 +3066,7 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
plt.plot(time, fitSigM*1e3, 'g--')
plt.plot(time, -fitSigM*1e3, 'g--')
if not color_time:
- plt.errorbar(time, (m - fitLineM), yerr=merr.reshape(len(merr),), fmt='k.')
+ plt.errorbar(rs(time), rs(m - fitLineM), yerr=rs(merr), fmt='k.')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2683,8 +3093,8 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
paxes = plt.subplot(Nrows, Ncols, ind)
if not color_time:
- plt.errorbar(x,y, xerr=xerr.reshape(len(xerr),),
- yerr=yerr.reshape(len(yerr),), fmt='k.')
+ plt.errorbar(rs(x),rs(y), xerr=rs(xerr),
+ yerr=rs(yerr), fmt='k.')
else:
sc = plt.scatter(x, y, s=0, c=dtime, vmin=0, vmax=1, cmap='hsv')
clb = plt.colorbar(sc)
@@ -2701,8 +3111,8 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
paxes.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
plt.xlabel('X (asec)', fontsize=fontsize1)
plt.ylabel('Y (asec)', fontsize=fontsize1)
- plt.plot(fitLineX, fitLineY, 'b-')
-
+ plt.plot(xt_cont_all[ii], yt_cont_all[ii], 'b-')
+
##########
# X, Y Histogram of Residuals
##########
@@ -2752,10 +3162,8 @@ def plot_stars(tab, star_names, NcolMax=2, epoch_array = None, figsize=(15,25),
return
-
-
-def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_list = None,
- figsize=(15,25), color_time=False, resTicRng=None):
+def plot_stars_nfilt(tab, star_names, motion_model_dict={}, NcolMax=2, epoch_array_list = None, color_list = None,
+ figsize=(15,25), color_time=False, resTicRng=None, save_name=None, boot_err=False):
"""
Plot a set of stars positions, flux and residuals over time.
@@ -2767,6 +3175,12 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
"""
print( 'Creating residuals plots for star(s):' )
print( star_names )
+ def rs(x):
+ return x.reshape(len(x))
+
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
Nstars = len(star_names)
Ncols = 3 * np.min([Nstars, NcolMax])
@@ -2782,6 +3196,11 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
x = tab['x0']
y = tab['y0']
r = np.hypot(x, y)
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, tab, None)
+ i_all_detected = np.where(~np.any(np.isnan(tab['t']),axis=1))[0][0]
+ cont_times = np.arange(np.min(tab['t'][i_all_detected]), np.max(tab['t'][i_all_detected]), 0.01)
+ xt_mod_all, yt_mod_all, xt_mod_err, yt_mod_err = tab.get_star_positions_at_time(tab['t'][i_all_detected], motion_model_dict, allow_alt_models=True)
+ xt_cont_all, yt_cont_all, xt_cont_err, yt_cont_err = tab.get_star_positions_at_time(cont_times, motion_model_dict, allow_alt_models=True)
for i in range(Nstars):
for ea, epoch_array in enumerate(epoch_array_list):
@@ -2807,19 +3226,23 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
y = tab['y'][ii, fnd]
m = tab['m'][ii, fnd]
- xerr = tab['xe'][ii, fnd]
- yerr = tab['ye'][ii, fnd]
- merr = tab['me'][ii, fnd]
+ if boot_err:
+ xerr = np.hypot(tab['xe'][ii, fnd], tab['xe_boot'][ii, fnd])
+ yerr = np.hypot(tab['ye'][ii, fnd], tab['ye_boot'][ii, fnd])
+ merr = np.hypot(tab['me'][ii, fnd], tab['me_boot'][ii, fnd])
+ else:
+ xerr = tab['xe'][ii, fnd]
+ yerr = tab['ye'][ii, fnd]
+ merr = tab['me'][ii, fnd]
- dt = tab['t'][ii, fnd] - tab['t0'][ii]
- fitLineX = tab['x0'][ii] + (tab['vx'][ii] * dt)
- fitLineY = tab['y0'][ii] + (tab['vy'][ii] * dt)
+ fitLineX = xt_mod_all[ii, fnd]
+ fitLineY = yt_mod_all[ii, fnd]
- fitSigX = np.hypot(tab['x0e'][ii], tab['vxe'][ii]*dt)
- fitSigY = np.hypot(tab['y0e'][ii], tab['vye'][ii]*dt)
+ fitSigX = xt_mod_err[ii, fnd]
+ fitSigY = yt_mod_err[ii, fnd]
- fitLineM = np.repeat(tab['m0'][ii], len(dt)).reshape(len(dt),1)
- fitSigM = np.repeat(tab['m0e'][ii], len(dt)).reshape(len(dt),1)
+ fitLineM = np.repeat(tab['m0'][ii], len(time)).reshape(len(time),1)
+ fitSigM = np.repeat(tab['m0_err'][ii], len(time)).reshape(len(time),1)
diffX = x - fitLineX
diffY = y - fitLineY
@@ -2908,11 +3331,12 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
ind = int((row-1)*Ncols + col)
paxes = plt.subplot(Nrows, Ncols, ind)
- plt.plot(time, fitLineX, 'b-')
- plt.plot(time, fitLineX + fitSigX, 'b--')
- plt.plot(time, fitLineX - fitSigX, 'b--')
+ plt.plot(cont_times, xt_cont_all[ii], 'b-')
+ plt.plot(cont_times, xt_cont_all[ii] + xt_cont_err[ii], 'b--')
+ plt.plot(cont_times, xt_cont_all[ii] - xt_cont_err[ii], 'b--')
+ print(np.shape(xerr.reshape(len(xerr),)))
if not color_time:
- plt.errorbar(time, x, yerr=xerr.reshape(len(xerr),), marker='.', color=color, ls='none')
+ plt.errorbar(rs(time), rs(x), yerr=rs(xerr), marker='.', color=color, ls='none')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2940,11 +3364,11 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
ind = int((row-1)*Ncols + col)
paxes = plt.subplot(Nrows, Ncols, ind)
- plt.plot(time, fitLineY, 'b-')
- plt.plot(time, fitLineY + fitSigY, 'b--')
- plt.plot(time, fitLineY - fitSigY, 'b--')
+ plt.plot(cont_times, yt_cont_all[ii], 'b-')
+ plt.plot(cont_times, yt_cont_all[ii] + yt_cont_err[ii], 'b--')
+ plt.plot(cont_times, yt_cont_all[ii] - yt_cont_err[ii], 'b--')
if not color_time:
- plt.errorbar(time, y, yerr=yerr.reshape(len(yerr),), marker='.', color=color, ls='none')
+ plt.errorbar(rs(time), rs(y), yerr=rs(yerr), marker='.', color=color, ls='none')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -2974,7 +3398,7 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
plt.plot(time, fitLineM + fitSigM, 'g--')
plt.plot(time, fitLineM - fitSigM, 'g--')
if not color_time:
- plt.errorbar(time, m, yerr=merr.reshape(len(merr),), marker='.', color=color, ls='none')
+ plt.errorbar(rs(time), rs(m), yerr=rs(merr), marker='.', color=color, ls='none')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -3003,10 +3427,10 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
paxes = plt.subplot(Nrows, Ncols, ind)
plt.plot(time, np.zeros(len(time)), 'b-')
- plt.plot(time, fitSigX*1e3, 'b--')
- plt.plot(time, -fitSigX*1e3, 'b--')
+ plt.plot(cont_times, xt_cont_err[ii]*1e3, 'b--')
+ plt.plot(cont_times, -xt_cont_err[ii]*1e3, 'b--')
if not color_time:
- plt.errorbar(time, (x - fitLineX)*1e3, yerr=xerr.reshape(len(xerr),)*1e3, marker='.', color=color, ls='none')
+ plt.errorbar(rs(time), rs(x - fitLineX)*1e3, yerr=rs(xerr)*1e3, marker='.', color=color, ls='none')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -3031,10 +3455,10 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
paxes = plt.subplot(Nrows, Ncols, ind)
plt.plot(time, np.zeros(len(time)), 'b-')
- plt.plot(time, fitSigY*1e3, 'b--')
- plt.plot(time, -fitSigY*1e3, 'b--')
+ plt.plot(cont_times, yt_cont_err[ii]*1e3, 'b--')
+ plt.plot(cont_times, -yt_cont_err[ii]*1e3, 'b--')
if not color_time:
- plt.errorbar(time, (y - fitLineY)*1e3, yerr=yerr.reshape(len(yerr),)*1e3, marker='.', color=color, ls='none')
+ plt.errorbar(rs(time), rs(y - fitLineY)*1e3, yerr=rs(yerr)*1e3, marker='.', color=color, ls='none')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -3062,7 +3486,7 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
plt.plot(time, fitSigM*1e3, 'g--')
plt.plot(time, -fitSigM*1e3, 'g--')
if not color_time:
- plt.errorbar(time, (m - fitLineM), yerr=merr.reshape(len(merr),), marker='.', color=color, ls='none')
+ plt.errorbar(rs(time), rs(m - fitLineM), yerr=rs(merr), marker='.', color=color, ls='none')
else:
norm = colors.Normalize(vmin=0, vmax=1, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap='hsv')
@@ -3089,8 +3513,8 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
paxes = plt.subplot(Nrows, Ncols, ind)
if not color_time:
- plt.errorbar(x,y, xerr=xerr.reshape(len(xerr),),
- yerr=yerr.reshape(len(yerr),), marker='.', color=color, ls='none')
+ plt.errorbar(rs(x),rs(y), xerr=rs(xerr),
+ yerr=rs(yerr), marker='.', color=color, ls='none')
else:
sc = plt.scatter(x, y, s=0, c=dtime, vmin=0, vmax=1, cmap='hsv')
clb = plt.colorbar(sc)
@@ -3153,7 +3577,8 @@ def plot_stars_nfilt(tab, star_names, NcolMax=2, epoch_array_list = None, color_
plt.subplots_adjust(wspace=0.6, hspace=0.6, left = 0.08, bottom = 0.05, right=0.95, top=0.90)
# plt.savefig(rootDir+'plots/plotStar_all.png')
plt.show()
-
+ if save_name is not None:
+ plt.savefig(save_name + '.png')
plt.show()
return
@@ -3166,8 +3591,8 @@ def plot_errors_vs_r_m(star_tab, vmax_perr=0.75, vmax_pmerr=0.75):
two axis (as is used in pick_good_ref_stars()).
"""
r = np.hypot(star_tab['x0'], star_tab['y0'])
- p_err = np.mean((star_tab['x0e'], star_tab['y0e']), axis=0) * 1e3
- pm_err = np.mean((star_tab['vxe'], star_tab['vye']), axis=0) * 1e3
+ p_err = np.mean((star_tab['x0_err'], star_tab['y0_err']), axis=0) * 1e3
+ pm_err = np.mean((star_tab['vx_err'], star_tab['vy_err']), axis=0) * 1e3
plt.figure(figsize=(12, 6))
plt.clf()
@@ -3186,7 +3611,20 @@ def plot_errors_vs_r_m(star_tab, vmax_perr=0.75, vmax_pmerr=0.75):
plt.ylabel('Radius (")')
return
-
+
+def plot_plxs(star_tab, target_idx=0):
+ fig,ax = plt.subplots(nrows=1,ncols=2,figsize=(10,5))
+ ax[0].errorbar(star_tab['m0'],star_tab['pi']*1e3, yerr=star_tab['pi_err']*1e3,marker='.',linestyle='none')
+ if target_idx is not None:
+ ax[0].errorbar(star_tab['m0'][target_idx],star_tab['pi'][target_idx]*1e3, yerr=star_tab['pi_err'][target_idx]*1e3,marker='*',linestyle='none', color='orange', markersize=10)
+ ax[0].axhline(0, c='gray')
+ ax[0].set_ylabel('Plx (mas)')
+ ax[0].set_xlabel('Mag')
+ ax[1].hist(star_tab['pi']/star_tab['pi_err'], bins=range(-10,10))
+ ax[1].set_ylabel('N stars')
+ ax[1].set_xlabel('Plx/Plx_err')
+ plt.tight_layout()
+ ax[0].set_ylim(-5,5)
def plot_sky(stars_tab,
plot_errors=False, center_star=None, range=0.4,
diff --git a/flystar/starlists.py b/flystar/starlists.py
index be49458..23df44f 100644
--- a/flystar/starlists.py
+++ b/flystar/starlists.py
@@ -209,12 +209,12 @@ def read_label(labelFile, prop_to_time=None, flipX=True):
t_label.rename_column('col2', 'm')
t_label.rename_column('col3', 'x0')
t_label.rename_column('col4', 'y0')
- t_label.rename_column('col5', 'x0e')
- t_label.rename_column('col6', 'y0e')
+ t_label.rename_column('col5', 'x0_err')
+ t_label.rename_column('col6', 'y0_err')
t_label.rename_column('col7', 'vx')
t_label.rename_column('col8', 'vy')
- t_label.rename_column('col9', 'vxe')
- t_label.rename_column('col10','vye')
+ t_label.rename_column('col9', 'vx_err')
+ t_label.rename_column('col10','vy_err')
t_label.rename_column('col11','t0')
t_label.rename_column('col12','use')
t_label.rename_column('col13','r0')
@@ -222,23 +222,23 @@ def read_label(labelFile, prop_to_time=None, flipX=True):
# Convert velocities from mas/yr to arcsec/year
# t_label['vx'] *= 0.001
# t_label['vy'] *= 0.001
-# t_label['vxe'] *= 0.001
-# t_label['vye'] *= 0.001
+# t_label['vx_err'] *= 0.001
+# t_label['vy_err'] *= 0.001
t_label['vx'] = t_label['vx'] * 0.001
t_label['vy'] = t_label['vy'] * 0.001
- t_label['vxe'] = t_label['vxe'] * 0.001
- t_label['vye'] = t_label['vye'] * 0.001
+ t_label['vx_err'] = t_label['vx_err'] * 0.001
+ t_label['vy_err'] = t_label['vy_err'] * 0.001
# propogate to prop_to_time if prop_to_time is given
if prop_to_time != None:
x0 = t_label['x0']
- x0e = t_label['x0e']
+ x0e = t_label['x0_err']
vx = t_label['vx']
- vxe = t_label['vxe']
+ vxe = t_label['vx_err']
y0 = t_label['y0']
- y0e = t_label['y0e']
+ y0e = t_label['y0_err']
vy = t_label['vy']
- vye = t_label['vye']
+ vye = t_label['vy_err']
t0 = t_label['t0']
t_label['x'] = x0 + vx*(prop_to_time - t0)
t_label['y'] = y0 + vy*(prop_to_time - t0)
@@ -316,12 +316,12 @@ def read_label_accel(labelFile, prop_to_time=None, flipX=True):
t_label.rename_column('col2', 'm')
t_label.rename_column('col3', 'x0')
t_label.rename_column('col4', 'y0')
- t_label.rename_column('col5', 'x0e')
- t_label.rename_column('col6', 'y0e')
+ t_label.rename_column('col5', 'x0_err')
+ t_label.rename_column('col6', 'y0_err')
t_label.rename_column('col7', 'vx')
t_label.rename_column('col8', 'vy')
- t_label.rename_column('col9', 'vxe')
- t_label.rename_column('col10','vye')
+ t_label.rename_column('col9', 'vx_err')
+ t_label.rename_column('col10','vy_err')
t_label.rename_column('col11', 'ax')
t_label.rename_column('col12', 'ay')
t_label.rename_column('col13', 'axe')
@@ -333,12 +333,12 @@ def read_label_accel(labelFile, prop_to_time=None, flipX=True):
# Convert velocities from mas/yr to arcsec/year
# t_label['vx'] *= 0.001
# t_label['vy'] *= 0.001
-# t_label['vxe'] *= 0.001
-# t_label['vye'] *= 0.001
+# t_label['vx_err'] *= 0.001
+# t_label['vy_err'] *= 0.001
t_label['vx'] = t_label['vx'] * 0.001
t_label['vy'] = t_label['vy'] * 0.001
- t_label['vxe'] = t_label['vxe'] * 0.001
- t_label['vye'] = t_label['vye'] * 0.001
+ t_label['vx_err'] = t_label['vx_err'] * 0.001
+ t_label['vy_err'] = t_label['vy_err'] * 0.001
t_label['ax'] = t_label['ax'] * 0.001
t_label['ay'] = t_label['ay'] * 0.001
@@ -348,15 +348,15 @@ def read_label_accel(labelFile, prop_to_time=None, flipX=True):
# propogate to prop_to_time if prop_to_time is given
if prop_to_time != None:
x0 = t_label['x0']
- x0e = t_label['x0e']
+ x0e = t_label['x0_err']
vx = t_label['vx']
- vxe = t_label['vxe']
+ vxe = t_label['vx_err']
ax = t_label['ax']
axe = t_label['axe']
y0 = t_label['y0']
- y0e = t_label['y0e']
+ y0e = t_label['y0_err']
vy = t_label['vy']
- vye = t_label['vye']
+ vye = t_label['vy_err']
ay = t_label['ay']
aye = t_label['aye']
t0 = t_label['t0']
@@ -535,6 +535,8 @@ def __init__(self, *args, **kwargs):
# Check all the arrays.
arg_tab = ('x', 'y', 'm', 'xe', 'ye', 'me', 'corr')
+ #print(kwargs)
+
for arg_test in arg_tab:
if arg_test in kwargs:
if not isinstance(kwargs[arg_test], np.ndarray):
diff --git a/flystar/startables.py b/flystar/startables.py
index 84953a9..d75fca9 100644
--- a/flystar/startables.py
+++ b/flystar/startables.py
@@ -1,7 +1,7 @@
-from astropy.table import Table, Column, hstack
+from astropy.table import Table, Column, MaskedColumn, hstack
from astropy.stats import sigma_clipping
+from astropy.time import Time
from scipy.optimize import curve_fit
-from flystar.fit_velocity import linear_fit, calc_chi2, linear, fit_velocity
from tqdm import tqdm
import numpy as np
import warnings
@@ -9,7 +9,8 @@
import pdb
import time
import copy
-
+from flystar import motion_model
+import pandas as pd
class StarTable(Table):
"""
@@ -31,6 +32,9 @@ class StarTable(Table):
Optional table columns (input as keywords):
-------------------------
+ motion_model : 1D numpy.array with shape = N_stars
+ string indicating motion model type for each star
+
xe : 2D numpy.array with shape = (N_stars, N_lists)
Position uncertainties of N_stars in each of N_lists in the x dimension.
@@ -58,7 +62,6 @@ class StarTable(Table):
ref_list : int
Specify which list is the reference list (if any).
-
Examples
--------------------------
@@ -126,7 +129,7 @@ def __init__(self, *args, ref_list=0, **kwargs):
# We have to have special handling of meta-data (i.e. info that has
# dimensions of n_lists).
- meta_tab = ('LIST_TIMES', 'LIST_NAMES')
+ meta_tab = ('list_times', 'list_names')
meta_type = ((float, int), str)
for mm in range(len(meta_tab)):
meta_test = meta_tab[mm]
@@ -153,6 +156,9 @@ def __init__(self, *args, ref_list=0, **kwargs):
if meta_arg in kwargs:
self.meta[meta_arg] = kwargs[meta_arg]
del kwargs[meta_arg]
+ elif meta_arg.upper() in kwargs:
+ self.meta[meta_arg] = kwargs[meta_arg.upper()]
+ del kwargs[meta_arg]
for arg in kwargs:
if arg in ['name', 'x', 'y', 'm']:
@@ -161,6 +167,12 @@ def __init__(self, *args, ref_list=0, **kwargs):
self.add_column(Column(data=kwargs[arg], name=arg))
if arg == 'name_in_list':
self['name_in_list'] = self['name_in_list'].astype('U20')
+ if arg == 'motion_model_input':
+ self['motion_model_input'] = self['motion_model_input'].astype('U20')
+ if arg == 'motion_model_used':
+ self['motion_model_used'] = self['motion_model_used'].astype('U20')
+ #if 'motion_model_input' not in kwargs:
+ # self['motion_model_input'] = np.repeat(self.default_motion_model, len(self['name']))
return
@@ -228,7 +240,7 @@ def _add_list_data_from_starlist(self, starlist):
# Meta table entries with a size that matches the n_lists size are the ones
# that need a new value. We have to add something... whatever was passed in or None
- if isinstance(self.meta[tab_key], collections.abc.Iterable) and (len(self.meta[tab_key]) == self.meta['n_lists']):
+ if isinstance(self.meta[tab_key], collections.abc.Iterable) and (len(self.meta[tab_key]) == self.meta['n_lists']) and (not isinstance(self.meta[tab_key], str)):
# If we find the key in the starlists' meta argument, then add the new values.
# Otherwise, add "None".
@@ -285,7 +297,7 @@ def _add_list_data_from_keywords(self, **kwargs):
for key in self.meta.keys():
# Meta table entries with a size that matches the n_lists size are the ones
# that need a new value. We have to add something... whatever was passed in or None
- if isinstance(self.meta[key], collections.abc.Iterable) and (len(self.meta[key]) == self.meta['n_lists']):
+ if isinstance(self.meta[key], collections.abc.Iterable) and (len(self.meta[key]) == self.meta['n_lists']) and (not isinstance(self.meta[key], str)):
# If we find the key is the passed in meta argument, then add the new values.
# Otherwise, add "None".
if 'meta' in kwargs:
@@ -500,7 +512,7 @@ def combine_lists(self, col_name_in, weights_col=None, mask_val=None,
# Save off our new AVG and STD into new columns with shape (N_stars).
col_name_avg = col_name_in + '0'
- col_name_std = col_name_in + '0e'
+ col_name_std = col_name_in + '0_err'
if ismag:
std = (2.5 / np.log(10)) * std / avg
@@ -526,22 +538,18 @@ def detections(self):
self.add_column(Column(n_detect), name='n_detect')
return
-
- def fit_velocities(self, weighting='var', use_scipy=True, absolute_sigma=True, bootstrap=0, fixed_t0=False, verbose=False,
- mask_val=None, mask_lists=False, show_progress=True):
+ def fit_velocities(self, weighting='var', use_scipy=True, absolute_sigma=True, bootstrap=0,
+ fixed_t0=False, verbose=False, mask_val=None, mask_lists=False, show_progress=True,
+ default_motion_model='Linear', reassign_motion_model=False, select_stars=None, motion_model_dict={}):
"""Fit velocities for all stars in the table and add to the columns 'vx', 'vxe', 'vy', 'vye', 'x0', 'x0e', 'y0', 'y0e'.
Parameters
----------
weighting : str, optional
Weight by variance 'var' or standard deviation 'std', by default 'var'
- use_scipy : bool, optional
- Use scipy.curve_fit (recommended for large number of epochs, but may return inf or nan) or analytic fitting from flystar.fit_velocity.linear_fit (recommended for a few epochs), by default True
- absolute_sigma : bool, optional
- Absolute sigma or not. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html for details, by default True
bootstrap : int, optional
- Calculate uncertain using bootstraping or not, by default 0
+ Calculate uncertainty using bootstrapping or not, by default 0
fixed_t0 : bool or array-like, optional
Fix the t0 in dt = time - t0 if user provides an array with the same length of the table, or automatically calculate t0 = np.average(time, weights=1/np.hypot(xe, ye)) if False, by default False
verbose : bool, optional
@@ -563,8 +571,8 @@ def fit_velocities(self, weighting='var', use_scipy=True, absolute_sigma=True, b
if weighting not in ['var', 'std']:
raise ValueError(f"fit_velocities: Weighting must either be 'var' or 'std', not {weighting}!")
- if ('t' not in self.colnames) and ('LIST_TIMES' not in self.meta):
- raise KeyError("fit_velocities: Failed to access time values. No 't' column in table, no 'LIST_TIMES' in meta.")
+ if ('t' not in self.colnames) and ('list_times' not in self.meta):
+ raise KeyError("fit_velocities: Failed to access time values. No 't' column in table, no 'list_times' in meta.")
# Check if we have the required columns
if not all([_ in self.colnames for _ in ['x', 'y']]):
@@ -577,93 +585,103 @@ def fit_velocities(self, weighting='var', use_scipy=True, absolute_sigma=True, b
msg = 'Starting startable.fit_velocities for {0:d} stars with n={1:d} bootstrap'
print(msg.format(N_stars, bootstrap))
- # Clean/remove up old arrays.
- if 'x0' in self.colnames: self.remove_column('x0')
- if 'vx' in self.colnames: self.remove_column('vx')
- if 'y0' in self.colnames: self.remove_column('y0')
- if 'vy' in self.colnames: self.remove_column('vy')
- if 'x0e' in self.colnames: self.remove_column('x0e')
- if 'vxe' in self.colnames: self.remove_column('vxe')
- if 'y0e' in self.colnames: self.remove_column('y0e')
- if 'vye' in self.colnames: self.remove_column('vye')
- if 'chi2_vx' in self.colnames: self.remove_column('chi2_vx')
- if 'chi2_vy' in self.colnames: self.remove_column('chi2_vy')
- if 't0' in self.colnames: self.remove_column('t0')
- if 'n_vfit' in self.colnames: self.remove_column('n_vfit')
+ # Set all to default_motion_model if none assigned already.
+ # Reset motion_model_used to the inputs for now -> will change as fits run
+ if ('motion_model_input' not in self.colnames) or reassign_motion_model:
+ self['motion_model_input'] = default_motion_model
+ self['motion_model_used'] = self['motion_model_input']
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, self, default_motion_model)
+
+ #
+ # Fill table with all possible motion model parameter names as new
+ # columns. Make everything empty for now.
+ #
+ all_motion_models = np.unique(self['motion_model_input'].tolist() + ['Fixed']+[default_motion_model]).tolist()
+ new_col_list = motion_model.get_list_motion_model_param_names(all_motion_models, with_errors=True)
+ # Append goodness of fit metrics and t0.
+ new_col_list += ['chi2_x', 'chi2_y', 'n_params']
+ if 't0' not in new_col_list:
+ new_col_list.append('t0')
+
# Define output arrays for the best-fit parameters.
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'x0'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'vx'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'y0'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'vy'))
-
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'x0e'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'vxe'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'y0e'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'vye'))
+ for col in new_col_list:
+ # Clean/remove up old arrays.
+ if col in self.colnames: self.remove_column(col)
+ # Add column. TODO: confirm NaN is an appropriate fill value for new model-parameter columns.
+ self.add_column(Column(data = np.full(N_stars, np.nan, dtype=float), name = col))
+
+ # Add a column to keep track of the number of points used in a fit.
+ self['n_fit'] = 0
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'chi2_vx'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 'chi2_vy'))
+ # Preserve the number of bootstraps that will be run (if any).
+ self.meta['n_fit_bootstrap'] = bootstrap
- self.add_column(Column(data = np.zeros(N_stars, dtype=float), name = 't0'))
- self.add_column(Column(data = np.zeros(N_stars, dtype=int), name = 'n_vfit'))
-
- self.meta['N_VFIT_BOOTSTRAP'] = bootstrap
-
# (FIXME: Do we need to catch the case where there's a single *unmasked* epoch?)
# Catch the case when there is only a single epoch. Just return 0 velocity
# and the same input position for the x0/y0.
if len(self['x'].shape) == 1:
+ self['motion_model_used'] = 'Fixed'
self['x0'] = self['x']
self['y0'] = self['y']
if 't' in self.colnames:
self['t0'] = self['t']
else:
- self['t0'] = self.meta['LIST_TIMES'][0]
+ self['t0'] = self.meta['list_times'][0]
if 'xe' in self.colnames:
- self['x0e'] = self['xe']
- self['y0e'] = self['ye']
- self['n_vfit'] = 1
-
+ self['x0_err'] = self['xe']
+ self['y0_err'] = self['ye']
+ self['n_fit'] = 1
+ self['n_params'] = 1
return
- if self['x'].shape[1] == 1:
+ if (self['x'].shape[1] == 1):
+ self['motion_model_used'] = 'Fixed'
self['x0'] = self['x'][:,0]
self['y0'] = self['y'][:,0]
-
if 't' in self.colnames:
self['t0'] = self['t'][:, 0]
else:
- self['t0'] = self.meta['LIST_TIMES'][0]
-
+ self['t0'] = self.meta['list_times'][0]
if 'xe' in self.colnames:
- self['x0e'] = self['xe'][:,0]
- self['y0e'] = self['ye'][:,0]
-
- self['n_vfit'] = 1
-
+ self['x0_err'] = self['xe'][:,0]
+ self['y0_err'] = self['ye'][:,0]
+ self['n_fit'] = 1
+ self['n_params'] = 1
return
-
+
+ # Only fit selected stars, if list given
+ fit_star_idxs = range(N_stars)
+ if select_stars is not None:
+ fit_star_idxs = select_stars
# STARS LOOP through the stars and work on them 1 at a time.
# This is slow; but robust.
if show_progress:
- for ss in tqdm(range(N_stars)):
- self.fit_velocity_for_star(ss, weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, bootstrap=bootstrap, fixed_t0=fixed_t0,
- mask_val=mask_val, mask_lists=mask_lists)
+ for ss in tqdm(fit_star_idxs):
+ self.fit_velocity_for_star(ss, motion_model_dict, weighting=weighting, bootstrap=bootstrap,
+ use_scipy=use_scipy, absolute_sigma=absolute_sigma,
+ fixed_t0=fixed_t0, default_motion_model=default_motion_model,
+ mask_val=mask_val, mask_lists=mask_lists)
else:
- for ss in range(N_stars):
- self.fit_velocity_for_star(ss, weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, bootstrap=bootstrap, fixed_t0=fixed_t0,
- mask_val=mask_val, mask_lists=mask_lists, )
+ for ss in fit_star_idxs:
+ self.fit_velocity_for_star(ss, motion_model_dict, weighting=weighting, bootstrap=bootstrap,
+ use_scipy=use_scipy, absolute_sigma=absolute_sigma,
+ fixed_t0=fixed_t0, default_motion_model=default_motion_model,
+ mask_val=mask_val, mask_lists=mask_lists)
if verbose:
stop_time = time.time()
print('startable.fit_velocities runtime = {0:.0f} s for {1:d} stars'.format(stop_time - start_time, N_stars))
return
- def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_sigma=True, bootstrap=False, fixed_t0=False,
- mask_val=None, mask_lists=False):
-
+ def fit_velocity_for_star(self, ss, motion_model_dict, weighting='var', use_scipy=True, absolute_sigma=True,
+ bootstrap=False, fixed_t0=False, mask_val=None, mask_lists=False,
+ default_motion_model='Linear'):
+ # TODO: "weighting" is not used
+ #
# Make a mask of invalid (NaN) values and a user-specified invalid value.
+ #
+
x = np.ma.masked_invalid(self['x'][ss, :].data)
y = np.ma.masked_invalid(self['y'][ss, :].data)
if mask_val:
@@ -675,7 +693,6 @@ def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_si
if not np.ma.is_masked(y):
y.mask = np.zeros_like(y.data, dtype=bool)
-
if mask_lists is not False:
# Remove a list
if isinstance(mask_lists, list):
@@ -686,7 +703,9 @@ def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_si
# Throw a warning if mask_lists is not a list
if not isinstance(mask_lists, list):
raise RuntimeError('mask_lists needs to be a list.')
-
+ #
+ # Assign the appropriate positional errors
+ #
if 'xe' in self.colnames:
# Make a mask of invalid (NaN) values and a user-specified invalid value.
xe = np.ma.masked_invalid(self['xe'][ss, :].data)
@@ -734,11 +753,13 @@ def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_si
if not isinstance(mask_lists, list):
raise RuntimeError('mask_lists needs to be a list.')
+ #
# Make a mask of invalid (NaN) values and a user-specified invalid value.
+ #
if 't' in self.colnames:
t = np.ma.masked_invalid(self['t'][ss, :].data)
else:
- t = np.ma.masked_invalid(self.meta['LIST_TIMES'])
+ t = np.ma.masked_invalid(self.meta['list_times'])
if mask_val:
t = np.ma.masked_values(t, mask_val)
@@ -750,14 +771,17 @@ def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_si
if isinstance(mask_lists, list):
if all(isinstance(item, int) for item in mask_lists):
t.mask[mask_lists] = True
-
+
# Throw a warning if mask_lists is not a list
if not isinstance(mask_lists, list):
raise RuntimeError('mask_lists needs to be a list.')
-
+
# For inconsistent masks, mask the star if any of the values are masked.
new_mask = np.logical_or.reduce((t.mask, x.mask, y.mask, xe.mask, ye.mask))
+
+ #
# Figure out where we have detections (as indicated by error columns)
+ #
good = np.where((xe != 0) & (ye != 0) &
np.isfinite(xe) & np.isfinite(ye) &
np.isfinite(x) & np.isfinite(y) & ~new_mask)[0]
@@ -766,6 +790,9 @@ def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_si
# Catch the case where there is NO good data.
if N_good == 0:
+ #self['motion_model_used'][ss] = 'None'
+ self['n_fit'][ss] = N_good
+ self['n_params'][ss] = 0
return
# Everything below has N_good >= 1
@@ -775,185 +802,133 @@ def fit_velocity_for_star(self, ss, weighting='var', use_scipy=True, absolute_si
xe = xe[good]
ye = ye[good]
- # slope, intercept
- p0x = np.array([0., x.mean()])
- p0y = np.array([0., y.mean()])
-
+ #
# Unless t0 is fixed, calculate the t0 for the stars.
+ #
if fixed_t0 is False:
t_weight = 1.0 / np.hypot(xe, ye)
t0 = np.average(t, weights=t_weight)
+ elif fixed_t0 is True:
+ t0 = self.t0
else:
t0 = fixed_t0[ss]
- dt = t - t0
-
self['t0'][ss] = t0
- self['n_vfit'][ss] = N_good
-
- # Catch the case where all the times are identical
- if (dt == dt[0]).all():
- if weighting == 'var':
- wgt_x = (1.0/xe)**2
- wgt_y = (1.0/ye)**2
- elif weighting == 'std':
- wgt_x = 1./np.abs(xe)
- wgt_y = 1./np.abs(ye)
-
- self['x0'][ss] = np.average(x, weights=wgt_x)
- self['y0'][ss] = np.average(y, weights=wgt_y)
- self['x0e'][ss] = np.sqrt(np.average((x - self['x0'][ss])**2, weights=wgt_x))
- self['y0e'][ss] = np.sqrt(np.average((y - self['y0'][ss])**2, weights=wgt_x))
-
- self['vx'][ss] = 0.0
- self['vy'][ss] = 0.0
- self['vxe'][ss] = 0.0
- self['vye'][ss] = 0.0
-
- return
-
- # Catch the case where we have enough measurements to actually
- # fit a velocity!
- if N_good > 2:
- if weighting == 'var':
- sigma_x = xe
- sigma_y = ye
- elif weighting == 'std':
- sigma_x = np.abs(xe)**0.5
- sigma_y = np.abs(ye)**0.5
-
- if use_scipy:
- vx_opt, vx_cov = curve_fit(linear, dt, x, p0=p0x, sigma=sigma_x, absolute_sigma=absolute_sigma)
- vy_opt, vy_cov = curve_fit(linear, dt, y, p0=p0y, sigma=sigma_y, absolute_sigma=absolute_sigma)
- vx = vx_opt[0]
- x0 = vx_opt[1]
- vy = vy_opt[0]
- y0 = vy_opt[1]
- chi2_vx = calc_chi2(dt, x, sigma_x, *vx_opt)
- chi2_vy = calc_chi2(dt, y, sigma_y, *vy_opt)
-
- else:
- result_vx = linear_fit(dt, x, sigma_x, absolute_sigma=absolute_sigma)
- result_vy = linear_fit(dt, y, sigma_y, absolute_sigma=absolute_sigma)
- vx = result_vx['slope']
- x0 = result_vx['intercept']
- vy = result_vy['slope']
- y0 = result_vy['intercept']
- chi2_vx = result_vx['chi2']
- chi2_vy = result_vy['chi2']
-
- self['vx'][ss] = vx
- self['x0'][ss] = x0
- self['vy'][ss] = vy
- self['y0'][ss] = y0
- self['chi2_vx'][ss] = chi2_vx
- self['chi2_vy'][ss] = chi2_vy
+ self['n_fit'][ss] = N_good
+
+ #
+ # Decide which motion_model to fit.
+ #
+ motion_model_use = self['motion_model_input'][ss]
+ # Go to default model if not enough points for assigned but enough for default
+ # TODO: think about whether we want other fallbacks besides the singular default and Fixed
+ if (N_good < motion_model_dict[motion_model_use].n_pts_req) and \
+ (N_good >= motion_model_dict[default_motion_model].n_pts_req):
+ motion_model_use = default_motion_model
+ # If not enough points for either, go to a fixed model
+ elif (N_good < motion_model_dict[motion_model_use].n_pts_req) and \
+ (N_good < motion_model_dict[default_motion_model].n_pts_req):
+ motion_model_use = 'Fixed'
+ # If the points do not cover multiple times, go to a fixed model
+ if (t == t[0]).all():
+ motion_model_use = 'Fixed'
- # Run the bootstrap
- if bootstrap > 0:
- edx = np.arange(N_good, dtype=int)
-
- vx_b = np.zeros(bootstrap, dtype=float)
- x0_b = np.zeros(bootstrap, dtype=float)
- vy_b = np.zeros(bootstrap, dtype=float)
- y0_b = np.zeros(bootstrap, dtype=float)
-
- for bb in range(bootstrap):
- bdx = np.random.choice(edx, N_good)
- if weighting == 'var':
- sigma_x_b = xe[bdx]
- sigma_y_b = ye[bdx]
- elif weighting == 'std':
- sigma_x_b = xe[bdx]**0.5
- sigma_y_b = ye[bdx]**0.5
-
- if use_scipy:
- vx_opt_b, vx_cov_b = curve_fit(linear, dt[bdx], x[bdx], p0=vx_opt, sigma=sigma_x_b,
- absolute_sigma=absolute_sigma)
- vy_opt_b, vy_cov_b = curve_fit(linear, dt[bdx], y[bdx], p0=vy_opt, sigma=sigma_y_b,
- absolute_sigma=absolute_sigma)
- vx_b[bb] = vx_opt_b[0]
- x0_b[bb] = vx_opt_b[1]
- vy_b[bb] = vy_opt_b[0]
- y0_b[bb] = vy_opt_b[1]
-
- else:
- result_vx_b = linear_fit(dt[bdx], x[bdx], sigma=sigma_x_b, absolute_sigma=absolute_sigma)
- result_vy_b = linear_fit(dt[bdx], y[bdx], sigma=sigma_y_b, absolute_sigma=absolute_sigma)
- vx_b[bb] = result_vx_b['slope']
- x0_b[bb] = result_vx_b['intercept']
- vy_b[bb] = result_vy_b['slope']
- y0_b[bb] = result_vy_b['intercept']
-
- # Save the errors from the bootstrap
- self['vxe'][ss] = vx_b.std()
- self['x0e'][ss] = x0_b.std()
- self['vye'][ss] = vy_b.std()
- self['y0e'][ss] = y0_b.std()
+ self['motion_model_used'][ss] = motion_model_use
+
+# # Get the motion model object.
+# modClass = motion_model_dict[motion_model_use]
+#
+# # Load up any prior information on parameters for this model.
+# param_dict = {}
+# for par in modClass.fitter_param_names+modClass.fixed_param_names:
+# if ~np.isnan(self[par][ss]):
+# param_dict[par] = self[par][ss]
+
+ # Model object
+ mod = motion_model_dict[motion_model_use]
+ fixed_params = [self[par][ss] for par in mod.fixed_param_names]
+
+ # Fit for the best parameters
+ params, param_errs = mod.fit_motion_model(t, x, y, xe, ye, t0, bootstrap=bootstrap,
+ weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma)
+ chi2_x,chi2_y = mod.get_chi2(params,fixed_params, t,x,y,xe,ye)
+ self['chi2_x'][ss]=chi2_x
+ self['chi2_y'][ss]=chi2_y
+ self['n_params'][ss] = mod.n_params
- else:
- if use_scipy:
- vxe, x0e = np.sqrt(vx_cov.diagonal())
- vye, y0e = np.sqrt(vy_cov.diagonal())
- else:
- vxe = result_vx['e_slope']
- x0e = result_vx['e_intercept']
- vye = result_vy['e_slope']
- y0e = result_vy['e_intercept']
-
- self['vxe'][ss] = vxe
- self['x0e'][ss] = x0e
- self['vye'][ss] = vye
- self['y0e'][ss] = y0e
-
- elif N_good == 2:
- # Not enough epochs to fit a velocity.
- dx = np.diff(x)[0]
- dy = np.diff(y)[0]
- dt_diff = np.diff(dt)[0]
-
- if weighting == 'var':
- sigma_x = 1./xe**2
- sigma_y = 1./ye**2
- elif weighting == 'std':
- sigma_x = 1./np.abs(xe)
- sigma_y = 1./np.abs(ye)
-
- self['x0'][ss] = np.average(x, weights=sigma_x)
- self['y0'][ss] = np.average(y, weights=sigma_y)
- self['x0e'][ss] = np.abs(dx) / 2**0.5
- self['y0e'][ss] = np.abs(dy) / 2**0.5
- self['vx'][ss] = dx / dt_diff
- self['vy'][ss] = dy / dt_diff
- self['vxe'][ss] = 0.0
- self['vye'][ss] = 0.0
- self['chi2_vx'][ss] = calc_chi2(dt, x, sigma_x, self['vx'][ss], self['x0'][ss])
- self['chi2_vy'][ss] = calc_chi2(dt, y, sigma_y, self['vy'][ss], self['y0'][ss])
+ # Save parameters and errors to table.
+ for pp in range(len(mod.fitter_param_names)):
+ par = mod.fitter_param_names[pp]
+ par_err = par + '_err'
+ self[par][ss] = params[pp]
+ self[par_err][ss] = param_errs[pp]
+ return
+
+ # New function, to use in align
+ def get_star_positions_at_time(self, t, motion_model_dict, allow_alt_models=True):
+ """ Get current x,y positions of each star according to its motion_model
+ """
+ # Start with empty arrays so we can fill them in batches
+ N_stars = len(self)
+ if hasattr(t, "__len__"):
+ x = np.full((N_stars,len(t)), np.nan, dtype=float)
+ y = np.full((N_stars,len(t)), np.nan, dtype=float)
+ xe = np.full((N_stars,len(t)), np.nan, dtype=float)
+ ye = np.full((N_stars,len(t)), np.nan, dtype=float)
else:
- # N_good == 1 case
- self['n_vfit'][ss] = 1
- self['x0'][ss] = x
- self['y0'][ss] = y
-
- if 'xe' in self.colnames:
- self['x0e'] = xe
- self['y0e'] = ye
+ x = np.full(N_stars, np.nan, dtype=float)
+ y = np.full(N_stars, np.nan, dtype=float)
+ xe = np.full(N_stars, np.nan, dtype=float)
+ ye = np.full(N_stars, np.nan, dtype=float)
+
+ # TODO: probably worth some additional testing here
+ # Check which motion models we need
+ # use complex_mms to collect models besides Fixed and Linear
+ unique_mms = np.unique(self['motion_model_input']).tolist()
+ # Calculate current position in batches by motion model
+ for mm in unique_mms:
+ try:
+ # Identify stars with this model & get class
+ idx = np.where(self['motion_model_input']==mm)[0]
+ mod = motion_model_dict[mm]
+ # Set up parameters
+ param_dict = {}
+ for par in mod.fitter_param_names + mod.fixed_param_names + [pm+'_err' for pm in mod.fitter_param_names]:
+ param_dict[par] = self[par][idx]
+ x[idx],y[idx],xe[idx],ye[idx] = mod.get_batch_pos_at_time(t,**param_dict)
+ except:
+ pass
+ if np.isnan(x).any() and allow_alt_models:
+ re_calc = np.where(np.isnan(x))[0]
+ unique_mms = np.unique(self['motion_model_used'][re_calc]).tolist()
+ # Calculate current position in batches by motion model
+ for mm in unique_mms:
+ # Identify stars with this model & get class
+ idx_0 = np.where(self['motion_model_used']==mm)[0]
+ idx = np.intersect1d(re_calc, idx_0)
+ mod = motion_model_dict[mm]
+ # Set up parameters
+ param_dict = {}
+ for par in motion_model.get_one_motion_model_param_names(mm,with_errors=True,with_fixed=True):
+ param_dict[par] = self[par][idx]
+ x[idx],y[idx],xe[idx],ye[idx] = mod.get_batch_pos_at_time(t,**param_dict)
+
+ return x,y,xe,ye
+
- return
-
-
- def fit_velocities_all_detected(self, weighting='var', use_scipy=False, absolute_sigma=False, epoch_cols='all', mask_val=None, art_star=False, return_result=False):
+ def fit_velocities_all_detected(self, motion_model_to_fit, weighting='var', use_scipy=True, absolute_sigma=True, times=None,
+ select_stars=None, epoch_cols='all', mask_val=None, art_star=False, return_result=False):
"""Fit velocities for stars detected in all epochs specified by epoch_cols.
Criterion: xe/ye error > 0 and finite, x/y not masked.
Parameters
----------
+ motion_model_to_fit : MotionModel
+ Motion model object to use for fitting all stars
weighting : str, optional
Variance weighting('var') or standard deviation weighting ('std'), by default 'var'
- use_scipy : bool, optional
- Use scipy.curve_fit or flystar.fit_velocity.fit_velocity, by default False
- absolute_sigma : bool, optional
- Absolute sigma or rescaled sigma, by default False
+ select_stars : array-like, optional
+ Indices of stars to select for fitting, by default None (fit all detected stars)
epoch_cols : str or list of intergers, optional
List of epoch column indices used for fitting velocity, by default 'all'
mask_val : float, optional
@@ -970,61 +945,182 @@ def fit_velocities_all_detected(self, weighting='var', use_scipy=False, absolute
"""
N_stars = len(self)
-
+ if select_stars is None:
+ select_stars = np.arange(N_stars)
+ else:
+ select_stars = np.asarray(select_stars)
+
if epoch_cols == 'all':
epoch_cols = np.arange(np.shape(self['x'])[1])
-
+
# Artificial Star
if art_star:
- detected_in_all_epochs = np.all(self['det'][:, epoch_cols], axis=1)
-
+ detected_in_all_epochs = np.all(self['det'][select_stars, :][:, epoch_cols], axis=1)
+
# Observation Star
else:
- valid_xe = np.all(self['xe'][:, epoch_cols]!=0, axis=1) & np.all(np.isfinite(self['xe'][:, epoch_cols]), axis=1)
- valid_ye = np.all(self['ye'][:, epoch_cols]!=0, axis=1) & np.all(np.isfinite(self['ye'][:, epoch_cols]), axis=1)
-
+ valid_xe = np.all(self['xe'][select_stars, :][:, epoch_cols]!=0, axis=1) & np.all(np.isfinite(self['xe'][select_stars, :][:, epoch_cols]), axis=1)
+ valid_ye = np.all(self['ye'][select_stars, :][:, epoch_cols]!=0, axis=1) & np.all(np.isfinite(self['ye'][select_stars, :][:, epoch_cols]), axis=1)
+
if mask_val:
- x = np.ma.masked_values(self['x'][:, epoch_cols], mask_val)
- y = np.ma.masked_values(self['y'][:, epoch_cols], mask_val)
-
+ x = np.ma.masked_values(self['x'][select_stars, :][:, epoch_cols], mask_val)
+ y = np.ma.masked_values(self['y'][select_stars, :][:, epoch_cols], mask_val)
+
# If no mask, convert x.mask to list
if not np.ma.is_masked(x):
- x.mask = np.zeros_like(self['x'][:, epoch_cols].data, dtype=bool)
+ x.mask = np.zeros_like(self['x'][select_stars, :][:, epoch_cols].data, dtype=bool)
if not np.ma.is_masked(y):
- y.mask = np.zeros_like(self['y'][:, epoch_cols].data, dtype=bool)
-
+ y.mask = np.zeros_like(self['y'][select_stars, :][:, epoch_cols].data, dtype=bool)
+
valid_x = ~np.any(x.mask, axis=1)
valid_y = ~np.any(y.mask, axis=1)
detected_in_all_epochs = np.logical_and.reduce((
- valid_x, valid_y, valid_xe, valid_ye
- ))
+ valid_x, valid_y, valid_xe, valid_ye))
else:
detected_in_all_epochs = np.logical_and(valid_xe, valid_ye)
+
+ N = len(self['x'][select_stars, :])
+ fit_params = motion_model_to_fit.fitter_param_names
+ param_data = {p: np.zeros(N) for p in fit_params}
+ param_data.update({p+'_err': np.zeros(N) for p in fit_params})
+ param_data.update({p: np.zeros(N) for p in motion_model_to_fit.fixed_param_names})
+ param_data['chi2_x'] = np.zeros(N)
+ param_data['chi2_y'] = np.zeros(N)
+
+ if times is None:
+ if 'YEARS' in self.meta:
+ times = np.array(self.meta['YEARS'])[epoch_cols]
+ elif 't' in self.colnames:
+ times = self['t'][0, epoch_cols]
+ else:
+ raise ValueError("No valid time column found.")
+ if not art_star:
+ x_arr = self['x'][select_stars, :][:, epoch_cols]
+ y_arr = self['y'][select_stars, :][:, epoch_cols]
+ else:
+ x_arr = self['x'][select_stars, :][:, epoch_cols, 1]
+ y_arr = self['y'][select_stars, :][:, epoch_cols, 1]
+
+ xe_arr = self['xe'][select_stars, :][:, epoch_cols]
+ ye_arr = self['ye'][select_stars, :][:, epoch_cols]
- # Fit velocities
- vel_result = fit_velocity(self[detected_in_all_epochs], weighting=weighting, use_scipy=use_scipy, absolute_sigma=absolute_sigma, epoch_cols=epoch_cols, art_star=art_star)
- vel_result = Table.from_pandas(vel_result)
-
-
+ # Only fit for >1 epochs, otherwise all velocities will be 0
+ if len(epoch_cols) > 1:
+ # For each star
+ for i in tqdm(range(N)):
+ x = x_arr[i]
+ y = y_arr[i]
+ xe = xe_arr[i]
+ ye = ye_arr[i]
+ t0 = np.average(times, weights=1. / np.hypot(xe, ye))
+
+ # Run fit and record results
+ params, param_errs = motion_model_to_fit.fit_motion_model(
+ times, x, y, xe, ye, t0, weighting=weighting,
+ use_scipy=use_scipy, absolute_sigma=absolute_sigma
+ )
+ if 't0' in motion_model_to_fit.fixed_param_names:
+ param_data['t0'][i] = t0
+ for j, param in enumerate(fit_params):
+ param_data[param][i] = params[j]
+ param_data[f'{param}_err'][i] = param_errs[j]
+ chi2x, chi2y = motion_model_to_fit.get_chi2(params, [t0], times, x, y, xe, ye)
+ param_data['chi2_x'][i] = chi2x
+ param_data['chi2_y'][i] = chi2y
+
+ vel_result = Table.from_pandas(pd.DataFrame(param_data))
+
# Add n_vfit
- n_vfit = len(epoch_cols)
- vel_result['n_vfit'] = n_vfit
-
+ n_fit = len(epoch_cols)
+ vel_result['n_fit'] = n_fit
+
# Clean/remove up old arrays.
- columns = [*vel_result.keys(), 'n_vfit']
+ columns = [*vel_result.keys(), 'n_fit']
for column in columns:
if column in self.colnames: self.remove_column(column)
-
+
# Update self
for column in columns:
- column_array = np.ma.zeros(N_stars)
- column_array[detected_in_all_epochs] = vel_result[column]
- column_array[~detected_in_all_epochs] = np.nan
- column_array.mask = ~detected_in_all_epochs
+ column_array = MaskedColumn(np.ma.zeros(N_stars), dtype=float, name=column)
+ column_array[select_stars] = vel_result[column]
+ column_array[select_stars][~detected_in_all_epochs] = np.nan
+ column_array.mask[select_stars] = ~detected_in_all_epochs
+ # Mask unselected indices
+ column_array.mask[~np.isin(np.arange(N_stars), select_stars)] = True
self[column] = column_array
-
+
if return_result:
return vel_result
else:
- return
\ No newline at end of file
+ return
+
+ def shift_reference_frame(self, delta_vx=0.0, delta_vy=0.0, delta_pi=0.0,
+ motion_model_dict={}):
+ """
+ After completing an alignment, shift from your relative reference frame to
+ the absolute frame using either Gaia or a Galactic model. This modifies the
+ motion model fit parameters as well as the time series astrometry, assuming
+ zero error on the shift values.
+
+ Parameters
+ ----------
+ delta_vx : float, optional
+ velocity shift in x-direction (as/yr)
+ delta_vy : float, optional
+ velocity shift in y-direction (as/yr)
+ delta_pi : float, optional
+ parallax shift (as)
+ """
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, self, None)
+ if delta_vx==0.0 and delta_vy==0.0 and delta_pi==0.0:
+ print("No shifts input, reference frame unchanged.")
+ print("Specify delta_vx, delta_vy, and/or delta_pi to perform a reference frame shift.")
+ return
+ self['vx'] += delta_vx
+ self['x'] += delta_vx*(self['t']-self['t0'][:, np.newaxis])
+ self['vy'] += delta_vy
+ self['y'] += delta_vy*(self['t']-self['t0'][:, np.newaxis])
+ if delta_pi!=0.0:
+ t_all = self['t'][np.where(~np.any(np.isnan(self['t']), axis=1))[0][0]]
+ t_mjd = Time(t_all, format='decimalyear', scale='utc').mjd
+ pvec = motion_model_dict['Parallax'].get_parallax_vector(t_mjd)
+ self['pi'] += delta_pi
+ self['x'] += delta_pi*pvec[0]
+ self['y'] += delta_pi*pvec[1]
+ return
+
+def shift_reference_frame(table, delta_vx=0.0, delta_vy=0.0, delta_pi=0.0,
+ motion_model_dict={}):
+ """
+ After completing an alignment, shift from your relative reference frame to
+ the absolute frame using either Gaia or a Galactic model. This modifies the
+ motion model fit parameters as well as the time series astrometry, assuming
+ zero error on the shift values.
+
+ Parameters
+ ----------
+ delta_vx : float, optional
+ velocity shift in x-direction (as/yr)
+ delta_vy : float, optional
+ velocity shift in y-direction (as/yr)
+ delta_pi : float, optional
+ parallax shift (as)
+ """
+ motion_model_dict = motion_model.validate_motion_model_dict(motion_model_dict, table, None)
+ if delta_vx==0.0 and delta_vy==0.0 and delta_pi==0.0:
+ print("No shifts input, reference frame unchanged.")
+ print("Specify delta_vx, delta_vy, and/or delta_pi to perform a reference frame shift.")
+ return
+ table['vx'] += delta_vx
+ table['x'] += delta_vx*(table['t']-table['t0'][:, np.newaxis])
+ table['vy'] += delta_vy
+ table['y'] += delta_vy*(table['t']-table['t0'][:, np.newaxis])
+ if delta_pi!=0.0:
+ t_all = table['t'][np.where(~np.any(np.isnan(table['t']), axis=1))[0][0]]
+ t_mjd = Time(t_all, format='decimalyear', scale='utc').mjd
+ pvec = motion_model_dict['Parallax'].get_parallax_vector(t_mjd)
+ table['pi'] += delta_pi
+ table['x'] += delta_pi*pvec[0]
+ table['y'] += delta_pi*pvec[1]
+ return table
diff --git a/flystar/tests/ref_vel.lis b/flystar/tests/ref_vel.lis
index 4d223b0..fc191bb 100644
--- a/flystar/tests/ref_vel.lis
+++ b/flystar/tests/ref_vel.lis
@@ -1,4 +1,4 @@
-name x y m xe ye me t0 vx vy vxe vye
+name x y m xe ye me t0 vx vy vx_err vy_err
gaia_1150 -63.98457260029581 -30.67278228118061 13.628200000000001 0.00014609621924194742 0.00014585407086906515 0.0115 2010.5 0.0 0.0 0.1 0.1
gaia_1162 0.47637231898572985 -79.79611824529178 14.6439 0.00011419811781207949 0.00011415029792639667 0.0084 2010.5 0.0 0.0 0.1 0.1
gaia_1166 8.546170748636236 -47.35893234401765 14.696900000000001 0.00013283068515276605 0.00013260913293195234 0.0041 2010.5 0.0 0.0 0.1 0.1
diff --git a/flystar/tests/test_align.py b/flystar/tests/test_align.py
index 026a8b1..2d6b0dc 100644
--- a/flystar/tests/test_align.py
+++ b/flystar/tests/test_align.py
@@ -3,12 +3,13 @@
from flystar import startables
from flystar import transforms
from flystar import analysis
+from flystar import motion_model
from astropy.table import Table
import numpy as np
import pylab as plt
import pdb
import datetime
-
+import pytest
def test_MosaicSelfRef():
"""
@@ -30,11 +31,11 @@ def test_MosaicSelfRef():
# Check some of the output quantities on the final table.
assert 'x0' in msc.ref_table.colnames
- assert 'x0e' in msc.ref_table.colnames
+ assert 'x0_err' in msc.ref_table.colnames
assert 'y0' in msc.ref_table.colnames
- assert 'y0e' in msc.ref_table.colnames
+ assert 'y0_err' in msc.ref_table.colnames
assert 'm0' in msc.ref_table.colnames
- assert 'm0e' in msc.ref_table.colnames
+ assert 'm0_err' in msc.ref_table.colnames
assert 'use_in_trans' in msc.ref_table.colnames
assert 'used_in_trans' in msc.ref_table.colnames
assert 'ref_orig' in msc.ref_table.colnames
@@ -46,41 +47,41 @@ def test_MosaicSelfRef():
# Check that we have some matched stars... should be at least 35 stars
# that are detected in all 4 starlists.
idx = np.where(msc.ref_table['n_detect'] == 4)[0]
- assert len(idx) > 35
+ assert len(idx) > 35
# Check that the transformation error isn't too big
- assert (msc.ref_table['x0e'] < 3.0).all() # less than 1 pix
- assert (msc.ref_table['y0e'] < 3.0).all()
- #assert (msc.ref_table['m0e'] < 1.0).all() # less than 0.5 mag
- assert (msc.ref_table['m0e'] < 1.5).all() # less than 0.5 mag
+ assert (msc.ref_table['x0_err'] < 3.0).all() # less than 1 pix
+ assert (msc.ref_table['y0_err'] < 3.0).all()
+ #assert (msc.ref_table['m0_err'] < 1.0).all() # less than 0.5 mag
+ assert (msc.ref_table['m0_err'] < 1.5).all() # less than 0.5 mag
# Check that the transformation lists aren't too wacky
for ii in range(4):
- np.testing.assert_almost_equal(msc.trans_list[ii].px.c1_0, 1.0, 2)
- np.testing.assert_almost_equal(msc.trans_list[ii].py.c0_1, 1.0, 2)
-
+ np.testing.assert_allclose(msc.trans_list[ii].px.c1_0, 1.0, rtol=1e-2)
+ np.testing.assert_allclose(msc.trans_list[ii].py.c0_1, 1.0, rtol=1e-2)
# We didn't do any velocity fitting, so make sure nothing got created.
assert 'vx' not in msc.ref_table.colnames
assert 'vy' not in msc.ref_table.colnames
- assert 'vxe' not in msc.ref_table.colnames
- assert 'vye' not in msc.ref_table.colnames
+ assert 'vx_err' not in msc.ref_table.colnames
+ assert 'vy_err' not in msc.ref_table.colnames
plt.clf()
plt.plot(msc.ref_table['x'][:, 0],
msc.ref_table['y'][:, 0],
- 'k+', color='red', mec='red', mfc='none')
+ '+', color='red', mec='red', mfc='none')
plt.plot(msc.ref_table['x'][:, 1],
msc.ref_table['y'][:, 1],
- 'kx', color='blue', mec='blue', mfc='none')
+ 'x', color='blue', mec='blue', mfc='none')
plt.plot(msc.ref_table['x'][:, 2],
msc.ref_table['y'][:, 2],
- 'ko', color='cyan', mec='cyan', mfc='none')
+ 'o', color='cyan', mec='cyan', mfc='none')
plt.plot(msc.ref_table['x'][:, 3],
msc.ref_table['y'][:, 3],
- 'k^', color='green', mec='green', mfc='none')
+ '^', color='green', mec='green', mfc='none')
plt.plot(msc.ref_table['x0'],
msc.ref_table['y0'],
- 'k.', color='black', alpha=0.2)
+ '.', color='black', alpha=0.2)
+
return
@@ -100,22 +101,23 @@ def test_MosaicSelfRef_vel_tconst():
msc = align.MosaicSelfRef(lists, ref_index=0, iters=2,
dr_tol=[3, 3], dm_tol=[1, 1],
trans_class=transforms.PolyTransform,
- trans_args={'order': 2}, use_vel=True,
+ trans_args={'order': 2},
+ default_motion_model='Linear',
verbose=False)
msc.fit()
# Check some of the output quantities on the final table.
assert 'x0' in msc.ref_table.colnames
- assert 'x0e' in msc.ref_table.colnames
+ assert 'x0_err' in msc.ref_table.colnames
assert 'y0' in msc.ref_table.colnames
- assert 'y0e' in msc.ref_table.colnames
+ assert 'y0_err' in msc.ref_table.colnames
assert 'm0' in msc.ref_table.colnames
- assert 'm0e' in msc.ref_table.colnames
+ assert 'm0_err' in msc.ref_table.colnames
assert 'vx' in msc.ref_table.colnames
- assert 'vxe' in msc.ref_table.colnames
+ assert 'vx_err' in msc.ref_table.colnames
assert 'vy' in msc.ref_table.colnames
- assert 'vye' in msc.ref_table.colnames
+ assert 'vy_err' in msc.ref_table.colnames
assert 't0' in msc.ref_table.colnames
# Check that we have some matched stars... should be at least 35 stars
@@ -124,24 +126,21 @@ def test_MosaicSelfRef_vel_tconst():
assert len(idx) > 35
# Check that the transformation error isn't too big
- assert (msc.ref_table['x0e'] < 3.0).all() # less than 1 pix
- assert (msc.ref_table['y0e'] < 3.0).all()
- assert (msc.ref_table['m0e'] < 1.0).all() # less than 0.5 mag
+ assert (msc.ref_table['x0_err'] < 3.0).all() # less than 1 pix
+ assert (msc.ref_table['y0_err'] < 3.0).all()
+ assert (msc.ref_table['m0_err'] < 1.0).all() # less than 0.5 mag
# Check that the transformation lists aren't too wacky
for ii in range(4):
- np.testing.assert_almost_equal(msc.trans_list[ii].px.c1_0, 1.0, 2)
- np.testing.assert_almost_equal(msc.trans_list[ii].py.c0_1, 1.0, 2)
+ np.testing.assert_allclose(msc.trans_list[ii].px.c1_0, 1.0, rtol=1e-2)
+ np.testing.assert_allclose(msc.trans_list[ii].py.c0_1, 1.0, rtol=1e-2)
# Check that the velocities aren't crazy...
- # they should be zero (since there is no time difference)
- np.testing.assert_almost_equal(msc.ref_table['vx'], 0, 1)
- np.testing.assert_almost_equal(msc.ref_table['vy'], 0, 1)
-
- assert (msc.ref_table['vx'] == 0).all()
- assert (msc.ref_table['vy'] == 0).all()
- assert (msc.ref_table['vxe'] == 0).all()
- assert (msc.ref_table['vye'] == 0).all()
+ # they should be non-existent (since there is no time difference)
+ assert np.isnan(msc.ref_table['vx']).all()
+ assert np.isnan(msc.ref_table['vy']).all()
+ assert np.isnan(msc.ref_table['vx_err']).all()
+ assert np.isnan(msc.ref_table['vy_err']).all()
return
@@ -173,39 +172,38 @@ def test_MosaicSelfRef_vel():
msc = align.MosaicSelfRef(lists, ref_index=0, iters=3,
dr_tol=[5, 3, 3], dm_tol=[1, 1, 0.5], outlier_tol=None,
trans_class=transforms.PolyTransform,
- trans_args={'order': 2}, use_vel=True,
+ trans_args={'order': 2}, default_motion_model='Linear',
verbose=False)
msc.fit()
# Check some of the output quantities on the final table.
assert 'x0' in msc.ref_table.colnames
- assert 'x0e' in msc.ref_table.colnames
+ assert 'x0_err' in msc.ref_table.colnames
assert 'y0' in msc.ref_table.colnames
- assert 'y0e' in msc.ref_table.colnames
+ assert 'y0_err' in msc.ref_table.colnames
assert 'm0' in msc.ref_table.colnames
- assert 'm0e' in msc.ref_table.colnames
+ assert 'm0_err' in msc.ref_table.colnames
assert 'vx' in msc.ref_table.colnames
- assert 'vxe' in msc.ref_table.colnames
+ assert 'vx_err' in msc.ref_table.colnames
assert 'vy' in msc.ref_table.colnames
- assert 'vye' in msc.ref_table.colnames
+ assert 'vy_err' in msc.ref_table.colnames
assert 't0' in msc.ref_table.colnames
# Check that we have some matched stars... should be at least 35 stars
# that are detected in all 4 starlists.
idx = np.where(msc.ref_table['n_detect'] == 4)[0]
- assert len(idx) > 35
+ assert len(idx) > 35
# Check that the transformation error isn't too big
- assert (msc.ref_table['x0e'] < 3.0).all() # less than 1 pix
- assert (msc.ref_table['y0e'] < 3.0).all()
- assert (msc.ref_table['m0e'] < 1.0).all() # less than 0.5 mag
+ assert (msc.ref_table['x0_err'] < 3.0).all() # less than 1 pix
+ assert (msc.ref_table['y0_err'] < 3.0).all()
+ assert (msc.ref_table['m0_err'] < 1.0).all() # less than 0.5 mag
# Check that the transformation lists aren't too wacky
for ii in range(4):
- np.testing.assert_almost_equal(msc.trans_list[ii].px.c1_0, 1.0, 2)
- np.testing.assert_almost_equal(msc.trans_list[ii].py.c0_1, 1.0, 2)
-
+ np.testing.assert_allclose(msc.trans_list[ii].px.c1_0, 1.0, rtol=1e-2)
+ np.testing.assert_allclose(msc.trans_list[ii].py.c0_1, 1.0, rtol=1e-2)
plt.clf()
plt.plot(msc.ref_table['vx'],
@@ -215,6 +213,129 @@ def test_MosaicSelfRef_vel():
return
def test_MosaicToRef():
+ make_fake_starlists_poly1(seed=42)
+
+ ref_file = 'random_ref.fits'
+ list_files = ['random_0.fits',
+ 'random_1.fits',
+ 'random_2.fits',
+ 'random_3.fits',
+ 'random_4.fits',
+ 'random_5.fits',
+ 'random_6.fits',
+ 'random_7.fits']
+
+ ref_list = Table.read(ref_file)
+
+ # Switch our list to a "increasing to the West" list.
+ ref_list['x0'] *= -1.0
+
+ lists = [starlists.StarList.read(lf) for lf in list_files]
+
+ msc = align.MosaicToRef(ref_list, lists, iters=2,
+ dr_tol=[0.2, 0.1], dm_tol=[1, 0.5],
+ trans_class=transforms.PolyTransform,
+ trans_args={'order': 2}, default_motion_model='Fixed',
+ update_ref_orig=False, verbose=False)
+
+ msc.fit()
+
+ # Check our status columns
+ assert 'use_in_trans' in msc.ref_table.colnames
+ assert 'used_in_trans' in msc.ref_table.colnames
+ assert 'ref_orig' in msc.ref_table.colnames
+ assert msc.ref_table['use_in_trans'].shape == msc.ref_table['x0'].shape
+ assert msc.ref_table['used_in_trans'].shape == msc.ref_table['x'].shape
+
+ # The positions should be almost the same as the input
+ # positions since update_ref_orig == False.
+ np.testing.assert_allclose(msc.ref_table['x0'], ref_list['x0'], rtol=1e-5)
+ np.testing.assert_allclose(msc.ref_table['y0'], ref_list['y0'], rtol=1e-5)
+
+ ##########
+ # Align and let velocities be free.
+ ##########
+ msc.update_ref_orig = 'periter'
+ msc.fit()
+
+ # The positions should be almost the same (but not as close as before)
+ # as the input positions since update_ref_orig == 'periter'.
+ np.testing.assert_allclose(msc.ref_table['x0'], ref_list['x0'], rtol=1e-1)
+ np.testing.assert_allclose(msc.ref_table['y0'], ref_list['y0'], rtol=1e-1)
+
+ # Also double check that they aren't exactly the same for the reference stars.
+ assert np.not_equal(msc.ref_table['x0'], ref_list['x0']).all()
+ assert np.not_equal(msc.ref_table['y0'], ref_list['y0']).all()
+
+ return msc
+
+def test_MosaicToRef_p0_vel():
+ make_fake_starlists_poly0_vel(seed=42)
+
+ ref_file = 'random_vel_ref.fits'
+ list_files = ['random_vel_p0_0.fits',
+ 'random_vel_p0_1.fits',
+ 'random_vel_p0_2.fits',
+ 'random_vel_p0_3.fits']
+ #'random_vel_4.fits',
+ #'random_vel_5.fits',
+ #'random_vel_6.fits',
+ #'random_vel_7.fits']
+
+ ref_list = Table.read(ref_file)
+
+ # Convert velocities to arcsec/yr
+ ref_list['vx'] *= 1e-3
+ ref_list['vy'] *= 1e-3
+ ref_list['vx_err'] *= 1e-3
+ ref_list['vy_err'] *= 1e-3
+
+ # Switch our list to a "increasing to the West" list.
+ ref_list['x0'] *= -1.0
+ ref_list['vx'] *= -1.0
+
+ lists = [starlists.StarList.read(lf) for lf in list_files]
+
+ msc = align.MosaicToRef(ref_list, lists, iters=2,
+ dr_tol=[0.2, 0.1], dm_tol=[1, 0.5],
+ outlier_tol=[None, None],
+ trans_class=transforms.PolyTransform,
+ trans_args={'order': 1}, default_motion_model='Linear',
+ update_ref_orig=False, verbose=False)
+ msc.fit()
+
+ # Check our status columns
+ assert 'use_in_trans' in msc.ref_table.colnames
+ assert 'used_in_trans' in msc.ref_table.colnames
+ assert 'ref_orig' in msc.ref_table.colnames
+ assert msc.ref_table['use_in_trans'].shape == msc.ref_table['x0'].shape
+ assert msc.ref_table['used_in_trans'].shape == msc.ref_table['x'].shape
+
+ # The velocities should be almost the same as the input
+ # velocities since update_ref_orig == False.
+ assert (msc.ref_table['name']==ref_list['name']).all()
+ np.testing.assert_allclose(msc.ref_table['vx'], ref_list['vx'], rtol=1e-5)
+ np.testing.assert_allclose(msc.ref_table['vy'], ref_list['vy'], rtol=1e-5)
+
+ ##########
+ # Align and let velocities be free.
+ ##########
+ msc.update_ref_orig = 'periter'
+ msc.fit()
+
+ # The velocities should be almost the same (but not as close as before)
+ # as the input velocities since update_ref == True.
+ assert (msc.ref_table['name']==ref_list['name']).all()
+ assert np.max(np.abs(msc.ref_table['vx']-ref_list['vx']))<3e-4
+ assert np.max(np.abs(msc.ref_table['vy']-ref_list['vy']))<3e-4
+
+ # Also double check that they aren't exactly the same for the reference stars.
+ #assert np.any(np.not_equal(msc.ref_table['vx'], ref_list['vx']))
+ assert np.not_equal(msc.ref_table['vx'], ref_list['vx']).any()
+
+ return msc
+
+def test_MosaicToRef_vel():
make_fake_starlists_poly1_vel(seed=42)
ref_file = 'random_vel_ref.fits'
@@ -222,14 +343,18 @@ def test_MosaicToRef():
'random_vel_1.fits',
'random_vel_2.fits',
'random_vel_3.fits']
+ #'random_vel_4.fits',
+ #'random_vel_5.fits',
+ #'random_vel_6.fits',
+ #'random_vel_7.fits']
ref_list = Table.read(ref_file)
# Convert velocities to arcsec/yr
ref_list['vx'] *= 1e-3
ref_list['vy'] *= 1e-3
- ref_list['vxe'] *= 1e-3
- ref_list['vye'] *= 1e-3
+ ref_list['vx_err'] *= 1e-3
+ ref_list['vy_err'] *= 1e-3
# Switch our list to a "increasing to the West" list.
ref_list['x0'] *= -1.0
@@ -239,10 +364,10 @@ def test_MosaicToRef():
msc = align.MosaicToRef(ref_list, lists, iters=2,
dr_tol=[0.2, 0.1], dm_tol=[1, 0.5],
+ outlier_tol=[None, None],
trans_class=transforms.PolyTransform,
- trans_args={'order': 2}, use_vel=True,
+ trans_args={'order': 1}, default_motion_model='Linear',
update_ref_orig=False, verbose=False)
-
msc.fit()
# Check our status columns
@@ -254,26 +379,111 @@ def test_MosaicToRef():
# The velocities should be almost the same as the input
# velocities since update_ref_orig == False.
- np.testing.assert_almost_equal(msc.ref_table['vx'], ref_list['vx'], 5)
- np.testing.assert_almost_equal(msc.ref_table['vy'], ref_list['vy'], 5)
+ assert (msc.ref_table['name']==ref_list['name']).all()
+ np.testing.assert_allclose(msc.ref_table['vx'], ref_list['vx'], rtol=1e-5)
+ np.testing.assert_allclose(msc.ref_table['vy'], ref_list['vy'], rtol=1e-5)
+
+ ##########
+ # Align and let velocities be free.
+ ##########
+ msc.update_ref_orig = 'periter'
+ msc.fit()
+
+ # The velocities should be almost the same (but not as close as before)
+ # as the input velocities since update_ref == True.
+ assert (msc.ref_table['name']==ref_list['name']).all()
+ np.testing.assert_allclose(msc.ref_table['vx'], ref_list['vx'], rtol=1e-1, atol=3e-4)
+ np.testing.assert_allclose(msc.ref_table['vy'], ref_list['vy'], rtol=1e-1, atol=3e-4)
+
+ # Also double check that they aren't exactly the same for the reference stars.
+ #assert np.any(np.not_equal(msc.ref_table['vx'], ref_list['vx']))
+ assert np.not_equal(msc.ref_table['vx'], ref_list['vx']).any()
+
+ return msc
+
+def test_MosaicToRef_acc():
+ make_fake_starlists_poly1_acc(seed=42)
+
+ ref_file = 'random_acc_ref.fits'
+ list_files = ['random_acc_0.fits',
+ 'random_acc_1.fits',
+ 'random_acc_2.fits',
+ 'random_acc_3.fits',
+ 'random_acc_4.fits',
+ 'random_acc_5.fits',
+ 'random_acc_6.fits',
+ 'random_acc_7.fits']
+
+ ref_list = Table.read(ref_file)
+
+ # Convert velocities to arcsec/yr
+ ref_list['vx0'] *= 1e-3
+ ref_list['vy0'] *= 1e-3
+ ref_list['vx0_err'] *= 1e-3
+ ref_list['vy0_err'] *= 1e-3
+
+ # Convert accelerations to arcsec/yr**2
+ ref_list['ax'] *= 1e-3
+ ref_list['ay'] *= 1e-3
+ ref_list['ax_err'] *= 1e-3
+ ref_list['ay_err'] *= 1e-3
+
+ # Switch our list to a "increasing to the West" list.
+ ref_list['x0'] *= -1.0
+ ref_list['vx0'] *= -1.0
+ ref_list['ax'] *= -1.0
+
+ lists = [starlists.StarList.read(lf) for lf in list_files]
+
+ msc = align.MosaicToRef(ref_list, lists, iters=2,
+ dr_tol=[0.4, 0.2], dm_tol=[1, 0.5],
+ trans_class=transforms.PolyTransform,
+ trans_args={'order': 2},
+ default_motion_model='Acceleration',
+ update_ref_orig=False, verbose=False)
+
+ msc.fit()
+
+ # Check our status columns
+ assert 'use_in_trans' in msc.ref_table.colnames
+ assert 'used_in_trans' in msc.ref_table.colnames
+ assert 'ref_orig' in msc.ref_table.colnames
+ assert msc.ref_table['use_in_trans'].shape == msc.ref_table['x0'].shape
+ assert msc.ref_table['used_in_trans'].shape == msc.ref_table['x'].shape
+ # The accelerations should be almost the same as the input
+ # accelerations since update_ref_orig == False.
+ i_orig, i_fit = [],[]
+ for i,star in enumerate(ref_list["name"]):
+ if star in msc.ref_table["name"]:
+ i_fit.append(np.where(msc.ref_table["name"]==star)[0][0])
+ i_orig.append(i)
+ np.testing.assert_allclose(msc.ref_table['ax'][i_fit], ref_list['ax'][i_orig], rtol=1e-5)
+ np.testing.assert_allclose(msc.ref_table['ay'][i_fit], ref_list['ay'][i_orig], rtol=1e-5)
##########
# Align and let velocities be free.
##########
- msc.update_ref_orig = True
+ msc.update_ref_orig = 'periter'
msc.fit()
# The velocities should be almost the same (but not as close as before)
# as the input velocities since update_ref == False.
- np.testing.assert_almost_equal(msc.ref_table['vx'], ref_list['vx'], 1)
- np.testing.assert_almost_equal(msc.ref_table['vy'], ref_list['vy'], 1)
+ i_orig, i_fit = [],[]
+ for i,star in enumerate(ref_list["name"]):
+ if star in msc.ref_table["name"]:
+ ix_fit = np.where(msc.ref_table["name"]==star)[0][0]
+ if ~np.isnan(msc.ref_table['ax'][ix_fit]):
+ i_orig.append(i)
+ i_fit.append(ix_fit)
+ np.testing.assert_allclose(msc.ref_table['ax'][i_fit], ref_list['ax'][i_orig], rtol=1e-1, atol=3e-4)
+ np.testing.assert_allclose(msc.ref_table['ay'][i_fit], ref_list['ay'][i_orig], rtol=1e-1, atol=3e-4)
# Also double check that they aren't exactly the same for the reference stars.
- assert np.any(np.not_equal(msc.ref_table['vx'], ref_list['vx']))
+ assert np.any(np.not_equal(msc.ref_table['ax'][:200], ref_list['ax'][:200]))
return msc
-
+
def make_fake_starlists_shifts():
N_stars = 200
@@ -328,51 +538,178 @@ def make_fake_starlists_poly1(seed=-1):
np.random.seed(seed=seed)
N_stars = 200
- x = np.random.rand(N_stars) * 1000
- y = np.random.rand(N_stars) * 1000
- m = (np.random.rand(N_stars) * 8) + 9
-
- sdx = np.argsort(m)
- x = x[sdx]
- y = y[sdx]
- m = m[sdx]
+
+ x0 = np.random.rand(N_stars) * 10.0 # arcsec (increasing to East)
+ y0 = np.random.rand(N_stars) * 10.0 # arcsec
+ x0e = np.random.randn(N_stars) * 5.0e-4 # arcsec
+ y0e = np.random.randn(N_stars) * 5.0e-4 # arcsec
+ m0 = (np.random.rand(N_stars) * 8) + 9 # mag
+ m0e = np.random.randn(N_stars) * 0.05 # mag
+ t0 = np.ones(N_stars) * 2019.5
+
+ # Make all the errors positive
+ x0e = np.abs(x0e)
+ y0e = np.abs(y0e)
+ m0e = np.abs(m0e)
name = ['star_{0:03d}'.format(ii) for ii in range(N_stars)]
- # Save original positions as reference (1st) list.
- fmt = '{0:10s} {1:5.2f} 2015.0 {2:9.4f} {3:9.4f} 0 0 0 0\n'
- _out = open('random_0.lis', 'w')
- for ii in range(N_stars):
- _out.write(fmt.format(name[ii], m[ii], x[ii], y[ii]))
- _out.close()
+ # Make a StarList
+ lis = starlists.StarList([name, m0, m0e, x0, x0e, y0, y0e, t0],
+ names = ('name', 'm0', 'm0_err', 'x0', 'x0_err', 'y0', 'y0_err', 't0'))
+
+ sdx = np.argsort(m0)
+ lis = lis[sdx]
+ # Save original positions as reference (1st) list
+ # in a StarList format (with velocities).
+ lis.write('random_ref.fits', overwrite=True)
##########
# Shifts
##########
# Make 4 new starlists with different shifts.
- transforms = [[[ 6.5, 0.99, 1e-5], [ 10.1, 1e-5, 0.99]],
- [[100.3, 0.98, 1e-5], [ 50.5, 9e-6, 1.001]],
- [[-30.0, 1.00, 1e-5], [-100.7, 2e-5, 0.999]],
- [[250.0, 0.97, 2e-5], [-250.0, 1e-5, 1.001]]]
+ times = [2018.5, 2019.0, 2019.5, 2020.0, 2020.5, 2021.0, 2021.5, 2022.0]
+ xy_trans = [[[ 6.5, 0.99, 1e-5], [ 10.1, 1e-5, 0.99]],
+ [[100.3, 0.98, 1e-5], [ 50.5, 9e-6, 1.001]],
+ [[ 0.0, 1.00, 0.0], [ 0.0, 0.0, 1.0]],
+ [[250.0, 0.97, 2e-5], [-250.0, 1e-5, 1.001]],
+ [[ 50.0, 1.01, 1e-5], [ -31.0, 1e-5, 1.000]],
+ [[ 78.0, 0.98, 0.0 ], [ 45.0, 9e-6, 1.001]],
+ [[-13.0, 0.99, 1e-5], [ 150, 2e-5, 1.002]],
+ [[ 94.0, 1.00, 9e-6], [-182.0, 0.0, 0.99]]]
+ mag_trans = [0.1, 0.4, 0.0, -0.3, 0.2, 0.0, -0.1, -0.3]
+
+ # Convert into pixels (undistorted) with the following info.
+ scale = 0.01 # arcsec / pix
+ shift = [1.0, 1.0] # pix
- for ss in range(len(shifts)):
- #transforms.PolyTransform2D(1, transforms[ss])
- xnew = x - shifts[ss][0]
- ynew = y - shifts[ss][1]
+ for ss in range(len(times)):
+ dt = times[ss] - lis['t0']
+
+ x = lis['x0']
+ y = lis['y0']
+ t = np.ones(N_stars) * times[ss]
+
+ # Convert into pixels
+ xp = (x / -scale) + shift[0] # -1 from switching to increasing to West (right)
+ yp = (y / scale) + shift[1]
+ xpe = lis['x0_err'] / scale
+ ype = lis['y0_err'] / scale
+
+ # Distort the positions
+ trans = transforms.PolyTransform(1, xy_trans[ss][0], xy_trans[ss][1], mag_offset=mag_trans[ss])
+ xd, yd = trans.evaluate(xp, yp)
+ md = trans.evaluate_mag(lis['m0'])
# Perturb with small errors (0.1 pix)
- xnew += np.random.randn(N_stars) * 0.1
- ynew += np.random.randn(N_stars) * 0.1
+ xd += np.random.randn(N_stars) * 0.1
+ yd += np.random.randn(N_stars) * 0.1
+ md += np.random.randn(N_stars) * 0.02
+ xde = xpe
+ yde = ype
+ mde = lis['m0_err']
- mnew = m + np.random.randn(N_stars) * 0.05
+ # Save the new list as a starlist.
+ new_lis = starlists.StarList([lis['name'], md, mde, xd, xde, yd, yde, t],
+ names=('name', 'm', 'me', 'x', 'xe', 'y', 'ye', 't'))
- _out = open('random_shift_{0:d}.lis'.format(ss+1), 'w')
- for ii in range(N_stars):
- _out.write(fmt.format(name[ii], mnew[ii], xnew[ii], ynew[ii]))
- _out.close()
+ new_lis.write('random_{0:d}.fits'.format(ss), overwrite=True)
- return shifts
+ return (xy_trans,mag_trans)
+
+def make_fake_starlists_poly0_vel(seed=-1):
+ # If seed >=0, then set random seed to that value
+ if seed >= 0:
+ np.random.seed(seed=seed)
+
+ N_stars = 200
+
+ x0 = np.random.rand(N_stars) * 10.0 # arcsec (increasing to East)
+ y0 = np.random.rand(N_stars) * 10.0 # arcsec
+ x0e = np.ones(N_stars) * 1.0e-4 # arcsec
+ y0e = np.ones(N_stars) * 1.0e-4 # arcsec
+ vx = np.random.randn(N_stars) * 5.0 # mas / yr
+ vy = np.random.randn(N_stars) * 5.0 # mas / yr
+ vxe = np.ones(N_stars) * 0.05 # mas / yr
+ vye = np.ones(N_stars) * 0.05 # mas / yr
+ m0 = (np.random.rand(N_stars) * 8) + 9 # mag
+ m0e = np.random.randn(N_stars) * 0.05 # mag
+ t0 = np.ones(N_stars) * 2019.5
+
+ # Make all the errors positive
+ x0e = np.abs(x0e)
+ y0e = np.abs(y0e)
+ m0e = np.abs(m0e)
+ vxe = np.abs(vxe)
+ vye = np.abs(vye)
+
+ name = ['star_{0:03d}'.format(ii) for ii in range(N_stars)]
+
+ # Make a StarList
+ lis = starlists.StarList([name, m0, m0e, x0, x0e, y0, y0e, vx, vxe, vy, vye, t0],
+ names = ('name', 'm0', 'm0_err', 'x0', 'x0_err', 'y0', 'y0_err',
+ 'vx', 'vx_err', 'vy', 'vy_err', 't0'))
+
+ sdx = np.argsort(m0)
+ lis = lis[sdx]
+
+ # Save original positions as reference (1st) list
+ # in a StarList format (with velocities).
+ lis.write('random_vel_ref.fits', overwrite=True)
+
+ ##########
+ # Propagate to new times and distort.
+ ##########
+ # Make 8 new starlists with different epochs and transformations.
+ times = [2018.5, 2019.0, 2019.5, 2020.0, 2020.5, 2021.0, 2021.5, 2022.0]
+ xy_trans = [[[ 6.5], [ 10.1]],
+ [[100.3], [ 50.5]],
+ [[ 0.0], [ 0.0]],
+ [[250.0], [-250.0]],
+ [[ 50.0], [ -31.0]],
+ [[ 78.0], [ 45.0]],
+ [[-13.0], [ 150]],
+ [[ 94.0], [-182.0]]]
+ mag_trans = [0.1, 0.4, 0.0, -0.3, 0.2, 0.0, -0.1, -0.3]
+
+ # Convert into pixels (undistorted) with the following info.
+ scale = 0.01 # arcsec / pix
+ shift = [1.0, 1.0] # pix
+
+ for ss in range(len(times)):
+ dt = times[ss] - lis['t0']
+
+ x = lis['x0'] + (lis['vx']/1e3) * dt
+ y = lis['y0'] + (lis['vy']/1e3) * dt
+ t = np.ones(N_stars) * times[ss]
+
+ # Convert into pixels
+ xp = (x / -scale) + shift[0] # -1 from switching to increasing to West (right)
+ yp = (y / scale) + shift[1]
+ xpe = lis['x0_err'] / scale
+ ype = lis['y0_err'] / scale
+
+ # Distort the positions
+ trans = transforms.PolyTransform(0, xy_trans[ss][0], xy_trans[ss][1], mag_offset=mag_trans[ss])
+ xd, yd = trans.evaluate(xp, yp)
+ md = trans.evaluate_mag(lis['m0'])
+
+ # Perturb with small errors (0.1 pix)
+ xd += np.random.randn(N_stars) * xpe
+ yd += np.random.randn(N_stars) * ype
+ md += np.random.randn(N_stars) * 0.02
+ xde = xpe
+ yde = ype
+ mde = lis['m0_err']
+
+ # Save the new list as a starlist.
+ new_lis = starlists.StarList([lis['name'], md, mde, xd, xde, yd, yde, t],
+ names=('name', 'm', 'me', 'x', 'xe', 'y', 'ye', 't'))
+
+ new_lis.write('random_vel_p0_{0:d}.fits'.format(ss), overwrite=True)
+
+ return (xy_trans, mag_trans)
def make_fake_starlists_poly1_vel(seed=-1):
@@ -384,12 +721,12 @@ def make_fake_starlists_poly1_vel(seed=-1):
x0 = np.random.rand(N_stars) * 10.0 # arcsec (increasing to East)
y0 = np.random.rand(N_stars) * 10.0 # arcsec
- x0e = np.random.randn(N_stars) * 5.0e-4 # arcsec
- y0e = np.random.randn(N_stars) * 5.0e-4 # arcsec
+ x0e = np.ones(N_stars) * 1.0e-4 # arcsec
+ y0e = np.ones(N_stars) * 1.0e-4 # arcsec
vx = np.random.randn(N_stars) * 5.0 # mas / yr
vy = np.random.randn(N_stars) * 5.0 # mas / yr
- vxe = np.random.randn(N_stars) * 0.1 # mas / yr
- vye = np.random.randn(N_stars) * 0.1 # mas / yr
+ vxe = np.ones(N_stars) * 0.05 # mas / yr
+ vye = np.ones(N_stars) * 0.05 # mas / yr
m0 = (np.random.rand(N_stars) * 8) + 9 # mag
m0e = np.random.randn(N_stars) * 0.05 # mag
t0 = np.ones(N_stars) * 2019.5
@@ -405,8 +742,8 @@ def make_fake_starlists_poly1_vel(seed=-1):
# Make an StarList
lis = starlists.StarList([name, m0, m0e, x0, x0e, y0, y0e, vx, vxe, vy, vye, t0],
- names = ('name', 'm0', 'm0e', 'x0', 'x0e', 'y0', 'y0e',
- 'vx', 'vxe', 'vy', 'vye', 't0'))
+ names = ('name', 'm0', 'm0_err', 'x0', 'x0_err', 'y0', 'y0_err',
+ 'vx', 'vx_err', 'vy', 'vy_err', 't0'))
sdx = np.argsort(m0)
lis = lis[sdx]
@@ -419,12 +756,16 @@ def make_fake_starlists_poly1_vel(seed=-1):
# Propogate to new times and distort.
##########
# Make 4 new starlists with different epochs and transformations.
- times = [2018.5, 2019.5, 2020.5, 2021.5]
+ times = [2018.5, 2019.0, 2019.5, 2020.0, 2020.5, 2021.0, 2021.5, 2022.0]
xy_trans = [[[ 6.5, 0.99, 1e-5], [ 10.1, 1e-5, 0.99]],
[[100.3, 0.98, 1e-5], [ 50.5, 9e-6, 1.001]],
- [[ 0.0, 1.00, 0.0], [ 0.0, 0.0, 1.0]],
- [[250.0, 0.97, 2e-5], [-250.0, 1e-5, 1.001]]]
- mag_trans = [0.1, 0.4, 0.0, -0.3]
+ [[ 0.0, 1.00, 0.0], [ 0.0, 0.0, 1.000]],
+ [[250.0, 1.01, 2e-5], [-250.0, 1e-5, 0.98]],
+ [[ 50.0, 1.01, 1e-5], [ -31.0, 1e-5, 1.000]],
+ [[ 78.0, 0.98, 0.0 ], [ 45.0, 9e-6, 1.001]],
+ [[-13.0, 0.99, 1e-5], [ 150, 2e-5, 1.002]],
+ [[ 94.0, 1.00, 9e-6], [-182.0, 0.0, 0.99]]]
+ mag_trans = [0.1, 0.4, 0.0, -0.3, 0.2, 0.0, -0.1, -0.3]
# Convert into pixels (undistorted) with the following info.
scale = 0.01 # arcsec / pix
@@ -440,8 +781,226 @@ def make_fake_starlists_poly1_vel(seed=-1):
# Convert into pixels
xp = (x / -scale) + shift[0] # -1 from switching to increasing to West (right)
yp = (y / scale) + shift[1]
- xpe = lis['x0e'] / scale
- ype = lis['y0e'] / scale
+ xpe = lis['x0_err'] / scale
+ ype = lis['y0_err'] / scale
+
+ # Distort the positions
+ trans = transforms.PolyTransform(1, xy_trans[ss][0], xy_trans[ss][1], mag_offset=mag_trans[ss])
+ xd, yd = trans.evaluate(xp, yp)
+ md = trans.evaluate_mag(lis['m0'])
+
+ # Perturb with small errors (0.1 mas)
+ xd += np.random.randn(N_stars) * xpe
+ yd += np.random.randn(N_stars) * ype
+ md += np.random.randn(N_stars) * 0.02
+ xde = xpe
+ yde = ype
+ mde = lis['m0_err']
+
+ # Save the new list as a starlist.
+ new_lis = starlists.StarList([lis['name'], md, mde, xd, xde, yd, yde, t],
+ names=('name', 'm', 'me', 'x', 'xe', 'y', 'ye', 't'))
+
+ new_lis.write('random_vel_{0:d}.fits'.format(ss), overwrite=True)
+
+ return (xy_trans, mag_trans)
+
+def make_fake_starlists_poly1_acc(seed=-1):
+ # If seed >=0, then set random seed to that value
+ if seed >= 0:
+ np.random.seed(seed=seed)
+
+ N_stars = 200
+
+ x0 = np.random.rand(N_stars) * 10.0 # arcsec (increasing to East)
+ y0 = np.random.rand(N_stars) * 10.0 # arcsec
+ x0e = np.ones(N_stars) * 1.0e-4 # arcsec
+ y0e = np.ones(N_stars) * 1.0e-4 # arcsec
+ vx = np.random.randn(N_stars) * 5.0 # mas / yr
+ vy = np.random.randn(N_stars) * 5.0 # mas / yr
+ vxe = np.ones(N_stars) * 0.1 # mas / yr
+ vye = np.ones(N_stars) * 0.1 # mas / yr
+ ax = np.random.randn(N_stars) * 0.5 # mas / yr^2
+ ay = np.random.randn(N_stars) * 0.5 # mas / yr^2
+ axe = np.ones(N_stars) * 0.01 # mas / yr^2
+ aye = np.ones(N_stars) * 0.01 # mas / yr^2
+ m0 = (np.random.rand(N_stars) * 8) + 9 # mag
+ m0e = np.random.randn(N_stars) * 0.05 # mag
+ t0 = np.ones(N_stars) * 2019.5
+
+ # Make all the errors positive
+ x0e = np.abs(x0e)
+ y0e = np.abs(y0e)
+ m0e = np.abs(m0e)
+ vxe = np.abs(vxe)
+ vye = np.abs(vye)
+ axe = np.abs(axe)
+ aye = np.abs(aye)
+
+ name = ['star_{0:03d}'.format(ii) for ii in range(N_stars)]
+
+    # Make a StarList
+ lis = starlists.StarList([name, m0, m0e,
+ x0, x0e, y0, y0e,
+ vx, vxe, vy, vye,
+ ax, axe, ay, aye,
+ t0],
+ names = ('name', 'm0', 'm0_err',
+ 'x0', 'x0_err', 'y0', 'y0_err',
+ 'vx0', 'vx0_err', 'vy0', 'vy0_err',
+ 'ax', 'ax_err', 'ay', 'ay_err',
+ 't0'))
+
+ sdx = np.argsort(m0)
+ lis = lis[sdx]
+
+ # Save original positions as reference (1st) list
+ # in a StarList format (with velocities).
+ lis.write('random_acc_ref.fits', overwrite=True)
+
+ ##########
+    # Propagate to new times and distort.
+ ##########
+    # Make 8 new starlists with different epochs and transformations.
+ times = [2018.5, 2019.0, 2019.5, 2020.0, 2020.5, 2021.0, 2021.5, 2022.0]
+ xy_trans = [[[ 6.5, 0.99, 1e-5], [ 10.1, 1e-5, 0.99]],
+ [[100.3, 0.98, 1e-5], [ 50.5, 9e-6, 1.001]],
+ [[ 0.0, 1.00, 0.0], [ 0.0, 0.0, 1.000]],
+ [[250.0, 0.97, 2e-5], [-250.0, 1e-5, 1.001]],
+ [[ 50.0, 1.01, 1e-5], [ -31.0, 1e-5, 1.000]],
+ [[ 78.0, 0.98, 0.0 ], [ 45.0, 9e-6, 1.001]],
+ [[-13.0, 0.99, 1e-5], [ 150, 2e-5, 1.002]],
+ [[ 94.0, 1.00, 9e-6], [-182.0, 0.0, 0.99]]]
+ mag_trans = [0.1, 0.4, 0.0, -0.3, 0.2, 0.0, -0.1, -0.3]
+
+ # Convert into pixels (undistorted) with the following info.
+ scale = 0.01 # arcsec / pix
+ shift = [1.0, 1.0] # pix
+
+ for ss in range(len(times)):
+ dt = times[ss] - lis['t0']
+
+ x = lis['x0'] + (lis['vx0']/1e3) * dt + 0.5*(lis['ax']/1e3) * dt**2
+ y = lis['y0'] + (lis['vy0']/1e3) * dt + 0.5*(lis['ay']/1e3) * dt**2
+ t = np.ones(N_stars) * times[ss]
+
+ # Convert into pixels
+ xp = (x / -scale) + shift[0] # -1 from switching to increasing to West (right)
+ yp = (y / scale) + shift[1]
+ xpe = lis['x0_err'] / scale
+ ype = lis['y0_err'] / scale
+
+ # Distort the positions
+ trans = transforms.PolyTransform(1, xy_trans[ss][0], xy_trans[ss][1], mag_offset=mag_trans[ss])
+ xd, yd = trans.evaluate(xp, yp)
+ md = trans.evaluate_mag(lis['m0'])
+
+        # Perturb with small errors (0.1 mas = 0.01 pix)
+ xd += np.random.randn(N_stars) * xpe
+ yd += np.random.randn(N_stars) * ype
+ md += np.random.randn(N_stars) * 0.02
+ xde = xpe
+ yde = ype
+ mde = lis['m0_err']
+
+ # Save the new list as a starlist.
+ new_lis = starlists.StarList([lis['name'], md, mde, xd, xde, yd, yde, t],
+ names=('name', 'm', 'me', 'x', 'xe', 'y', 'ye', 't'))
+
+ new_lis.write('random_acc_{0:d}.fits'.format(ss), overwrite=True)
+
+ return (xy_trans, mag_trans)
+
+def make_fake_starlists_poly1_par(seed=-1):
+ # If seed >=0, then set random seed to that value
+ if seed >= 0:
+ np.random.seed(seed=seed)
+
+ N_stars = 200
+
+ x0 = np.random.rand(N_stars) * 10.0 # arcsec (increasing to East)
+ y0 = np.random.rand(N_stars) * 10.0 # arcsec
+ x0e = np.random.randn(N_stars) * 5.0e-4 # arcsec
+ y0e = np.random.randn(N_stars) * 5.0e-4 # arcsec
+ vx = np.random.randn(N_stars) * 5.0 # mas / yr
+ vy = np.random.randn(N_stars) * 5.0 # mas / yr
+ vxe = np.random.randn(N_stars) * 0.1 # mas / yr
+ vye = np.random.randn(N_stars) * 0.1 # mas / yr
+ pi = np.random.randn(N_stars) * 0.5 # mas
+ pie = np.random.randn(N_stars) * 0.01 # mas
+ m0 = (np.random.rand(N_stars) * 8) + 9 # mag
+ m0e = np.random.randn(N_stars) * 0.05 # mag
+ t0 = np.ones(N_stars) * 2019.5
+
+ # Make all the errors positive
+ x0e = np.abs(x0e)
+ y0e = np.abs(y0e)
+ m0e = np.abs(m0e)
+ vxe = np.abs(vxe)
+ vye = np.abs(vye)
+ pie = np.abs(pie)
+
+ name = ['star_{0:03d}'.format(ii) for ii in range(N_stars)]
+
+    # Make a StarList
+ lis = starlists.StarList([name, m0, m0e,
+ x0, x0e, y0, y0e,
+ vx, vxe, vy, vye,
+ pi, pie,
+ t0],
+ names = ('name', 'm0', 'm0_err',
+ 'x0', 'x0_err', 'y0', 'y0_err',
+ 'vx', 'vx_err', 'vy', 'vy_err',
+ 'pi', 'pi_err',
+ 't0'))
+
+ sdx = np.argsort(m0)
+ lis = lis[sdx]
+
+ # Save original positions as reference (1st) list
+ # in a StarList format (with velocities).
+ lis.write('random_par_ref.fits', overwrite=True)
+
+ ##########
+    # Propagate to new times and distort.
+ ##########
+    # Make 8 new starlists with different epochs and transformations.
+ '''times = [2018.5, 2019.5, 2020.5, 2021.5]
+ xy_trans = [[[ 6.5, 0.99, 1e-5], [ 10.1, 1e-5, 0.99]],
+ [[100.3, 0.98, 1e-5], [ 50.5, 9e-6, 1.001]],
+ [[ 0.0, 1.00, 0.0], [ 0.0, 0.0, 1.0]],
+ [[250.0, 0.97, 2e-5], [-250.0, 1e-5, 1.001]]]
+ mag_trans = [0.1, 0.4, 0.0, -0.3]'''
+
+ times = [2018.5, 2019.0, 2019.5, 2020.0, 2020.5, 2021.0, 2021.5, 2022.0]
+ xy_trans = [[[ 6.5, 0.99, 1e-5], [ 10.1, 1e-5, 0.99]],
+ [[100.3, 0.98, 1e-5], [ 50.5, 9e-6, 1.001]],
+ [[ 0.0, 1.00, 0.0], [ 0.0, 0.0, 1.0]],
+ [[250.0, 0.97, 2e-5], [-250.0, 1e-5, 1.001]],
+ [[ 50.0, 1.00, 0.0], [ -31.0, 0.0, 1.000]],
+ [[ 78.0, 1.00, 0.0 ], [ 45.0, 0.0, 1.00]],
+ [[-13.0, 1.00, 0.0], [ 150, 0.0, 1.00]],
+ [[ 94.0, 1.00, 0.0], [-182.0, 0.0, 1.00]]]
+ mag_trans = [0.1, 0.4, 0.0, -0.3, 0.0, 0.0, 0.0, 0.0]
+
+ # Convert into pixels (undistorted) with the following info.
+ scale = 0.01 # arcsec / pix
+ shift = [1.0, 1.0] # pix
+
+ for ss in range(len(times)):
+ dt = times[ss] - lis['t0']
+
+ par_mod = motion_model.Parallax(PA=0,RA=18.0, Dec=-30.0)
+ par_mod_dat = par_mod.get_batch_pos_at_time(dt+lis['t0'], x0=lis['x0'],vx=lis['vx']/1e3, pi=lis['pi'],
+ y0=lis['y0'], vy=lis['vy']/1e3, t0=lis['t0'])
+ x,y = par_mod_dat[0], par_mod_dat[1]
+ t = np.ones(N_stars) * times[ss]
+
+ # Convert into pixels
+ xp = (x / -scale) + shift[0] # -1 from switching to increasing to West (right)
+ yp = (y / scale) + shift[1]
+ xpe = lis['x0_err'] / scale
+ ype = lis['y0_err'] / scale
# Distort the positions
trans = transforms.PolyTransform(1, xy_trans[ss][0], xy_trans[ss][1], mag_offset=mag_trans[ss])
@@ -454,15 +1013,16 @@ def make_fake_starlists_poly1_vel(seed=-1):
md += np.random.randn(N_stars) * 0.02
xde = xpe
yde = ype
- mde = lis['m0e']
+ mde = lis['m0_err']
# Save the new list as a starlist.
new_lis = starlists.StarList([lis['name'], md, mde, xd, xde, yd, yde, t],
names=('name', 'm', 'me', 'x', 'xe', 'y', 'ye', 't'))
- new_lis.write('random_vel_{0:d}.fits'.format(ss), overwrite=True)
+ new_lis.write('random_par_{0:d}.fits'.format(ss), overwrite=True)
return (xy_trans, mag_trans)
+
def test_MosaicToRef_hst_me():
"""
@@ -509,11 +1069,11 @@ def test_MosaicToRef_hst_me():
outlier_tol=[None], mag_lim=[13, 21],
trans_class=transforms.PolyTransform,
trans_args=[{'order': 1}],
- use_vel=False,
+ default_motion_model='Fixed',
use_ref_new=False,
update_ref_orig=False,
mag_trans=False,
- weights='both,std',
+ trans_weights='both,std',
init_guess_mode='miracle', verbose=False)
msc.fit()
tab = msc.ref_table
@@ -546,7 +1106,7 @@ def test_bootstrap():
outlier_tol = None
mag_lim = None
ref_mag_lim = None
- weights = 'both,var'
+ trans_weights = 'both,var'
mag_trans = False
n_boot = 15
@@ -560,8 +1120,8 @@ def test_bootstrap():
mag_trans=mag_trans,
mag_lim=mag_lim,
ref_mag_lim=ref_mag_lim,
- weights=weights,
- use_vel=True,
+ trans_weights=trans_weights,
+ default_motion_model='Linear',
use_ref_new=False,
update_ref_orig=False,
init_guess_mode='name',
@@ -576,12 +1136,12 @@ def test_bootstrap():
# Run bootstrap: no boot_epochs_min
match1.calc_bootstrap_errors(n_boot=n_boot, boot_epochs_min=boot_epochs_min)
-
# Make sure columns exist, and none of them are nan values
assert np.sum(np.isnan(match1.ref_table['xe_boot'])) == 0
assert np.sum(np.isnan(match1.ref_table['ye_boot'])) == 0
- assert np.sum(np.isnan(match1.ref_table['vxe_boot'])) == 0
- assert np.sum(np.isnan(match1.ref_table['vye_boot'])) == 0
+ assert np.sum(np.isnan(match1.ref_table['vx_err_boot'])) == 0
+ assert np.sum(np.isnan(match1.ref_table['vy_err_boot'])) == 0
+ #pdb.set_trace()
# Test 2: make sure boot_epochs_min is working
# Eliminate some rows to list2, so some stars are only in 1 epoch.
@@ -595,8 +1155,8 @@ def test_bootstrap():
mag_trans=mag_trans,
mag_lim=mag_lim,
ref_mag_lim=ref_mag_lim,
- weights=weights,
- use_vel=True,
+ trans_weights=trans_weights,
+ default_motion_model='Linear',
use_ref_new=False,
update_ref_orig=False,
init_guess_mode='name',
@@ -620,14 +1180,14 @@ def test_bootstrap():
# For "good" stars: all bootstrap vals should be present
assert np.sum(np.isnan(out['xe_boot'][good])) == 0
assert np.sum(np.isnan(out['ye_boot'][good])) == 0
- assert np.sum(np.isnan(out['vxe_boot'][good])) == 0
- assert np.sum(np.isnan(out['vye_boot'][good])) == 0
+ assert np.sum(np.isnan(out['vx_err_boot'][good])) == 0
+ assert np.sum(np.isnan(out['vy_err_boot'][good])) == 0
# For "bad" stars, all bootstrap vals should be nans
assert np.sum(np.isfinite(out['xe_boot'][bad])) == 0
assert np.sum(np.isfinite(out['ye_boot'][bad])) == 0
- assert np.sum(np.isfinite(out['vxe_boot'][bad])) == 0
- assert np.sum(np.isfinite(out['vye_boot'][bad])) == 0
+ assert np.sum(np.isfinite(out['vx_err_boot'][bad])) == 0
+ assert np.sum(np.isfinite(out['vy_err_boot'][bad])) == 0
return
@@ -659,7 +1219,7 @@ def test_calc_vel_in_bootstrap():
outlier_tol = None
mag_lim = None
ref_mag_lim = None
- weights = 'both,var'
+ trans_weights = 'both,var'
mag_trans = False
n_boot = 15
@@ -673,8 +1233,8 @@ def test_calc_vel_in_bootstrap():
mag_trans=mag_trans,
mag_lim=mag_lim,
ref_mag_lim=ref_mag_lim,
- weights=weights,
- use_vel=True,
+ trans_weights=trans_weights,
+ default_motion_model='Linear',
use_ref_new=False,
update_ref_orig=False,
init_guess_mode='name',
@@ -692,15 +1252,15 @@ def test_calc_vel_in_bootstrap():
assert 'xe_boot' in match_vel.ref_table.keys()
assert np.sum(np.isnan(match_vel.ref_table['xe_boot'])) == 0
- assert 'vxe_boot' in match_vel.ref_table.keys()
- assert np.sum(np.isnan(match_vel.ref_table['vxe_boot'])) == 0
+ assert 'vx_err_boot' in match_vel.ref_table.keys()
+ assert np.sum(np.isnan(match_vel.ref_table['vx_err_boot'])) == 0
# Run without calc_vel_in_bootstrap, make sure velocities are NOT calculated
match.calc_bootstrap_errors(n_boot=n_boot, calc_vel_in_bootstrap=False)
assert 'xe_boot' in match.ref_table.keys()
assert np.sum(np.isnan(match.ref_table['xe_boot'])) == 0
- assert 'vxe_boot' not in match.ref_table.keys()
+ assert 'vx_err_boot' not in match.ref_table.keys()
return
@@ -727,7 +1287,7 @@ def test_transform_xym():
outlier_tol = None
mag_lim = None
ref_mag_lim = None
- weights = 'both,var'
+ trans_weights = 'both,var'
n_boot = 15
mag_trans = False
@@ -740,8 +1300,8 @@ def test_transform_xym():
mag_trans=mag_trans,
mag_lim=mag_lim,
ref_mag_lim=ref_mag_lim,
- weights=weights,
- use_vel=False,
+ trans_weights=trans_weights,
+ default_motion_model='Fixed',
use_ref_new=False,
update_ref_orig=False,
init_guess_mode='name',
@@ -775,8 +1335,8 @@ def test_transform_xym():
mag_trans=mag_trans,
mag_lim=mag_lim,
ref_mag_lim=ref_mag_lim,
- weights=weights,
- use_vel=False,
+ trans_weights=trans_weights,
+ default_motion_model='Fixed',
use_ref_new=False,
update_ref_orig=False,
init_guess_mode='name',
@@ -809,7 +1369,7 @@ def test_MosaicToRef_mag_bug():
"""
make_fake_starlists_poly1_vel()
- ref_list = starlists.StarList.from_lis_file('random_0.lis', error=False)
+ ref_list = starlists.StarList.read('random_vel_0.fits')
lists = [ref_list]
msc = align.MosaicToRef(ref_list, lists,
@@ -819,7 +1379,7 @@ def test_MosaicToRef_mag_bug():
outlier_tol=None,
trans_class=transforms.PolyTransform,
trans_args=[{'order': 1}],
- use_vel=False,
+ default_motion_model='Fixed',
use_ref_new=False,
update_ref_orig=False,
verbose=True)
@@ -853,8 +1413,8 @@ def test_masked_cols():
# Coordinates are arcsecs offset +x to the East.
targets_dict = {'ob150029': [0.0, 0.0],
- 'S11_15_3.9': [ 1.13982, 3.73524],
- 'S13_13_4.5': [-4.42878, 0.03100]
+ 'S005': [1.1416, 3.7405],
+ 'S002': [-4.421, 0.027]
}
# Get gaia catalog stars. Note that this produces a masked column table
@@ -882,7 +1442,7 @@ def test_masked_cols():
dr_tol=[0.2, 0.1], dm_tol=[1, 1],
trans_class=transforms.PolyTransform,
trans_args=[{'order': 1}, {'order': 1}],
- use_vel=True,
+ default_motion_model='Linear',
use_ref_new=False,
update_ref_orig=False,
mag_trans=True,
diff --git a/flystar/tests/test_all_detected.fits b/flystar/tests/test_all_detected.fits
new file mode 100644
index 0000000..ae56198
--- /dev/null
+++ b/flystar/tests/test_all_detected.fits
@@ -0,0 +1,2911 @@
+SIMPLE = T / conforms to FITS standard BITPIX = 8 / array data type NAXIS = 0 / number of array dimensions EXTEND = T END XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / array data type NAXIS = 2 / number of array dimensions NAXIS1 = 632 / length of dimension 1 NAXIS2 = 2000 / length of dimension 2 PCOUNT = 0 / number of group parameters GCOUNT = 1 / number of groups TFIELDS = 21 / number of table fields TTYPE1 = 'name ' TFORM1 = 'K ' TTYPE2 = 'x ' TFORM2 = '12D ' TDIM2 = '(2,6) ' TTYPE3 = 'y ' TFORM3 = '12D ' TDIM3 = '(2,6) ' TTYPE4 = 'm ' TFORM4 = '12D ' TDIM4 = '(2,6) ' TTYPE5 = 'xe ' TFORM5 = '6D ' TDIM5 = '(6) ' TTYPE6 = 'ye ' TFORM6 = '6D ' TDIM6 = '(6) ' TTYPE7 = 'me ' TFORM7 = '6D ' TDIM7 = '(6) ' TTYPE8 = 'n ' TFORM8 = '6D ' TDIM8 = '(6) ' TTYPE9 = 'det ' TFORM9 = '6D ' TDIM9 = '(6) ' TTYPE10 = 'vx ' TFORM10 = 'D ' TTYPE11 = 'vy ' TFORM11 = 'D ' TTYPE12 = 'vxe ' TFORM12 = 'D ' TTYPE13 = 'vye ' TFORM13 = 'D ' TTYPE14 = 'x0 ' TFORM14 = 'D ' TTYPE15 = 'y0 ' TFORM15 = 'D ' TTYPE16 = 'x0e ' TFORM16 = 'D ' TTYPE17 = 'y0e ' TFORM17 = 'D ' TTYPE18 = 'chi2_vx ' TFORM18 = 'D ' TTYPE19 = 'chi2_vy ' TFORM19 = 'D ' TTYPE20 = 't0 ' TFORM20 = 'D ' TTYPE21 = 'n_vfit ' TFORM21 = 'D ' EPNAMES = '2005_F814W_F1' EPNAMES = '2010_F125W_F3' EPNAMES = '2010_F139M_F2' EPNAMES = '2010_F160W_F1' EPNAMES = '2013_F160W_F1' EPNAMES = '2015_F160W_F1' ZPOINTS = 32.6783 ZPOINTS = 25.2305 ZPOINTS = 23.2835 ZPOINTS = 24.5698 ZPOINTS = 24.5698 ZPOINTS = 24.5698 YEARS = 2005.485 YEARS = 2010.652 YEARS = 2010.652 YEARS = 2010.652 YEARS = 2013.199 YEARS = 2015.148 HIERARCH DATE PRODUCED = '2025-06-30' HIERARCH INSTRUMENT = 'ACSWFC ' HIERARCH INSTRUMENT = 'WFC3IR ' HIERARCH INSTRUMENT = 'WFC3IR ' HIERARCH INSTRUMENT = 'WFC3IR ' HIERARCH INSTRUMENT = 'WFC3IR ' HIERARCH INSTRUMENT = 'WFC3IR ' END @
1&y@
c+(@
1&y@4U*@
1&y@OS@
1&y@ŕ@
1&y@
!@
1&y@]H/@nzG@ns2ph@nzG@n:t@nzG@mI@nzG@mm@nzG@nb3@nzG@ns@8䎊@8m1@4S@3!d@3~"@3Q@䩤@2@2h4Z@2@2ĊRd@2@2&EK?hjaQ?*?iy?Û?
+Ld?OU=6i?/nI|??`l??!g?'?χ1?# ?jo?/O?ޥe?.Eôv?[\@ @" @ @" @4 @. ? ? ? ? ? ? ?Ek ?mo ?zS ?T8O@@n?/??Cs?9wZe`?3#@溦z@k%>@ @`ě@\1'@`ě@Q4K@`ě@Mw1@`ě@G@`ě@:6@`ě@4j~@ۊ=p@ێV@ۊ=p@{lD@ۊ=p@۞Q@ۊ=p@ہTɅ@ۊ=p@ۙb@ۊ=p@cA@6=:@6:)^@4hr@4SMj@3`A@3\(@3._o @3:L/|@3._o @3BC,@3._o @3G?Ol?.5?{?d`Xp?͵?>;?
?>%?:?Җhn?|9.)?@~?
+B?7ly\?J鞤?Jf?8? J6Л@ @ @ @ @, @( ? ? ? ? ? ? C &Ԡ ?*2iۂA?Y領~@OûZ@یc?D?tN'p?{Q(?@bn{@ @+. @+. @+. @+. @+@٦@+@ k@(6E. @(6E. @(6E. @(6E. @(6E@)Q@(6E@!p<@8s.>@4S.Mm@3`A7.Qn@2YJ.NC,@2YJ@2>@2YJ@1E2a|@8 J@8 #@8 :@8 >+?BxT?g{=@8 J@8 @8 i@8 ?VYk?Պu@8 p@8
*@8 p@8 ?Z?\ @ @ ? ? @zG@w@zG@rGF@zG@s@zG@=b@zG@*0@zG@X@շKƧ@ռ(@շKƧ@7@շKƧ@շX@շKƧ@շ@շKƧ@նz@շKƧ@ո}H@8g l@8\N@4hr @4&@4"-V@4*͞&@3B@5@3GKƧ@3B@5@3G@3B@5@3H9Xb?q!U?+W?](s?A2x?wX?>V$?TU?[G,?ҌI?,#t?s?|[z?ӖO_?[
S? e?Za7?Us?DΊ@ @ @ @ @* @( ? ? ? ? ? ? ?VM Bx ?QԬy!?Bex.@W.V@ոAA?nɢf?[~?u?+\t@oF5i@ @EQ@9R4@EQ@G2@EQ@?'-9@EQ@DqN@EQ@D@EQ@FW@/j~#@/,l@/j~#@/i3ߢ@/j~#@/qjK>h@/j~#@/fX@/j~#@/m*@/j~#@/uA@8g l@8u@2r Ĝ@2QU|@2gKƧ@2l76@1&@1"@1&@1[@1&@1} t?ڢ??b r}?N[x?},A? J?P*i?6 k?ZU?1O}?=е?zpY?i?V0qRi?@&pp??~?zA?Ad`@ @ @&