diff --git a/c/include/digital_rf.h b/c/include/digital_rf.h index d0c4b46..583772c 100644 --- a/c/include/digital_rf.h +++ b/c/include/digital_rf.h @@ -132,6 +132,10 @@ EXPORT int digital_rf_write_blocks_hdf5( uint64_t, long double, int*, int*, int*, int*, int*, int*, uint64_t*); extern "C" EXPORT int digital_rf_get_unix_time_rational( uint64_t, uint64_t, uint64_t, int*, int*, int*, int*, int*, int*, uint64_t*); + extern "C" EXPORT int digital_rf_get_timestamp_floor( + uint64_t, uint64_t, uint64_t, uint64_t*, uint64_t*); + extern "C" EXPORT int digital_rf_get_sample_ceil( + uint64_t, uint64_t, uint64_t, uint64_t, uint64_t*); extern "C" EXPORT Digital_rf_write_object * digital_rf_create_write_hdf5( char*, hid_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, char *, int, int, int, int, int, int); extern "C" EXPORT int digital_rf_write_hdf5(Digital_rf_write_object*, uint64_t, void*,uint64_t); @@ -149,6 +153,10 @@ EXPORT int digital_rf_write_blocks_hdf5( uint64_t sample_rate_numerator, uint64_t sample_rate_denominator, int * year, int * month, int *day, int * hour, int * minute, int * second, uint64_t * picosecond); + EXPORT int digital_rf_get_timestamp_floor( + uint64_t sample_index, uint64_t sample_rate_numerator, uint64_t sample_rate_denominator, uint64_t * second, uint64_t * picosecond); + EXPORT int digital_rf_get_sample_ceil( + uint64_t second, uint64_t picosecond, uint64_t sample_rate_numerator, uint64_t sample_rate_denominator, uint64_t * sample_index); EXPORT Digital_rf_write_object * digital_rf_create_write_hdf5( char * directory, hid_t dtype_id, uint64_t subdir_cadence_secs, uint64_t file_cadence_millisecs, uint64_t global_start_sample, @@ -166,10 +174,6 @@ EXPORT int digital_rf_write_blocks_hdf5( #endif /* Private method declarations */ -int digital_rf_get_timestamp_floor(uint64_t sample_index, uint64_t sample_rate_numerator, - uint64_t sample_rate_denominator, uint64_t * second, uint64_t * picosecond); -int digital_rf_get_sample_ceil(uint64_t second, 
uint64_t picosecond, - uint64_t sample_rate_numerator, uint64_t sample_rate_denominator, uint64_t * sample_index); int digital_rf_get_time_parts(time_t unix_second, int * year, int * month, int *day, int * hour, int * minute, int * second); int digital_rf_get_subdir_file(Digital_rf_write_object *hdf5_data_object, uint64_t global_sample, diff --git a/c/include/digital_rf_version.h b/c/include/digital_rf_version.h index d7eca6f..6e8e72b 100644 --- a/c/include/digital_rf_version.h +++ b/c/include/digital_rf_version.h @@ -1,2 +1,2 @@ // library version, increment to match package version when C interface changes -#define DIGITAL_RF_VERSION "2.6.0" +#define DIGITAL_RF_VERSION "2.7.0" diff --git a/conda-forge.yml b/conda-forge.yml index 1ea5390..47aa183 100644 --- a/conda-forge.yml +++ b/conda-forge.yml @@ -26,6 +26,8 @@ # documentation on possible keys and values. build_platform: + linux_aarch64: linux_64 + #linux_ppc64le: linux_64 osx_arm64: osx_64 linux_aarch64: linux_64 clone_depth: 0 @@ -33,8 +35,6 @@ github_actions: store_build_artifacts: true provider: linux_64: github_actions - linux_aarch64: github_actions - #linux_ppc64le: github_actions osx_64: github_actions win_64: github_actions recipe_dir: recipes/conda diff --git a/news/integer_math_python.rst b/news/integer_math_python.rst new file mode 100644 index 0000000..a5538de --- /dev/null +++ b/news/integer_math_python.rst @@ -0,0 +1,26 @@ +**Added:** + +* The Digital RF and Digital Metadata reader objects now provide a ``sample_rate`` property that represents the rational sample rate as a ``fractions.Fraction`` object. The ``samples_per_second`` property of ```np.longdouble``` dtype still exists for backwards compatibility, but new code should use ``sample_rate`` instead. +* Add new ``digital_rf_get_timestamp_floor`` and ``digital_rf_get_sample_ceil`` C functions that can be used to convert between a timestamp and sample index, given the rational sample rate. 
These have been made available so that users can perform these calculations in a way that is consistent with what is done internally. +* Add new ``datetime_to_timedelta_tuple``, ``get_samplerate_frac``, ``sample_to_time_floor``, ``time_to_sample_ceil`` Python utility functions for datetime, timestamp, and sample index math. These match the new C functions. + +**Changed:** + +* All internal code has been updated so that sample rate calculations use a rational representation instead of a ``np.longdouble`` floating point. + +**Deprecated:** + +* Deprecate ``samples_to_timedelta`` utility function. Use ``sample_to_time_floor`` instead and create a timedelta object if necessary: ``datetime.timedelta(seconds=seconds, microseconds=picoseconds // 1000000)``. +* Deprecate ``time_to_sample`` utility function. Use ``time_to_sample_ceil`` instead in combination with ``datetime_to_timedelta_tuple`` if necessary. + +**Removed:** + +* + +**Fixed:** + +* + +**Security:** + +* diff --git a/python/digital_rf/digital_metadata.py b/python/digital_rf/digital_metadata.py index 407ad1f..fbd3d75 100644 --- a/python/digital_rf/digital_metadata.py +++ b/python/digital_rf/digital_metadata.py @@ -38,7 +38,7 @@ from six.moves import urllib, zip # local imports -from . import list_drf +from . 
import list_drf, util from ._version import __version__, __version_tuple__ try: @@ -194,6 +194,10 @@ def __init__( raise ValueError(errstr % str(sample_rate_denominator)) self._sample_rate_denominator = int(sample_rate_denominator) + self._sample_rate = util.get_samplerate_frac( + sample_rate_numerator, sample_rate_denominator + ) + # have to go to uint64 before longdouble to ensure correct conversion # from int self._samples_per_second = np.longdouble( @@ -209,6 +213,10 @@ def __init__( self._fields = None # No data written yet self._write_properties() + def get_sample_rate(self): + """Return the sample rate in Hz as a fractions.Fraction.""" + return self._sample_rate + def get_samples_per_second(self): """Return the sample rate in Hz as a np.longdouble.""" return self._samples_per_second @@ -333,7 +341,7 @@ def _sample_group_generator(self, samples): Digital Metadata file and takes its name from the sample index. """ - samples_per_file = self._file_cadence_secs * self._samples_per_second + samples_per_file = self._file_cadence_secs * self._sample_rate for file_idx, sample_group in itertools.groupby( samples, lambda s: np.uint64(s / samples_per_file) ): @@ -472,7 +480,7 @@ def __str__(self): attr_list = ( "_subdir_cadence_secs", "_file_cadence_secs", - "_samples_per_second", + "_sample_rate", "_file_name", ) for attr in attr_list: @@ -587,6 +595,9 @@ def __init__(self, metadata_dir, accept_empty=True): self._samples_per_second = np.longdouble( np.uint64(self._sample_rate_numerator) ) / np.longdouble(np.uint64(self._sample_rate_denominator)) + self._sample_rate = util.get_samplerate_frac( + self._sample_rate_numerator, self._sample_rate_denominator + ) fname = f.attrs["file_name"] if isinstance(fname, bytes): # for convenience and forward-compatibility with h5py>=2.9 @@ -714,6 +725,10 @@ def get_fields(self): # _fields is an internal data structure, so make a copy for the user return copy.deepcopy(self._fields) + def get_sample_rate(self): + """Return the sample 
rate in Hz as a fractions.Fraction.""" + return self._sample_rate + def get_sample_rate_numerator(self): """Return the numerator of the sample rate in Hz.""" return self._sample_rate_numerator @@ -1020,9 +1035,8 @@ def _get_file_list(self, sample0, sample1): scheme. """ - # need to go through numpy uint64 to prevent conversion to float - start_ts = int(np.uint64(np.uint64(sample0) / self._samples_per_second)) - end_ts = int(np.uint64(np.uint64(sample1) / self._samples_per_second)) + start_ts, picoseconds = util.sample_to_time_floor(sample0, self._sample_rate) + end_ts, picoseconds = util.sample_to_time_floor(sample1, self._sample_rate) # convert ts to be divisible by self._file_cadence_secs start_ts = (start_ts // self._file_cadence_secs) * self._file_cadence_secs @@ -1207,7 +1221,7 @@ def __str__(self): attr_list = ( "_subdir_cadence_secs", "_file_cadence_secs", - "_samples_per_second", + "_sample_rate", "_file_name", ) for attr in attr_list: diff --git a/python/digital_rf/digital_rf_hdf5.py b/python/digital_rf/digital_rf_hdf5.py index 4b57f1c..50493d8 100644 --- a/python/digital_rf/digital_rf_hdf5.py +++ b/python/digital_rf/digital_rf_hdf5.py @@ -20,7 +20,6 @@ import collections import datetime -import fractions import glob import os import re @@ -34,7 +33,7 @@ import six # local imports -from . import _py_rf_write_hdf5, digital_metadata, list_drf +from . 
import _py_rf_write_hdf5, digital_metadata, list_drf, util from ._version import __version__, __version_tuple__ __all__ = ( @@ -969,11 +968,11 @@ def read(self, start_sample, end_sample, channel_name, sub_channel=None): # first get the names of all possible files with data subdir_cadence_secs = file_properties["subdir_cadence_secs"] file_cadence_millisecs = file_properties["file_cadence_millisecs"] - samples_per_second = file_properties["samples_per_second"] + sample_rate = file_properties["sample_rate"] filepaths = self._get_file_list( start_sample, end_sample, - samples_per_second, + sample_rate, subdir_cadence_secs, file_cadence_millisecs, ) @@ -1080,7 +1079,7 @@ def get_properties(self, channel_name, sample=None): num_subchannels : int sample_rate_numerator : int sample_rate_denominator : int - samples_per_second : np.longdouble + samples_per_second : np.longdouble (don't rely on this!) subdir_cadence_secs : int The additional properties particular to each file are: @@ -1102,12 +1101,12 @@ def get_properties(self, channel_name, sample=None): subdir_cadence_secs = global_properties["subdir_cadence_secs"] file_cadence_millisecs = global_properties["file_cadence_millisecs"] - samples_per_second = global_properties["samples_per_second"] + sample_rate = global_properties["sample_rate"] file_list = self._get_file_list( sample, sample, - samples_per_second, + sample_rate, subdir_cadence_secs, file_cadence_millisecs, ) @@ -1229,15 +1228,17 @@ def read_metadata(self, start_sample, end_sample, channel_name, method="ffill"): For convenience, some pertinent metadata inherent to the Digital RF channel is added to the Digital Metadata, including: + sample_rate : fractions.Fraction sample_rate_numerator : int sample_rate_denominator : int - samples_per_second : np.longdouble + samples_per_second : np.longdouble (don't rely on this!) 
""" properties = self.get_properties(channel_name) added_metadata = { key: properties[key] for key in ( + "sample_rate", "sample_rate_numerator", "sample_rate_denominator", "samples_per_second", @@ -1301,11 +1302,11 @@ def get_continuous_blocks(self, start_sample, end_sample, channel_name): file_properties = self.get_properties(channel_name) subdir_cadence_secs = file_properties["subdir_cadence_secs"] file_cadence_millisecs = file_properties["file_cadence_millisecs"] - samples_per_second = file_properties["samples_per_second"] + sample_rate = file_properties["sample_rate"] filepaths = self._get_file_list( start_sample, end_sample, - samples_per_second, + sample_rate, subdir_cadence_secs, file_cadence_millisecs, ) @@ -1345,11 +1346,11 @@ def get_last_write(self, channel_name): file_properties = self.get_properties(channel_name) subdir_cadence_seconds = file_properties["subdir_cadence_secs"] file_cadence_millisecs = file_properties["file_cadence_millisecs"] - samples_per_second = file_properties["samples_per_second"] + sample_rate = file_properties["sample_rate"] file_list = self._get_file_list( last_sample - 1, last_sample, - samples_per_second, + sample_rate, subdir_cadence_seconds, file_cadence_millisecs, ) @@ -1602,7 +1603,7 @@ def read_vector_c81d( def _get_file_list( sample0, sample1, - samples_per_second, + sample_rate, subdir_cadence_seconds, file_cadence_millisecs, ): @@ -1622,8 +1623,8 @@ def _get_file_list( Sample index for end of read (inclusive), given in the number of samples since the epoch (time_since_epoch*sample_rate). - samples_per_second : np.longdouble - Sample rate. + sample_rate : fractions.Fraction | first argument to ``util.get_samplerate_frac`` + Sample rate in Hz. subdir_cadence_secs : int Number of seconds of data found in one subdir. 
For example, 3600 @@ -1644,13 +1645,10 @@ def _get_file_list( if (sample1 - sample0) > 1e12: warnstr = "Requested read size, %i samples, is very large" warnings.warn(warnstr % (sample1 - sample0), RuntimeWarning) - sample0 = int(sample0) - sample1 = int(sample1) - # need to go through numpy uint64 to prevent conversion to float - start_ts = int(np.uint64(np.uint64(sample0) / samples_per_second)) - end_ts = int(np.uint64(np.uint64(sample1) / samples_per_second)) + 1 - start_msts = int(np.uint64(np.uint64(sample0) / samples_per_second * 1000)) - end_msts = int(np.uint64(np.uint64(sample1) / samples_per_second * 1000)) + start_ts, picoseconds = util.sample_to_time_floor(sample0, sample_rate) + start_msts = start_ts * 1000 + picoseconds // 1000000000 + end_ts, picoseconds = util.sample_to_time_floor(sample1, sample_rate) + end_msts = end_ts * 1000 + picoseconds // 1000000000 # get subdirectory start and end ts start_sub_ts = int( @@ -1811,8 +1809,9 @@ def __init__(self, channel_name, top_level_dir_meta_list=None): """Create a new _channel_properties object. This populates `self.properties`, which is a dictionary of - attributes found in the HDF5 files (eg, samples_per_second). It also - sets the attribute `max_samples_per_file`. + attributes found in the HDF5 files (e.g. sample_rate_numerator / + sample_rate_denominator). It also sets the attribute + `max_samples_per_file`. 
Parameters @@ -1830,10 +1829,11 @@ def __init__(self, channel_name, top_level_dir_meta_list=None): self.top_level_dir_meta_list = top_level_dir_meta_list self.properties = self._read_properties() file_cadence_millisecs = self.properties["file_cadence_millisecs"] - samples_per_second = self.properties["samples_per_second"] - self.max_samples_per_file = int( - np.uint64(np.ceil(file_cadence_millisecs * samples_per_second / 1000)) - ) + sample_rate_numerator = self.properties["sample_rate_numerator"] + sample_rate_denominator = self.properties["sample_rate_denominator"] + num = file_cadence_millisecs * sample_rate_numerator + den = 1000 * sample_rate_denominator + self.max_samples_per_file = num // den + (num % den != 0) def _read_properties(self): """Get a dict of the properties stored in the drf_properties.h5 file. @@ -1981,13 +1981,15 @@ def _read_properties(self): # if no sample_rate_numerator/sample_rate_denominator, then we must # have an older version with samples_per_second as uint64 sps = ret_dict["samples_per_second"] - spsfrac = fractions.Fraction(sps).limit_denominator() + spsfrac = util.get_samplerate_frac(sps) ret_dict["samples_per_second"] = np.longdouble(sps) ret_dict["sample_rate_numerator"] = spsfrac.numerator ret_dict["sample_rate_denominator"] = spsfrac.denominator + ret_dict["sample_rate"] = spsfrac else: sps = np.longdouble(np.uint64(srn)) / np.longdouble(np.uint64(srd)) ret_dict["samples_per_second"] = sps + ret_dict["sample_rate"] = util.get_samplerate_frac(srn, srd) # success return ret_dict diff --git a/python/digital_rf/util.py b/python/digital_rf/util.py index 641cab5..ac86fdb 100644 --- a/python/digital_rf/util.py +++ b/python/digital_rf/util.py @@ -7,36 +7,225 @@ # The full license is in the LICENSE file, distributed with this software. 
# ---------------------------------------------------------------------------- """Utility functions for Digital RF and Digital Metadata.""" -from __future__ import absolute_import, division, print_function + +from __future__ import annotations import ast import datetime +import fractions +import warnings import dateutil.parser import numpy as np - import six __all__ = ( + "datetime_to_timedelta_tuple", "datetime_to_timestamp", "epoch", + "get_samplerate_frac", "parse_identifier_to_sample", "parse_identifier_to_time", "sample_to_datetime", + "sample_to_time_floor", "samples_to_timedelta", "time_to_sample", + "time_to_sample_ceil", ) -epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) +_default_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) +epoch = _default_epoch -def time_to_sample(time, samples_per_second): - """Get a sample index from a time using a given sample rate. +def get_samplerate_frac(sr_or_numerator, denominator=None): + """Convert argument sample rate to a rational Fraction. + + Arguments are passed directly to the fractions.Fraction class, and the denominator + of the result is limited to 32 bits. + + Parameters + ---------- + sr_or_numerator : int | float | numpy.number | Rational | Decimal | str + Sample rate in Hz, or the numerator of the sample rate if `denominator` is + given. Most numeric types are accepted, falling back to evaluating the argument + as a string if passing directly to fractions.Fraction fails. String arguments + can represent the sample rate or a rational expression like "123/456". + + denominator: int | Rational, optional + Denominator of the sample rate in Hz, if not None. Must be an integer or + a Rational type, as expected by the `denominator` argument of + fractions.Fraction. + + + Returns + ------- + frac : fractions.Fraction + Rational representation of the sample rate. 
+ + """ + try: + frac = fractions.Fraction(sr_or_numerator, denominator) + except TypeError: + # try converting sr to str, then to fraction (works for np.longdouble) + sr_frac = fractions.Fraction(str(sr_or_numerator)) + frac = fractions.Fraction(sr_frac, denominator) + return frac.limit_denominator(2**32) + + +def time_to_sample_ceil(timedelta, sample_rate): + """Convert a timedelta into a number of samples using a given sample rate. + + Ceiling rounding is used so that the value is the whole number of samples + that spans *at least* the given `timedelta` but no more than + ``timedelta + 1 / sample_rate``. This complements the flooring in + `sample_to_time_floor`, so that:: + + time_to_sample_ceil(sample_to_time_floor(sample, sr), sr) == sample + Parameters ---------- + timedelta : (second, picosecond) tuple | np.timedelta64 | datetime.timedelta | float + Time span to convert to a number of samples, either scalar or array_like. + To represent large time spans with high accuracy, pass a 2-tuple containing + the number of whole seconds and additional picoseconds. Floating point + values are interpreted as a number of seconds. + + sample_rate : fractions.Fraction | first argument to ``get_samplerate_frac`` + Sample rate in Hz. + + + Returns + ------- + nsamples : array_like + Number of samples in the `timedelta` time span at a rate of + `sample_rate`, using ceiling rounding (up to the next whole sample). 
+ + """ + if isinstance(timedelta, tuple): + t_sec, t_psec = timedelta + elif hasattr(timedelta, "dtype"): + if np.issubdtype(timedelta.dtype, "timedelta64"): + onesec = np.timedelta64(1, "s") + t_sec = timedelta // onesec + t_psec = (timedelta % onesec) // np.timedelta64(1, "ps") + else: + # floating point seconds + t_sec = np.int64(timedelta) + t_psec = np.int64(np.round((timedelta % 1) * 1e12)) + elif isinstance(timedelta, datetime.timedelta): + # use the exact days/seconds/microseconds decomposition: + # int(total_seconds()) truncates toward zero (wrong for negative + # timedeltas since t_psec below is always non-negative) and goes + # through a float, losing precision for very large spans + t_sec = timedelta.days * 86400 + timedelta.seconds + t_psec = 1000000 * timedelta.microseconds + else: + # float seconds + t_sec = int(timedelta) + t_psec = int(np.round((timedelta % 1) * 1e12)) + # ensure that sample_rate is a fractions.Fraction + if not isinstance(sample_rate, fractions.Fraction): + sample_rate = get_samplerate_frac(sample_rate) + + srn = sample_rate.numerator + srd = sample_rate.denominator + # calculate with divide/modulus split to avoid overflow + # (divide by denominator and track remainder *before* multiplying by numerator) + # sample_idx = t * n / d = (sec + (psec / 1e12)) * n / d + + # start with picosecond part + tmp_div = (t_psec // srd) * srn + tmp_mod = (t_psec % srd) * srn + tmp_div += tmp_mod // srd + tmp_mod = tmp_mod % srd + # quotient and remainder are in terms of (samples * 1e12) + quotient = tmp_div + remainder = tmp_mod # remainder w.r.t. 
sample_rate_denominator + + # multiply and divide second part + tmp_div = (t_sec // srd) * srn + tmp_mod = (t_sec % srd) * srn + tmp_div += tmp_mod // srd + tmp_mod = tmp_mod % srd + # consolidate: tmp_div + tmp_mod / d + quotient / 1e12 + remainder / 1e12 / d + # add second remainder to picosecond part in terms of (samples * 1e12) + remainder += tmp_mod * 1_000_000_000_000 + quotient += remainder // srd + remainder = remainder % srd + # now have: tmp_div + quotient / 1e12 + remainder / 1e12 / d + # consolidate into single quotient and remainder + tmp_div += quotient // 1_000_000_000_000 + quotient = quotient % 1_000_000_000_000 + remainder += quotient * srd + quotient = tmp_div + # now have: quotient + remainder / 1e12 / d + # update remainder to be in terms of samples using ceiling rounding + remainder = remainder // 1_000_000_000_000 + ((remainder % 1_000_000_000_000) != 0) + # now have in terms of samples: quotient + remainder / d + # finally ceiling round remainder into quotient + quotient += remainder != 0 + + return quotient + + +def sample_to_time_floor(nsamples, sample_rate): + """Convert a number of samples into a timedelta using a given sample rate. + + Floor rounding is used so that the given whole number of samples spans + *at least* the returned amount of time, accurate to the picosecond. + This complements the ceiling rounding in `time_to_sample_ceil`, so that:: + + time_to_sample_ceil(sample_to_time_floor(sample, sr), sr) == sample + + + Parameters + ---------- + nsamples : array_like + Whole number of samples to convert into a span of time. + + sample_rate : fractions.Fraction | first argument to ``get_samplerate_frac`` + Sample rate in Hz. + + + Returns + ------- + seconds : array_like + Number of whole seconds in the time span covered by `nsamples` at a rate + of `sample_rate`. 
+ picoseconds : array_like + Number of additional picoseconds in the time span covered by `nsamples`, + using floor rounding (down to the previous whole number of picoseconds). + + """ + # ensure that sample_rate is a fractions.Fraction + if not isinstance(sample_rate, fractions.Fraction): + sample_rate = get_samplerate_frac(sample_rate) + + srn = sample_rate.numerator + srd = sample_rate.denominator + # calculate with divide/modulus split to avoid overflow + # second = s * d // n == ((s // n) * d) + ((si % n) * d) // n + tmp_div = nsamples // srn + tmp_mod = nsamples % srn + seconds = tmp_div * srd + tmp = tmp_mod * srd + tmp_div = tmp // srn + tmp_mod = tmp % srn + seconds += tmp_div + # picoseconds calculated from remainder of division to calculate seconds + # picosecond = rem * 1e12 // n = rem * (1e12 // n) + (rem * (1e12 % n)) // n + tmp = tmp_mod + tmp_div = 1_000_000_000_000 // srn + tmp_mod = 1_000_000_000_000 % srn + picoseconds = (tmp * tmp_div) + ((tmp * tmp_mod) // srn) + + return (seconds, picoseconds) + + +def time_to_sample(time, samples_per_second, epoch=None): + """Get a sample index from a time using a given sample rate. + + Parameters + ---------- time : datetime | float Time corresponding to the desired sample index. If not given as a datetime object, the numeric value is interpreted as a UTC timestamp @@ -45,29 +234,38 @@ def time_to_sample(time, samples_per_second): samples_per_second : np.longdouble Sample rate in Hz. + epoch : datetime, optional + Epoch time. If None, the Digital RF default (the Unix epoch, + January 1, 1970) is used. + Returns ------- - sample_index : int Index to the identified sample given in the number of samples since the epoch (time_since_epoch*sample_per_second). """ + warnings.warn( + "`time_to_sample` is deprecated. 
Use `time_to_sample_ceil` instead in" + " combination with `datetime_to_timedelta_tuple` if necessary.", + DeprecationWarning, + ) if isinstance(time, datetime.datetime): if time.tzinfo is None: # assume UTC if timezone was not specified time = time.replace(tzinfo=datetime.timezone.utc) + if epoch is None: + epoch = _default_epoch td = time - epoch tsec = int(td.total_seconds()) tfrac = 1e-6 * td.microseconds tidx = int(np.uint64(tsec * samples_per_second + tfrac * samples_per_second)) return tidx - else: - return int(np.uint64(time * samples_per_second)) + return int(np.uint64(time * samples_per_second)) -def sample_to_datetime(sample, samples_per_second): +def sample_to_datetime(sample, sample_rate, epoch=None): """Get datetime corresponding to the given sample index. Parameters @@ -76,9 +274,13 @@ def sample_to_datetime(sample, samples_per_second): sample : int Sample index in number of samples since epoch. - samples_per_second : np.longdouble + sample_rate : fractions.Fraction | first argument to ``get_samplerate_frac`` Sample rate in Hz. + epoch : datetime, optional + Epoch time. If None, the Digital RF default (the Unix epoch, + January 1, 1970) is used. + Returns ------- @@ -87,7 +289,11 @@ def sample_to_datetime(sample, samples_per_second): Datetime corresponding to the given sample index. """ - return epoch + samples_to_timedelta(sample, samples_per_second) + if epoch is None: + epoch = _default_epoch + seconds, picoseconds = sample_to_time_floor(sample, sample_rate) + td = datetime.timedelta(seconds=seconds, microseconds=picoseconds // 1000000) + return epoch + td def samples_to_timedelta(samples, samples_per_second): @@ -110,6 +316,12 @@ def samples_to_timedelta(samples, samples_per_second): Timedelta corresponding to the number of samples. """ + warnings.warn( + "`samples_to_timedelta` is deprecated. 
Use `sample_to_time_floor` instead" + " and create a timedelta object if necessary:" + " `datetime.timedelta(seconds=seconds, microseconds=picoseconds // 1000000)`", + DeprecationWarning, + ) # splitting into secs/frac lets us get a more accurate datetime secs = int(samples // samples_per_second) frac = (samples % samples_per_second) / samples_per_second @@ -118,7 +330,40 @@ return datetime.timedelta(seconds=secs, microseconds=microseconds) -def datetime_to_timestamp(dt): +def datetime_to_timedelta_tuple(dt, epoch=None): + """Return timedelta (seconds, picoseconds) tuple from epoch for a datetime object. + + Parameters + ---------- + + dt : datetime + Time specified as a datetime object. + + epoch : datetime, optional + Epoch time for converting absolute `dt` value to a number of seconds + since `epoch`. If None, the Digital RF default (the Unix epoch, + January 1, 1970) is used. + + + Returns + ------- + + (seconds, picoseconds) : tuple of ints + Whole seconds and additional picoseconds elapsed from `epoch` to `dt`. + + """ + if dt.tzinfo is None: + # assume UTC if timezone was not specified + dt = dt.replace(tzinfo=datetime.timezone.utc) + if epoch is None: + epoch = _default_epoch + timedelta = dt - epoch + # include the days component: timedelta.seconds alone is only the + # within-day remainder (0-86399) and would be wrong for any dt more + # than a day away from epoch + seconds = timedelta.days * 86400 + timedelta.seconds + picoseconds = timedelta.microseconds * 1000000 + return (seconds, picoseconds) + + +def datetime_to_timestamp(dt, epoch=None): + """Return time stamp (seconds since epoch) for a given datetime object. Parameters ---------- @@ -127,21 +372,28 @@ dt : datetime Time specified as a datetime object. + epoch : datetime, optional + Epoch time for converting absolute `dt` value to a number of seconds + since `epoch`. If None, the Digital RF default (the Unix epoch, + January 1, 1970) is used. + Returns ------- ts : float - Time stamp (seconds since epoch of digital_rf.util.epoch). + Time stamp (seconds since epoch). 
""" if dt.tzinfo is None: # assume UTC if timezone was not specified dt = dt.replace(tzinfo=datetime.timezone.utc) + if epoch is None: + epoch = _default_epoch return (dt - epoch).total_seconds() -def parse_identifier_to_sample(iden, samples_per_second=None, ref_index=None): +def parse_identifier_to_sample(iden, sample_rate=None, ref_index=None, epoch=None): """Get a sample index from different forms of identifiers. Parameters @@ -162,12 +414,17 @@ def parse_identifier_to_sample(iden, samples_per_second=None, ref_index=None): 3) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z' 4) 'now' ('nowish'), indicating the current time (rounded up) - samples_per_second : np.longdouble, required for float and time `iden` - Sample rate in Hz used to convert a time to a sample index. + sample_rate : fractions.Fraction | first argument to ``get_samplerate_frac`` + Sample rate in Hz used to convert a time to a sample index. Required + when `iden` is given as a float or a time. - ref_index : int/long, required for '+' string form of `iden` + ref_index : int Reference index from which string `iden` beginning with '+' are - offset. + offset. Required when `iden` is a string that begins with '+'. + + epoch : datetime, optional + Epoch time to use in converting an `iden` representing an absolute time. + If None, the Digital RF default (the Unix epoch, January 1, 1970) is used. 
Returns @@ -181,7 +438,7 @@ def parse_identifier_to_sample(iden, samples_per_second=None, ref_index=None): is_relative = False if iden is None or iden == "": return None - elif isinstance(iden, six.string_types): + if isinstance(iden, six.string_types): if iden.startswith("+"): is_relative = True iden = iden.lstrip("+") @@ -203,11 +460,19 @@ def parse_identifier_to_sample(iden, samples_per_second=None, ref_index=None): iden = dateutil.parser.parse(iden) if not isinstance(iden, six.integer_types): - if samples_per_second is None: - raise ValueError( - "samples_per_second required when time identifier is used." - ) - idx = time_to_sample(iden, samples_per_second) + if sample_rate is None: + raise ValueError("sample_rate required when time identifier is used.") + if epoch is None: + epoch = _default_epoch + if isinstance(iden, datetime.datetime): + iden = iden - epoch + elif not is_relative: + # interpret float time as timestamp from unix epoch, so adjust from + # unix epoch to specified sample epoch + iden -= ( + epoch - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + ).total_seconds() + idx = time_to_sample_ceil(iden, sample_rate) else: idx = iden @@ -215,11 +480,10 @@ def parse_identifier_to_sample(iden, samples_per_second=None, ref_index=None): if ref_index is None: raise ValueError('ref_index required when relative "+" identifier is used.') return idx + ref_index - else: - return idx + return idx -def parse_identifier_to_time(iden, samples_per_second=None, ref_datetime=None): +def parse_identifier_to_time(iden, sample_rate=None, ref_datetime=None, epoch=None): """Get a time from different forms of identifiers. Parameters @@ -231,7 +495,7 @@ def parse_identifier_to_time(iden, samples_per_second=None, ref_datetime=None): If a float, it is interpreted as a UTC timestamp (seconds since epoch) and the corresponding datetime is returned. 
If an integer, it is interpreted as a sample index when - `samples_per_second` is not None and a UTC timestamp otherwise. + `sample_rate` is not None and a UTC timestamp otherwise. If a string, four forms are permitted: 1) a string which can be evaluated to an integer/float and interpreted as above, @@ -241,13 +505,18 @@ def parse_identifier_to_time(iden, samples_per_second=None, ref_datetime=None): 3) a time in ISO8601 format, e.g. '2016-01-01T16:24:00Z' 4) 'now' ('nowish'), indicating the current time (rounded up) - samples_per_second : np.longdouble, required for integer `iden` - Sample rate in Hz used to convert a sample index to a time. + sample_rate : fractions.Fraction | first argument to ``get_samplerate_frac`` + Sample rate in Hz used to convert a sample index to a time. Required + when `iden` is given as an integer. ref_datetime : datetime, required for '+' string form of `iden` Reference time from which string `iden` beginning with '+' are offset. Must be timezone-aware. + epoch : datetime, optional + Epoch time to use in converting an `iden` representing a sample index. + If None, the Digital RF default (the Unix epoch, January 1, 1970) is used. 
+ Returns ------- @@ -259,7 +528,7 @@ def parse_identifier_to_time(iden, samples_per_second=None, ref_datetime=None): is_relative = False if iden is None or iden == "": return None - elif isinstance(iden, six.string_types): + if isinstance(iden, six.string_types): if iden.startswith("+"): is_relative = True iden = iden.lstrip("+") @@ -283,21 +552,27 @@ def parse_identifier_to_time(iden, samples_per_second=None, ref_datetime=None): dt = dt.replace(tzinfo=datetime.timezone.utc) return dt - if isinstance(iden, float) or samples_per_second is None: + if isinstance(iden, float) or sample_rate is None: td = datetime.timedelta(seconds=iden) + # timestamp is relative to unix epoch always + epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) else: - td = samples_to_timedelta(iden, samples_per_second) + seconds, picoseconds = sample_to_time_floor(iden, sample_rate) + td = datetime.timedelta(seconds=seconds, microseconds=picoseconds // 1000000) + # identifier is a sample converted to a timedelta, now it should be + # converted to an absolute time using the specified sample epoch + if epoch is None: + epoch = _default_epoch if is_relative: if ref_datetime is None: raise ValueError( 'ref_datetime required when relative "+" identifier is used.' ) - elif ( + if ( not isinstance(ref_datetime, datetime.datetime) or ref_datetime.tzinfo is None ): raise ValueError("ref_datetime must be a timezone-aware datetime.") return td + ref_datetime - else: - return td + epoch + return td + epoch diff --git a/python/examples/beacon/beacon_record.py b/python/examples/beacon/beacon_record.py index 6174054..0964e10 100644 --- a/python/examples/beacon/beacon_record.py +++ b/python/examples/beacon/beacon_record.py @@ -13,6 +13,7 @@ Example configurations are included along with this script. 
""" + from __future__ import absolute_import, division, print_function import datetime @@ -29,7 +30,6 @@ import ephem import numpy as np from digital_rf import DigitalMetadataWriter - from six.moves import configparser @@ -580,7 +580,7 @@ def ephemeris_passes(opt, st0, et0): if opt.el_mask: el_val = np.rad2deg(el) - el_mask = np.float(opt.el_mask) + el_mask = np.float64(opt.el_mask) if opt.debug: print("# el_val ", el_val, " el_mask ", el_mask) diff --git a/python/examples/benchmark_rf_read_hdf5.py b/python/examples/benchmark_rf_read_hdf5.py index 27299d4..978c2d1 100644 --- a/python/examples/benchmark_rf_read_hdf5.py +++ b/python/examples/benchmark_rf_read_hdf5.py @@ -11,6 +11,7 @@ Assumes the benchmark write script has already been run. """ + from __future__ import absolute_import, division, print_function import os diff --git a/python/examples/benchmark_rf_write_hdf5.py b/python/examples/benchmark_rf_write_hdf5.py index e25ec65..932ecfc 100644 --- a/python/examples/benchmark_rf_write_hdf5.py +++ b/python/examples/benchmark_rf_write_hdf5.py @@ -7,6 +7,7 @@ # The full license is in the LICENSE file, distributed with this software. 
# ---------------------------------------------------------------------------- """Benchmark I/O of Digital RF write in different configurations.""" + from __future__ import absolute_import, division, print_function import os @@ -22,14 +23,14 @@ N_WRITES = int(1e9 // WRITE_BLOCK_SIZE) SAMPLE_RATE_NUMERATOR = int(1e9) SAMPLE_RATE_DENOMINATOR = 1 -sample_rate = np.longdouble(np.uint64(SAMPLE_RATE_NUMERATOR)) / np.longdouble( - np.uint64(SAMPLE_RATE_DENOMINATOR) +sample_rate = digital_rf.util.get_samplerate_frac( + SAMPLE_RATE_NUMERATOR, SAMPLE_RATE_DENOMINATOR ) subdir_cadence_secs = 3600 file_cadence_millisecs = 10 # start 2014-03-09 12:30:30 plus one sample -start_global_index = int(np.uint64(1394368230 * sample_rate)) + 1 +start_global_index = digital_rf.util.time_to_sample_ceil(1394368230, sample_rate) + 1 # data to write data_int16 = np.zeros((WRITE_BLOCK_SIZE, 2), dtype="i2") @@ -37,8 +38,8 @@ for i in range(WRITE_BLOCK_SIZE): j = i * 2 k = i * 2 + 1 - data_int16[i][0] = (j % 32768) * (j + 8192) * (j % 13) - data_int16[i][1] = (k % 32768) * (k + 8192) * (k % 13) + data_int16[i][0] = np.array((j % 32768) * (j + 8192) * (j % 13)).astype("i2") + data_int16[i][1] = np.array((k % 32768) * (k + 8192) * (k % 13)).astype("i2") datadir = os.path.join(tempfile.gettempdir(), "benchmark_digital_rf") print("creating top level dir {0}".format(datadir)) diff --git a/python/examples/example_read_digital_metadata.py b/python/examples/example_read_digital_metadata.py index bc888b1..7495370 100644 --- a/python/examples/example_read_digital_metadata.py +++ b/python/examples/example_read_digital_metadata.py @@ -11,13 +11,13 @@ Assumes the example Digital Metadata write script has already been run. 
""" + from __future__ import absolute_import, division, print_function import os import tempfile import digital_rf -import numpy as np metadata_dir = os.path.join(tempfile.gettempdir(), "example_metadata") stime = 1447082580 @@ -29,7 +29,7 @@ raise print("init okay") -start_idx = int(np.uint64(stime * dmr.get_samples_per_second())) +start_idx = digital_rf.util.time_to_sample_ceil(stime, dmr.get_sample_rate()) first_sample, last_sample = dmr.get_bounds() print("bounds are %i to %i" % (first_sample, last_sample)) @@ -55,6 +55,10 @@ latest_meta = dmr.read_latest() print(latest_meta) +print("test of get_sample_rate") +sr = dmr.get_sample_rate() +print(sr) + print("test of get_samples_per_second") sps = dmr.get_samples_per_second() print(sps) diff --git a/python/examples/example_write_digital_metadata.py b/python/examples/example_write_digital_metadata.py index 661b7d0..cc0b308 100644 --- a/python/examples/example_write_digital_metadata.py +++ b/python/examples/example_write_digital_metadata.py @@ -12,6 +12,7 @@ number of levels. 
""" + from __future__ import absolute_import, division, print_function import os @@ -24,8 +25,8 @@ metadata_dir = os.path.join(tempfile.gettempdir(), "example_metadata") subdirectory_cadence_seconds = 3600 file_cadence_seconds = 60 -samples_per_second_numerator = 10 -samples_per_second_denominator = 9 +sample_rate_numerator = 10 +sample_rate_denominator = 9 file_name = "rideout" stime = 1447082580 @@ -36,14 +37,14 @@ metadata_dir, subdirectory_cadence_seconds, file_cadence_seconds, - samples_per_second_numerator, - samples_per_second_denominator, + sample_rate_numerator, + sample_rate_denominator, file_name, ) print("first create okay") data_dict = {} -start_idx = int(np.uint64(stime * dmw.get_samples_per_second())) +start_idx = digital_rf.util.time_to_sample_ceil(stime, dmw.get_sample_rate()) # To save an array of data, make sure the first axis has the same length # as the samples index idx_arr = np.arange(70, dtype=np.int64) + start_idx @@ -95,8 +96,8 @@ metadata_dir, subdirectory_cadence_seconds, file_cadence_seconds, - samples_per_second_numerator, - samples_per_second_denominator, + sample_rate_numerator, + sample_rate_denominator, file_name, ) print("second create okay") diff --git a/python/examples/example_write_digital_rf.py b/python/examples/example_write_digital_rf.py index 3b1a3ec..d572bf7 100644 --- a/python/examples/example_write_digital_rf.py +++ b/python/examples/example_write_digital_rf.py @@ -11,6 +11,7 @@ Writes continuous complex short data. 
""" + from __future__ import absolute_import, division, print_function import os @@ -26,7 +27,9 @@ # writing parameters sample_rate_numerator = int(100) # 100 Hz sample rate - typically MUCH faster sample_rate_denominator = 1 -sample_rate = np.longdouble(sample_rate_numerator) / sample_rate_denominator +sample_rate = digital_rf.util.get_samplerate_frac( + sample_rate_numerator, sample_rate_denominator +) dtype_str = "i2" # short int sub_cadence_secs = ( 4 # Number of seconds of data in a subdirectory - typically MUCH larger @@ -50,7 +53,7 @@ arr_data[i]["i"] = 3 * i # start 2014-03-09 12:30:30 plus one sample -start_global_index = int(np.uint64(1394368230 * sample_rate)) + 1 +start_global_index = digital_rf.util.time_to_sample_ceil(1394368230, sample_rate) + 1 # set up top level directory shutil.rmtree(chdir, ignore_errors=True) diff --git a/python/examples/sounder/prc_analyze.py b/python/examples/sounder/prc_analyze.py index db44af2..ee1e35e 100755 --- a/python/examples/sounder/prc_analyze.py +++ b/python/examples/sounder/prc_analyze.py @@ -16,6 +16,7 @@ doi:10.5194/amt-9-829-2016, 2016. """ + from __future__ import absolute_import, division, print_function import datetime @@ -224,7 +225,7 @@ def analyze_prc( os.remove(f) d = drf.DigitalRFReader(op.datadir) - sr = d.get_properties(op.ch)["samples_per_second"] + sr = d.get_properties(op.ch)["sample_rate"] b = d.get_bounds(op.ch) idx = np.array(b[0]) if os.path.isfile(datpath): diff --git a/python/examples/sounder/tx.py b/python/examples/sounder/tx.py index 43f37d5..3044792 100755 --- a/python/examples/sounder/tx.py +++ b/python/examples/sounder/tx.py @@ -8,6 +8,7 @@ # The full license is in the LICENSE file, distributed with this software. 
# ---------------------------------------------------------------------------- """Transmit waveforms with synchronized USRPs.""" + from __future__ import absolute_import, division, print_function import math @@ -25,7 +26,6 @@ import digital_rf as drf import numpy as np from gnuradio import analog, blocks, gr, uhd - from six.moves import configparser @@ -405,15 +405,11 @@ def _usrp_setup(self): u.set_samp_rate(float(op.samplerate)) # read back actual value samplerate = u.get_samp_rate() - # calculate longdouble precision sample rate + # calculate rational sample rate # (integer division of clock rate) cr = u.get_clock_rate() srdec = int(round(cr / samplerate)) - samplerate_ld = np.longdouble(cr) / srdec - op.samplerate = samplerate_ld - sr_rat = Fraction(cr).limit_denominator() / srdec - op.samplerate_num = sr_rat.numerator - op.samplerate_den = sr_rat.denominator + op.samplerate = drf.util.get_samplerate_frac(cr, srdec) # set per-channel options # set command time so settings are synced @@ -626,14 +622,14 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): now = datetime.now(tz=timezone.utc) # launch on integer second by default for convenience (ceil + 1) lt = now.replace(microsecond=0) + timedelta(seconds=2) - ltts = (lt - drf.util.epoch).total_seconds() + lttd = lt - drf.util.epoch # adjust launch time forward so it falls on an exact sample since epoch - lt_samples = np.ceil(ltts * op.samplerate) - ltts = lt_samples / op.samplerate - lt = drf.util.sample_to_datetime(lt_samples, op.samplerate) + lt_rsamples = drf.util.time_to_sample_ceil(lttd, op.samplerate) + lt = drf.util.sample_to_datetime(lt_rsamples, op.samplerate) if op.verbose: ltstr = lt.strftime("%a %b %d %H:%M:%S.%f %Y") - print("Launch time: {0} ({1})".format(ltstr, repr(ltts))) + msg = "Launch time: {0} ({1})" + print(msg.format(ltstr, repr(lt.timestamp()))) # command launch time ct_td = lt - drf.util.epoch ct_secs = ct_td.total_seconds() // 1.0 diff --git 
a/python/gr_digital_rf/digital_rf_sink.py b/python/gr_digital_rf/digital_rf_sink.py index f8e7338..eb8a34b 100644 --- a/python/gr_digital_rf/digital_rf_sink.py +++ b/python/gr_digital_rf/digital_rf_sink.py @@ -7,6 +7,7 @@ # The full license is in the LICENSE file, distributed with this software. # ---------------------------------------------------------------------------- """Module defining a Digital RF Source block.""" + from __future__ import absolute_import, division, print_function import os @@ -19,19 +20,18 @@ import numpy as np import pmt import six +from digital_rf import DigitalMetadataWriter, DigitalRFWriter, _py_rf_write_hdf5, util from gnuradio import gr from six.moves import zip -from digital_rf import DigitalMetadataWriter, DigitalRFWriter, _py_rf_write_hdf5, util - -def parse_time_pmt(val, samples_per_second): +def parse_time_pmt(val, sample_rate): """Get (sec, frac, idx) from an rx_time pmt value.""" - tsec = np.uint64(pmt.to_uint64(pmt.tuple_ref(val, 0))) + tsec = int(np.uint64(pmt.to_uint64(pmt.tuple_ref(val, 0)))) tfrac = pmt.to_double(pmt.tuple_ref(val, 1)) # calculate sample index of time and floor to uint64 - tidx = np.uint64(tsec * samples_per_second + tfrac * samples_per_second) - return int(tsec), tfrac, int(tidx) + tidx = util.time_to_sample_ceil((tsec, int(tfrac * 1e12)), sample_rate) + return tsec, tfrac, tidx def translate_rx_freq(tag): @@ -319,12 +319,12 @@ def __init__( self._work_done = False - self._samples_per_second = np.longdouble( - np.uint64(sample_rate_numerator) - ) / np.longdouble(np.uint64(sample_rate_denominator)) + self._sample_rate = util.get_samplerate_frac( + self._sample_rate_numerator, self._sample_rate_denominator + ) if min_chunksize is None: - self._min_chunksize = max(int(self._samples_per_second // 1000), 1) + self._min_chunksize = max(int(self._sample_rate // 1000), 1) else: self._min_chunksize = min_chunksize @@ -346,7 +346,7 @@ def __init__( # will be None if start is None or '' self._start_sample = 
util.parse_identifier_to_sample( - start, self._samples_per_second, None + start, self._sample_rate, None ) if self._start_sample is None: if self._ignore_tags: @@ -360,9 +360,8 @@ def __init__( self._next_rel_sample = 0 if self._debug: tidx = self._start_sample - timedelta = util.samples_to_timedelta(tidx, self._samples_per_second) - tsec = int(timedelta.total_seconds() // 1) - tfrac = timedelta.microseconds / 1e6 + tsec, picoseconds = util.sample_to_time_floor(tidx, self._sample_rate) + tfrac = picoseconds / 1e12 tagstr = ("|{0}|start @ sample 0: {1}+{2} ({3})\n").format( self._channel_name, tsec, tfrac, tidx ) @@ -462,7 +461,7 @@ def _read_tags(self, nsamples): # separate data into blocks to be written for tag in time_tags: offset = tag.offset - tsec, tfrac, tidx = parse_time_pmt(tag.value, self._samples_per_second) + tsec, tfrac, tidx = parse_time_pmt(tag.value, self._sample_rate) # index into data block for this tag bidx = offset - nread diff --git a/python/gr_digital_rf/digital_rf_source.py b/python/gr_digital_rf/digital_rf_source.py index 3f9cd44..56545d7 100644 --- a/python/gr_digital_rf/digital_rf_source.py +++ b/python/gr_digital_rf/digital_rf_source.py @@ -205,7 +205,7 @@ def __init__( itemsize = self._properties["H5Tget_size"] is_complex = self._properties["is_complex"] vlen = self._properties["num_subchannels"] - sr = self._properties["samples_per_second"] + sr = self._properties["sample_rate"] self._itemsize = itemsize self._sample_rate = sr @@ -289,9 +289,9 @@ def _queue_tags(self, sample, tags): tag_dict = self._tag_queue.get(sample, {}) if not tag_dict: # add time and rate tags - time = np.uint64(sample) / self._sample_rate + seconds, picoseconds = util.sample_to_time_floor(sample, self._sample_rate) tag_dict["rx_time"] = pmt.make_tuple( - pmt.from_uint64(int(np.uint64(time))), pmt.from_double(float(time % 1)) + pmt.from_uint64(seconds), pmt.from_double(picoseconds / 1e12) ) tag_dict["rx_rate"] = self._sample_rate_pmt for k, v in tags.items(): diff 
--git a/python/lib/py_rf_write_hdf5.c b/python/lib/py_rf_write_hdf5.c index d6c7eb5..02e83f4 100644 --- a/python/lib/py_rf_write_hdf5.c +++ b/python/lib/py_rf_write_hdf5.c @@ -544,6 +544,111 @@ static PyObject * _py_rf_write_hdf5_get_unix_time(PyObject * self, PyObject * ar } +static PyObject * _py_rf_write_hdf5_get_timestamp_floor(PyObject * self, PyObject * args) +/* _py_rf_write_hdf5_get_timestamp_floor converts a sample index into the nearest + * earlier timestamp (flooring) divided into second and picosecond parts, using + * the sample rate expressed as a rational fraction. + * + * Flooring is used so that sample falls in the window of time represented by + * the returned timestamp, which includes that time up until the next possible + * timestamp: second + [picosecond, picosecond + 1). + * + * Inputs: python list with + * 1. unix_sample_index - python int representing number of samples at given sample rate since UT midnight 1970-01-01 + * 2. sample_rate_numerator - python int sample rate numerator in Hz + * 3. 
sample_rate_denominator - python int sample rate denominator in Hz + * + * Returns tuple with (second,picosecond) if success, NULL pointer if not + */ +{ + // input arguments + uint64_t unix_sample_index = 0; + uint64_t sample_rate_numerator = 0; + uint64_t sample_rate_denominator = 0; + + // local variables + PyObject *retObj; + uint64_t second; + uint64_t picosecond; + int result; + + // parse input arguments + if (!PyArg_ParseTuple(args, "KKK", + &unix_sample_index, + &sample_rate_numerator, + &sample_rate_denominator)) + { + return NULL; + } + + // call underlying method + result = digital_rf_get_timestamp_floor( + unix_sample_index, sample_rate_numerator, sample_rate_denominator, + &second, &picosecond); + if (result != 0) + return(NULL); + + // create needed object + retObj = Py_BuildValue("KK", second, picosecond); + + //return tuple; + return(retObj); + +} + + +static PyObject * _py_rf_write_hdf5_get_sample_ceil(PyObject * self, PyObject * args) +/* _py_rf_write_hdf5_get_sample_ceil converts a timestamp (divided into second + * and picosecond parts) into the next nearest sample (ceil), using the sample + * rate expressed as a rational fraction. + * + * Ceiling is used to complement the flooring in get_timestamp_floor, so that + * get_sample_ceil(get_timestamp_floor(sample_index)) == sample_index. + * + * Inputs: python list with + * 1. second - python int giving the whole seconds part of the timestamp + * 2. picosecond - python int giving the picoseconds part of the timestamp + * 3. sample_rate_numerator - python int sample rate numerator in Hz + * 4.
sample_rate_denominator - python int sample rate denominator in Hz + * + * Returns an integer sample index if success, NULL pointer if not + */ +{ + // input arguments + uint64_t second = 0; + uint64_t picosecond = 0; + uint64_t sample_rate_numerator = 0; + uint64_t sample_rate_denominator = 0; + + // local variables + PyObject *retObj; + uint64_t sample_index; + int result; + + // parse input arguments + if (!PyArg_ParseTuple(args, "KKKK", + &second, + &picosecond, + &sample_rate_numerator, + &sample_rate_denominator)) + { + return NULL; + } + + // call underlying method + result = digital_rf_get_sample_ceil( + second, picosecond, sample_rate_numerator, sample_rate_denominator, + &sample_index); + if (result != 0) + return(NULL); + + // create needed object + retObj = Py_BuildValue("K", sample_index); + return(retObj); + +} + + /********** Initialization code for module ******************************/ @@ -556,6 +661,8 @@ static PyMethodDef _py_rf_write_hdf5Methods[] = {"get_last_dir_written", _py_rf_write_hdf5_get_last_dir_written, METH_VARARGS}, {"get_last_utc_timestamp", _py_rf_write_hdf5_get_last_utc_timestamp,METH_VARARGS}, {"get_unix_time", _py_rf_write_hdf5_get_unix_time, METH_VARARGS}, + {"get_timestamp_floor", _py_rf_write_hdf5_get_timestamp_floor, METH_VARARGS}, + {"get_sample_ceil", _py_rf_write_hdf5_get_sample_ceil, METH_VARARGS}, {"get_version", _py_rf_write_hdf5_get_version, METH_NOARGS}, {NULL, NULL} /* Sentinel */ }; diff --git a/python/tests/conftest.py b/python/tests/conftest.py index b02f994..d2bffdb 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -6,6 +6,16 @@ # # The full license is in the LICENSE file, distributed with this software. 
# ---------------------------------------------------------------------------- +from __future__ import annotations + +import datetime +import itertools +import os + +import numpy as np +import pytest + +import digital_rf def pytest_configure(config): @@ -36,3 +46,432 @@ def pytest_collection_modifyitems(items): selected_items.append(item) items[:] = selected_items + + +############################################################################### +# constant fixtures ######################################################### +############################################################################### + + +@pytest.fixture(scope="session") +def start_timestamp_tuple(): + start_dt = datetime.datetime( + 2014, 3, 9, 12, 30, 30, 0, tzinfo=datetime.timezone.utc + ) + timedelta = start_dt - digital_rf.util.epoch + seconds = int(timedelta.total_seconds()) + picoseconds = timedelta.microseconds * 1000000 + return (seconds, picoseconds) + + +############################################################################### +# parametrized fixtures ##################################################### +############################################################################### + + +_dtype_strs = [ + "s8", + "sc8", + "u8", + "uc8", + "s16", + "sc16", + "u16", + "uc16", + "s32", + "sc32", + "u32", + "uc32", + "s64", + "sc64", + "u64", + "uc64", + "f32", + "fc32", + "f64", + "fc64", +] +_num_subchannels = [(1, "x1"), (8, "x8")] +_df_params = list(itertools.product(_dtype_strs, (sc[0] for sc in _num_subchannels))) +_df_ids = list( + "".join(p) + for p in itertools.product(_dtype_strs, (sc[1] for sc in _num_subchannels)) +) + + +@pytest.fixture(scope="session", params=_df_params, ids=_df_ids) +def data_params(request): + dtypes = dict( + s8=np.dtype("i1"), + sc8=np.dtype([("r", "i1"), ("i", "i1")]), + u8=np.dtype("u1"), + uc8=np.dtype([("r", "u1"), ("i", "u1")]), + s16=np.dtype("i2"), + sc16=np.dtype([("r", "i2"), ("i", "i2")]), + u16=np.dtype("u2"), + uc16=np.dtype([("r", 
"u2"), ("i", "u2")]), + s32=np.dtype("i4"), + sc32=np.dtype([("r", "i4"), ("i", "i4")]), + u32=np.dtype("u4"), + uc32=np.dtype([("r", "u4"), ("i", "u4")]), + s64=np.dtype("i8"), + sc64=np.dtype([("r", "i8"), ("i", "i8")]), + u64=np.dtype("u8"), + uc64=np.dtype([("r", "u8"), ("i", "u8")]), + f32=np.dtype("f4"), + fc32=np.dtype("c8"), + f64=np.dtype("f8"), + fc64=np.dtype("c16"), + ) + dtype_str = request.param[0] + dtype = dtypes[dtype_str] + is_complex = dtype.names is not None or np.issubdtype(dtype, np.complexfloating) + num_subchannels = request.param[1] + return dict( + dtype_str=dtype_str, + dtype=dtype, + is_complex=is_complex, + num_subchannels=num_subchannels, + ) + + +@pytest.fixture(scope="session", params=[True, False], ids=["continuous", "gapped"]) +def file_params(request): + return dict(is_continuous=request.param) + + +@pytest.fixture( + scope="session", + params=[ + # compression_level, checksum + (0, False), + (9, True), + ], + ids=["nozip,nochecksum", "zip,checksum"], +) +def hdf_filter_params(request): + return dict(compression_level=request.param[0], checksum=request.param[1]) + + +@pytest.fixture( + scope="session", + params=[ + # sample rates must be set so that start_timestamp_tuple is an exact sample time + # srnum, srden, sdcsec, fcms + (200, 3, 2, 400) + ], + ids=["200/3hz,2s,400ms"], +) +def sample_params(request): + return dict( + sample_rate_numerator=request.param[0], + sample_rate_denominator=request.param[1], + subdir_cadence_secs=request.param[2], + file_cadence_millisecs=request.param[3], + ) + + +############################################################################### +# remaining fixtures ######################################################## +############################################################################### + + +@pytest.fixture(scope="session") +def param_dict( + data_params, file_params, hdf_filter_params, sample_params, start_global_index +): + params = dict(start_global_index=start_global_index) + 
params.update(data_params) + params.update(file_params) + params.update(hdf_filter_params) + params.update(sample_params) + return params + + +@pytest.fixture(scope="session") +def param_str(param_dict): + s = ( + "{dtype_str}_x{num_subchannels}_n{sample_rate_numerator}" + "_d{sample_rate_denominator}_{subdir_cadence_secs}s" + "_{file_cadence_millisecs}ms_c{is_continuous:b}" + ).format(**param_dict) + return s + + +@pytest.fixture(scope="session") +def dtype_str(data_params): + return data_params["dtype_str"] + + +@pytest.fixture(scope="session") +def dtype(data_params): + return data_params["dtype"] + + +@pytest.fixture(scope="session") +def is_complex(data_params): + return data_params["is_complex"] + + +@pytest.fixture(scope="session") +def num_subchannels(data_params): + return data_params["num_subchannels"] + + +@pytest.fixture(scope="session") +def is_continuous(file_params): + return file_params["is_continuous"] + + +@pytest.fixture(scope="session") +def compression_level(hdf_filter_params): + return hdf_filter_params["compression_level"] + + +@pytest.fixture(scope="session") +def checksum(hdf_filter_params): + return hdf_filter_params["checksum"] + + +@pytest.fixture(scope="session") +def sample_rate_numerator(sample_params): + return sample_params["sample_rate_numerator"] + + +@pytest.fixture(scope="session") +def sample_rate_denominator(sample_params): + return sample_params["sample_rate_denominator"] + + +@pytest.fixture(scope="session") +def subdir_cadence_secs(sample_params): + return sample_params["subdir_cadence_secs"] + + +@pytest.fixture(scope="session") +def file_cadence_millisecs(sample_params): + return sample_params["file_cadence_millisecs"] + + +@pytest.fixture(scope="session") +def sample_rate(sample_rate_numerator, sample_rate_denominator): + return digital_rf.util.get_samplerate_frac( + sample_rate_numerator, sample_rate_denominator + ) + + +@pytest.fixture(scope="session") +def start_global_index(sample_rate, start_timestamp_tuple): + 
return digital_rf.util.time_to_sample_ceil(start_timestamp_tuple, sample_rate) + + +@pytest.fixture(scope="session") +def start_datetime(start_timestamp_tuple): + seconds, picoseconds = start_timestamp_tuple + start_dt = datetime.datetime.fromtimestamp(seconds, tz=datetime.timezone.utc) + start_dt += datetime.timedelta(microseconds=picoseconds // 1000000) + return start_dt + + +@pytest.fixture(scope="session") +def end_global_index( + file_cadence_millisecs, + sample_rate, + start_global_index, + subdir_cadence_secs, +): + # want data to span at least two subdirs to test creation + naming + # also needs to span at least 8 files to accommodate write blocks (below) + nsamples_subdirs = digital_rf.util.time_to_sample_ceil( + 1.5 * subdir_cadence_secs, sample_rate + ) + nsamples_files = digital_rf.util.time_to_sample_ceil( + 8 * file_cadence_millisecs / 1000, sample_rate + ) + nsamples = max(nsamples_subdirs, nsamples_files) + return start_global_index + nsamples - 1 + + +@pytest.fixture(scope="session") +def bounds( + end_global_index, + file_cadence_millisecs, + is_continuous, + sample_rate_numerator, + sample_rate_denominator, + start_global_index, +): + if is_continuous: + srn = sample_rate_numerator + srd = sample_rate_denominator + fcms = file_cadence_millisecs + + # bounds are at file boundaries + # milliseconds of file with start_global_index + sms = (((start_global_index * srd * 1000) // srn) // fcms) * fcms + # sample index at start of file (ceil b/c start at first whole sample) + ss = ((sms * srn) + (srd * 1000) - 1) // (srd * 1000) + # milliseconds of file after end_global_index + ems = ((((end_global_index * srd * 1000) // srn) // fcms) + 1) * fcms + # sample index at start of file (ceil b/c start at first whole sample) + es = ((ems * srn) + (srd * 1000) - 1) // (srd * 1000) + return (ss, es - 1) + return (start_global_index, end_global_index) + + +@pytest.fixture(scope="session") +def data_block_slices( + bounds, + end_global_index, + 
file_cadence_millisecs, + sample_rate, + start_global_index, +): + # blocks = [(start_sample, stop_sample)] + blocks = [] + samples_per_file = digital_rf.util.time_to_sample_ceil( + file_cadence_millisecs / 1000, sample_rate + ) + + # first block stops in middle of second file + sstart = start_global_index + sstop = bounds[0] + int(1.5 * samples_per_file) + blocks.append((sstart, sstop)) + + # second block continues where first ended, stops in middle of third file + sstart = sstop + sstop = bounds[0] + int(2.5 * samples_per_file) + blocks.append((sstart, sstop)) + + # first gap, no skipped file + # third block starts in middle of 4th file, ends in middle of 5th + sstart = bounds[0] + int(3.5 * samples_per_file) + sstop = bounds[0] + int(4.5 * samples_per_file) + blocks.append((sstart, sstop)) + + # second gap, skipping one file completely + # fourth block starts in middle of 7th file, ends later in 7th file + sstart = bounds[0] + int(6.5 * samples_per_file) + sstop = bounds[0] + int(6.9 * samples_per_file) + blocks.append((sstart, sstop)) + + # fifth and final block continues just after previous write and goes to end + sstart = sstop + 2 + sstop = end_global_index + 1 + blocks.append((sstart, sstop)) + + return blocks + + +def generate_rf_data(shape, dtype, seed): + np.random.seed(seed % 2**32) + nitems = np.prod(shape) + byts = np.random.randint(0, 256, nitems * dtype.itemsize, "u1") + return byts.view(dtype).reshape(shape) + + +@pytest.fixture(scope="session") +def data(bounds, data_block_slices, dtype, num_subchannels): + data_len = bounds[1] - bounds[0] + 1 + if num_subchannels == 1: + shape = (data_len,) + else: + shape = (data_len, num_subchannels) + + # start off with data array containing its appropriate fill value + if np.issubdtype(dtype, np.inexact): + fill_value = np.nan + elif dtype.names is not None: + fill_value = np.empty(1, dtype=dtype)[0] + fill_value["r"] = np.iinfo(dtype["r"]).min + fill_value["i"] = np.iinfo(dtype["i"]).min + else: + fill_value 
= np.iinfo(dtype).min + data = np.full(shape, fill_value, dtype=dtype) + + rdata = generate_rf_data(shape, dtype, 0) + # fill array data for what we will be writing + for sstart, sstop in data_block_slices: + bstart = sstart - bounds[0] + bend = sstop - bounds[0] + data[bstart:bend] = rdata[bstart:bend] + + return data + + +@pytest.fixture(scope="session") +def data_file_list(sample_params): + # manually generated to match output of writing data given + # sample_params + # get params as a tuple of values sorted in key order + params = tuple(v for k, v in sorted(sample_params.items())) + file_lists = { + (400, 3, 200, 2): [ + os.path.join("2014-03-09T12-30-30", "rf@1394368230.000.h5"), + os.path.join("2014-03-09T12-30-30", "rf@1394368230.400.h5"), + os.path.join("2014-03-09T12-30-30", "rf@1394368230.800.h5"), + os.path.join("2014-03-09T12-30-30", "rf@1394368231.200.h5"), + os.path.join("2014-03-09T12-30-30", "rf@1394368231.600.h5"), + os.path.join("2014-03-09T12-30-32", "rf@1394368232.400.h5"), + os.path.join("2014-03-09T12-30-32", "rf@1394368232.800.h5"), + ] + } + return file_lists[params] + + +@pytest.fixture(scope="class") +def datadir(param_str, tmpdir_factory): + return tmpdir_factory.mktemp(f"{param_str}_", numbered=True) + + +@pytest.fixture(scope="class") +def chdir(param_str, datadir): + chdir = datadir.mkdir(param_str) + return chdir + + +@pytest.fixture(scope="class") +def channel(chdir): + return str(chdir.basename) + + +@pytest.fixture(scope="class") +def drf_writer_param_dict(chdir, param_dict): + return dict( + directory=str(chdir), + dtype=param_dict["dtype"], + subdir_cadence_secs=param_dict["subdir_cadence_secs"], + file_cadence_millisecs=param_dict["file_cadence_millisecs"], + start_global_index=param_dict["start_global_index"], + sample_rate_numerator=param_dict["sample_rate_numerator"], + sample_rate_denominator=param_dict["sample_rate_denominator"], + uuid_str=chdir.basename, + compression_level=param_dict["compression_level"], +
checksum=param_dict["checksum"], + is_complex=param_dict["is_complex"], + num_subchannels=param_dict["num_subchannels"], + is_continuous=param_dict["is_continuous"], + marching_periods=False, + ) + + +@pytest.fixture(scope="class") +def drf_writer_factory(drf_writer_param_dict): + def writer_factory(**kwargs_updates): + kwargs = drf_writer_param_dict.copy() + kwargs.update(**kwargs_updates) + return digital_rf.DigitalRFWriter(**kwargs) + + return writer_factory + + +@pytest.fixture(scope="class") +def drf_writer(drf_writer_factory): + with drf_writer_factory() as dwo: + yield dwo + + +@pytest.fixture(scope="class") +def drf_reader(chdir): + with digital_rf.DigitalRFReader(chdir.dirname) as dro: + yield dro diff --git a/python/tests/test_digital_rf_hdf5.py b/python/tests/test_digital_rf_hdf5.py index 84461a8..9747577 100644 --- a/python/tests/test_digital_rf_hdf5.py +++ b/python/tests/test_digital_rf_hdf5.py @@ -8,449 +8,31 @@ # ---------------------------------------------------------------------------- """Tests for the digital_rf.digital_rf_hdf5 module.""" -from __future__ import absolute_import, division, print_function +from __future__ import annotations -import datetime import itertools import os -import digital_rf import h5py import numpy as np import packaging.version import pytest -############################################################################### -# constant fixtures ######################################################### -############################################################################### - - -@pytest.fixture(scope="session") -def start_datetime(): - return datetime.datetime(2014, 3, 9, 12, 30, 30, 0, None) - - -@pytest.fixture(scope="session") -def start_global_index(samples_per_second, start_datetime): - return digital_rf.util.time_to_sample(start_datetime, samples_per_second) - - -############################################################################### -# parametrized fixtures 
##################################################### -############################################################################### - - -_dtype_strs = [ - "s8", - "sc8", - "u8", - "uc8", - "s16", - "sc16", - "u16", - "uc16", - "s32", - "sc32", - "u32", - "uc32", - "s64", - "sc64", - "u64", - "uc64", - "f32", - "fc32", - "f64", - "fc64", -] -_num_subchannels = [(1, "x1"), (8, "x8")] -_df_params = list(itertools.product(_dtype_strs, (sc[0] for sc in _num_subchannels))) -_df_ids = list( - "".join(p) - for p in itertools.product(_dtype_strs, (sc[1] for sc in _num_subchannels)) -) - - -@pytest.fixture(scope="session", params=_df_params, ids=_df_ids) -def data_params(request): - dtypes = dict( - s8=np.dtype("i1"), - sc8=np.dtype([("r", "i1"), ("i", "i1")]), - u8=np.dtype("u1"), - uc8=np.dtype([("r", "u1"), ("i", "u1")]), - s16=np.dtype("i2"), - sc16=np.dtype([("r", "i2"), ("i", "i2")]), - u16=np.dtype("u2"), - uc16=np.dtype([("r", "u2"), ("i", "u2")]), - s32=np.dtype("i4"), - sc32=np.dtype([("r", "i4"), ("i", "i4")]), - u32=np.dtype("u4"), - uc32=np.dtype([("r", "u4"), ("i", "u4")]), - s64=np.dtype("i8"), - sc64=np.dtype([("r", "i8"), ("i", "i8")]), - u64=np.dtype("u8"), - uc64=np.dtype([("r", "u8"), ("i", "u8")]), - f32=np.dtype("f4"), - fc32=np.dtype("c8"), - f64=np.dtype("f8"), - fc64=np.dtype("c16"), - ) - dtype_str = request.param[0] - dtype = dtypes[dtype_str] - is_complex = dtype.names is not None or np.issubdtype(dtype, np.complexfloating) - num_subchannels = request.param[1] - return dict( - dtype_str=dtype_str, - dtype=dtype, - is_complex=is_complex, - num_subchannels=num_subchannels, - ) - - -@pytest.fixture(scope="session", params=[True, False], ids=["continuous", "gapped"]) -def file_params(request): - return dict(is_continuous=request.param) - - -@pytest.fixture( - scope="session", - params=[ - # compression_level, checksum - (0, False), - (9, True), - ], - ids=["nozip,nochecksum", "zip,checksum"], -) -def hdf_filter_params(request): - return 
dict(compression_level=request.param[0], checksum=request.param[1]) - - -@pytest.fixture( - scope="session", - params=[ - # srnum, srden, sdcsec, fcms - (200, 3, 2, 400) - ], - ids=["200/3hz,2s,400ms"], -) -def sample_params(request): - return dict( - sample_rate_numerator=request.param[0], - sample_rate_denominator=request.param[1], - subdir_cadence_secs=request.param[2], - file_cadence_millisecs=request.param[3], - ) - - -############################################################################### -# remaining fixtures ######################################################## -############################################################################### - - -@pytest.fixture(scope="session") -def param_dict( - data_params, file_params, hdf_filter_params, sample_params, start_global_index -): - params = dict(start_global_index=start_global_index) - params.update(data_params) - params.update(file_params) - params.update(hdf_filter_params) - params.update(sample_params) - return params - - -@pytest.fixture(scope="session") -def param_str(param_dict): - s = ( - "{dtype_str}_x{num_subchannels}_n{sample_rate_numerator}" - "_d{sample_rate_denominator}_{subdir_cadence_secs}s" - "_{file_cadence_millisecs}ms_c{is_continuous:b}" - ).format(**param_dict) - return s - - -@pytest.fixture(scope="session") -def dtype_str(data_params): - return data_params["dtype_str"] - - -@pytest.fixture(scope="session") -def dtype(data_params): - return data_params["dtype"] - - -@pytest.fixture(scope="session") -def is_complex(data_params): - return data_params["is_complex"] - - -@pytest.fixture(scope="session") -def num_subchannels(data_params): - return data_params["num_subchannels"] - - -@pytest.fixture(scope="session") -def is_continuous(file_params): - return file_params["is_continuous"] - - -@pytest.fixture(scope="session") -def compression_level(hdf_filter_params): - return hdf_filter_params["compression_level"] - - -@pytest.fixture(scope="session") -def checksum(hdf_filter_params): 
- return hdf_filter_params["checksum"] - - -@pytest.fixture(scope="session") -def sample_rate_numerator(sample_params): - return sample_params["sample_rate_numerator"] - - -@pytest.fixture(scope="session") -def sample_rate_denominator(sample_params): - return sample_params["sample_rate_denominator"] - - -@pytest.fixture(scope="session") -def subdir_cadence_secs(sample_params): - return sample_params["subdir_cadence_secs"] - - -@pytest.fixture(scope="session") -def file_cadence_millisecs(sample_params): - return sample_params["file_cadence_millisecs"] - - -@pytest.fixture(scope="session") -def samples_per_second(sample_rate_numerator, sample_rate_denominator): - return np.longdouble(sample_rate_numerator) / sample_rate_denominator - - -@pytest.fixture(scope="session") -def end_global_index( - file_cadence_millisecs, - sample_rate_numerator, - sample_rate_denominator, - samples_per_second, - start_global_index, - subdir_cadence_secs, -): - # want data to span at least two subdirs to test creation + naming - # also needs to span at least 8 files to accommodate write blocks (below) - nsamples_subdirs = int( - (1.5 * subdir_cadence_secs * sample_rate_numerator) // sample_rate_denominator - ) - nsamples_files = int( - np.ceil(8 * file_cadence_millisecs * (samples_per_second / 1000)) - ) - nsamples = max(nsamples_subdirs, nsamples_files) - return start_global_index + nsamples - 1 - - -@pytest.fixture(scope="session") -def bounds( - end_global_index, - file_cadence_millisecs, - is_continuous, - sample_rate_numerator, - sample_rate_denominator, - start_global_index, -): - if is_continuous: - srn = sample_rate_numerator - srd = sample_rate_denominator - fcms = file_cadence_millisecs - - # bounds are at file boundaries - # milliseconds of file with start_global_index - sms = (((start_global_index * srd * 1000) // srn) // fcms) * fcms - # sample index at start of file (ceil b/c start at first whole sample) - ss = ((sms * srn) + (srd * 1000) - 1) // (srd * 1000) - # 
milliseconds of file after end_global_index - ems = ((((end_global_index * srd * 1000) // srn) // fcms) + 1) * fcms - # sample index at start of file (ceil b/c start at first whole sample) - es = ((ems * srn) + (srd * 1000) - 1) // (srd * 1000) - return (ss, es - 1) - else: - return (start_global_index, end_global_index) - - -@pytest.fixture(scope="session") -def data_block_slices( - bounds, - end_global_index, - file_cadence_millisecs, - samples_per_second, - start_global_index, -): - # blocks = [(start_sample, stop_sample)] - blocks = [] - samples_per_file = file_cadence_millisecs * (samples_per_second / 1000) - - # first block stops in middle of second file - sstart = start_global_index - sstop = bounds[0] + int(1.5 * samples_per_file) - blocks.append((sstart, sstop)) - - # second block continues where first ended, stops in middle of third file - sstart = sstop - sstop = bounds[0] + int(2.5 * samples_per_file) - blocks.append((sstart, sstop)) - - # first gap, no skipped file - # third block starts in middle of 4th file, ends in middle of 5th - sstart = bounds[0] + int(3.5 * samples_per_file) - sstop = bounds[0] + int(4.5 * samples_per_file) - blocks.append((sstart, sstop)) - - # second gap, skipping one file completely - # fourth block starts in middle of 7th file, ends later in 7th file - sstart = bounds[0] + int(6.5 * samples_per_file) - sstop = bounds[0] + int(6.9 * samples_per_file) - blocks.append((sstart, sstop)) - - # fifth and final block continues just after previous write and goes to end - sstart = sstop + 2 - sstop = end_global_index + 1 - blocks.append((sstart, sstop)) - - return blocks - - -def generate_rf_data(shape, dtype, seed): - np.random.seed(seed % 2**32) - nitems = np.prod(shape) - byts = np.random.randint(0, 256, nitems * dtype.itemsize, "u1") - return byts.view(dtype).reshape(shape) - - -@pytest.fixture(scope="session") -def data(bounds, data_block_slices, dtype, num_subchannels): - data_len = bounds[1] - bounds[0] + 1 - if num_subchannels 
== 1: - shape = (data_len,) - else: - shape = (data_len, num_subchannels) - - # start off with data array containing its appropriate fill value - if np.issubdtype(dtype, np.inexact): - fill_value = np.nan - elif dtype.names is not None: - fill_value = np.empty(1, dtype=dtype)[0] - fill_value["r"] = np.iinfo(dtype["r"]).min - fill_value["i"] = np.iinfo(dtype["i"]).min - else: - fill_value = np.iinfo(dtype).min - data = np.full(shape, fill_value, dtype=dtype) - - rdata = generate_rf_data(shape, dtype, 0) - # fill array data for what we will be writing - for sstart, sstop in data_block_slices: - bstart = sstart - bounds[0] - bend = sstop - bounds[0] - data[bstart:bend] = rdata[bstart:bend] - - return data - - -@pytest.fixture(scope="session") -def data_file_list(sample_params): - # manually generated to match output of writing data given - # _sample_rate_params - # get params as a tuple of values sorted in key order - params = tuple(v for k, v in sorted(sample_params.items())) - file_lists = { - (400, 3, 200, 2): [ - os.path.join("2014-03-09T12-30-30", "rf@1394368230.000.h5"), - os.path.join("2014-03-09T12-30-30", "rf@1394368230.400.h5"), - os.path.join("2014-03-09T12-30-30", "rf@1394368230.800.h5"), - os.path.join("2014-03-09T12-30-30", "rf@1394368231.200.h5"), - os.path.join("2014-03-09T12-30-30", "rf@1394368231.600.h5"), - os.path.join("2014-03-09T12-30-32", "rf@1394368232.400.h5"), - os.path.join("2014-03-09T12-30-32", "rf@1394368232.800.h5"), - ] - } - return file_lists[params] - - -@pytest.fixture(scope="class") -def datadir(param_str, tmpdir_factory): - return tmpdir_factory.mktemp("{0}_".format(param_str), numbered=True) - - -@pytest.fixture(scope="class") -def chdir(param_str, datadir): - chdir = datadir.mkdir(param_str) - return chdir - - -@pytest.fixture(scope="class") -def channel(chdir): - return str(chdir.basename) - - -@pytest.fixture(scope="class") -def drf_writer_param_dict(chdir, param_dict): - return dict( - directory=str(chdir), - 
dtype=param_dict["dtype"], - subdir_cadence_secs=param_dict["subdir_cadence_secs"], - file_cadence_millisecs=param_dict["file_cadence_millisecs"], - start_global_index=param_dict["start_global_index"], - sample_rate_numerator=param_dict["sample_rate_numerator"], - sample_rate_denominator=param_dict["sample_rate_denominator"], - uuid_str=chdir.basename, - compression_level=param_dict["compression_level"], - checksum=param_dict["checksum"], - is_complex=param_dict["is_complex"], - num_subchannels=param_dict["num_subchannels"], - is_continuous=param_dict["is_continuous"], - marching_periods=False, - ) - - -@pytest.fixture(scope="class") -def drf_writer_factory(drf_writer_param_dict): - def writer_factory(**kwargs_updates): - kwargs = drf_writer_param_dict.copy() - kwargs.update(**kwargs_updates) - return digital_rf.DigitalRFWriter(**kwargs) - - return writer_factory - - -@pytest.fixture(scope="class") -def drf_writer(drf_writer_factory): - with drf_writer_factory() as dwo: - yield dwo - - -@pytest.fixture(scope="class") -def drf_reader(chdir): - with digital_rf.DigitalRFReader(chdir.dirname) as dro: - yield dro - - -############################################################################### -# tests ##################################################################### -############################################################################### +import digital_rf def test_get_unix_time( sample_rate_numerator, sample_rate_denominator, start_datetime, start_global_index ): - global_index = start_global_index + 1 - index_dt = start_datetime + datetime.timedelta(microseconds=15000) + # test conversion of the start time dt, picoseconds = digital_rf.get_unix_time( - global_index, sample_rate_numerator, sample_rate_denominator + start_global_index, sample_rate_numerator, sample_rate_denominator ) - assert dt == index_dt - assert picoseconds == index_dt.microsecond * 1000000 + assert dt == start_datetime.replace(tzinfo=None) + assert picoseconds == 
start_datetime.microsecond * 1000000 -class TestDigitalRFChannel(object): +class TestDigitalRFChannel: """Test writing and reading of a Digital RF channel.""" @pytest.mark.firstonly( @@ -460,7 +42,7 @@ def test_writer_init( self, channel, chdir, drf_writer_param_dict, drf_writer_factory ): """Test writer object's init.""" - channel = "{0}_init_test".format(channel) + channel = f"{channel}_init_test" # error when directory doesn't exist with pytest.raises(IOError): @@ -571,7 +153,7 @@ def test_writer_write_input_checking( # by always using two dimensions even for num_subchannels==1, # we test both the 2d (here) and 1d (other test fcn) input cases shape = (100, num_subchannels) - chdir = chdir.dirpath().mkdir("{0}_input_tests".format(channel)) + chdir = chdir.dirpath().mkdir(f"{channel}_input_tests") with drf_writer_factory(directory=str(chdir)) as dwo: # test interleaved input format (complex writer only) if is_complex: @@ -584,13 +166,13 @@ def test_writer_write_input_checking( # (one byte integer values will upcast to almost anything else) if np.issubdtype(dwo.realdtype, np.unsignedinteger): base_type = "u1" - other_type = "i{0}".format(dwo.realdtype.itemsize) + other_type = f"i{dwo.realdtype.itemsize}" else: base_type = "i1" if np.issubdtype(dwo.realdtype, np.integer): - other_type = "u{0}".format(dwo.realdtype.itemsize) + other_type = f"u{dwo.realdtype.itemsize}" else: - other_type = "f{0}".format(dwo.realdtype.itemsize * 2) + other_type = f"f{dwo.realdtype.itemsize * 2}" if is_complex: # test upcast from complex input # also tests struct dtype input when floating complex @@ -632,11 +214,11 @@ def test_writer_write_input_checking( # test initializing with interleaved format if is_complex: - chdir = chdir.dirpath().mkdir("{0}_input_tests_interleaved".format(channel)) + chdir = chdir.dirpath().mkdir(f"{channel}_input_tests_interleaved") if dtype.names is not None: realdtype = dtype["r"] else: - realdtype = np.dtype("f{0}".format(dtype.itemsize // 2)) + realdtype = 
np.dtype(f"f{dtype.itemsize // 2}") with drf_writer_factory( directory=str(chdir), dtype=realdtype, is_complex=True ) as dwo: @@ -660,7 +242,7 @@ def test_writer_write_blocks( start_global_index, ): """Test writer object's write_blocks method.""" - chdir = chdir.dirpath().mkdir("{0}_write_blocks".format(channel)) + chdir = chdir.dirpath().mkdir(f"{channel}_write_blocks") with drf_writer_factory(directory=str(chdir)) as dwo: # write the specified data blocks from data_block_slices block_starts, block_stops = zip(*data_block_slices) @@ -757,7 +339,7 @@ def test_writer_output_files_write_blocks(self, channel, chdir, data_file_list): # (we don't check the contents of the written files here since testing # the reader will accomplish that as long as we keep in mind that # errors there could actually indicate a problem in writing) - chdir = chdir.dirpath("{0}_write_blocks".format(channel)) + chdir = chdir.dirpath(f"{channel}_write_blocks") drf_files = digital_rf.lsdrf( str(chdir), include_drf=True, @@ -906,8 +488,8 @@ def test_reader_get_properties( channel, drf_reader, drf_writer_param_dict, - samples_per_second, - start_datetime, + sample_rate, + start_timestamp_tuple, start_global_index, ): """Test reader object's get_properties method.""" @@ -925,9 +507,12 @@ def test_reader_get_properties( is_complex=p["is_complex"], is_continuous=p["is_continuous"], num_subchannels=p["num_subchannels"], - samples_per_second=samples_per_second, + samples_per_second=( + np.longdouble(sample_rate.numerator) / sample_rate.denominator + ), sample_rate_numerator=p["sample_rate_numerator"], sample_rate_denominator=p["sample_rate_denominator"], + sample_rate=sample_rate, subdir_cadence_secs=p["subdir_cadence_secs"], ) props = drf_reader.get_properties(channel) @@ -939,9 +524,7 @@ def test_reader_get_properties( expected_sample_properties = dict( computer_time=None, - init_utc_timestamp=int( - digital_rf.util.datetime_to_timestamp(start_datetime) - ), + 
init_utc_timestamp=start_timestamp_tuple[0], sequence_num=0, uuid_str=p["uuid_str"], ) @@ -991,7 +574,7 @@ def test_reader_read_metadata( bounds, channel, drf_reader, - samples_per_second, + sample_rate, sample_rate_numerator, sample_rate_denominator, start_global_index, @@ -1000,13 +583,16 @@ def test_reader_read_metadata( # must be run after test_reader_get_digital_metadata which creates # the metadata that we'll be reading expected_metadata = dict( - samples_per_second=samples_per_second, + samples_per_second=( + np.longdouble(sample_rate.numerator) / sample_rate.denominator + ), sample_rate_numerator=sample_rate_numerator, sample_rate_denominator=sample_rate_denominator, + sample_rate=sample_rate, ) # read the blocks channel first, which has no Digital Metadata channel # all metadata - block_channel = "{0}_write_blocks".format(channel) + block_channel = f"{channel}_write_blocks" md = drf_reader.read_metadata(bounds[0], bounds[1], block_channel, method=None) assert list(md.keys()) == [bounds[0]] for k, v in expected_metadata.items(): diff --git a/python/tests/test_util.py b/python/tests/test_util.py new file mode 100644 index 0000000..ef860d7 --- /dev/null +++ b/python/tests/test_util.py @@ -0,0 +1,105 @@ +# ---------------------------------------------------------------------------- +# Copyright (c) 2017 Massachusetts Institute of Technology (MIT) +# All rights reserved. +# +# Distributed under the terms of the BSD 3-clause license. +# +# The full license is in the LICENSE file, distributed with this software. 
+# ---------------------------------------------------------------------------- +"""Tests for the digital_rf.util module.""" + +from __future__ import annotations + +import datetime +import fractions + +import numpy as np + +import digital_rf + + +def reference_time_to_sample_ceil(timedelta, sample_rate): + if isinstance(timedelta, tuple): + t_sec, t_psec = timedelta + elif isinstance(timedelta, np.timedelta64): + onesec = np.timedelta64(1, "s") + t_sec = timedelta // onesec + t_psec = (timedelta % onesec) // np.timedelta64(1, "ps") + elif isinstance(timedelta, datetime.timedelta): + t_sec = int(timedelta.total_seconds()) + t_psec = 1000000 * timedelta.microseconds + else: + t_sec = int(timedelta) + t_psec = int(np.round((timedelta % 1) * 1e12)) + # ensure that sample_rate is a fractions.Fraction + if not isinstance(sample_rate, fractions.Fraction): + sample_rate = digital_rf.util.get_samplerate_frac(sample_rate) + # calculate rational values for the second and picosecond parts + s_frac = t_sec * sample_rate + t_psec * sample_rate / 10**12 + # get an integer value through ceiling rounding + return int(s_frac) + ((s_frac % 1) != 0) + + +def reference_sample_to_time_floor(nsamples, sample_rate): + nsamples = int(nsamples) + # ensure that sample_rate is a fractions.Fraction + if not isinstance(sample_rate, fractions.Fraction): + sample_rate = digital_rf.util.get_samplerate_frac(sample_rate) + + # get the timedelta as a Fraction + t_frac = nsamples / sample_rate + + seconds = int(t_frac) + picoseconds = int((t_frac % 1) * 10**12) + + return (seconds, picoseconds) + + +############################################################################### +# tests ##################################################################### +############################################################################### + + +def test_sample_timestamp_conversion( + sample_rate_numerator, sample_rate_denominator, sample_rate, start_global_index +): + # test that sample index
round trips through get_sample_ceil(get_timestamp_floor()) + for global_index in range(start_global_index, start_global_index + 100): + ref_second, ref_picosecond = reference_sample_to_time_floor( + global_index, sample_rate + ) + + second, picosecond = digital_rf._py_rf_write_hdf5.get_timestamp_floor( + global_index, sample_rate_numerator, sample_rate_denominator + ) + assert second == ref_second + assert picosecond == ref_picosecond + second2, picosecond2 = digital_rf.util.sample_to_time_floor( + global_index, sample_rate + ) + assert second2 == ref_second + assert picosecond2 == ref_picosecond + + ref_rt_global_index = reference_time_to_sample_ceil( + (ref_second, ref_picosecond), sample_rate + ) + assert ref_rt_global_index == global_index + + rt_global_index = digital_rf._py_rf_write_hdf5.get_sample_ceil( + second, picosecond, sample_rate_numerator, sample_rate_denominator + ) + assert rt_global_index == ref_rt_global_index + rt_global_index2 = digital_rf.util.time_to_sample_ceil( + (second, picosecond), sample_rate + ) + assert rt_global_index2 == ref_rt_global_index + + # test passing array to get_sample_ceil(get_timestamp_floor()) + global_idxs = np.arange(start_global_index, start_global_index + 100, dtype="int64") + seconds, picoseconds = digital_rf.util.sample_to_time_floor( + global_idxs, sample_rate + ) + rt_global_idxs = digital_rf.util.time_to_sample_ceil( + (seconds, picoseconds), sample_rate + ) + np.testing.assert_equal(rt_global_idxs, global_idxs) diff --git a/python/tools/drf_cross_sti.py b/python/tools/drf_cross_sti.py index 80dc310..780c6d3 100644 --- a/python/tools/drf_cross_sti.py +++ b/python/tools/drf_cross_sti.py @@ -16,7 +16,6 @@ """ - import datetime import itertools as it import optparse @@ -71,7 +70,7 @@ def __init__(self, control): elif self.control.xtype == "pairs": args = [iter(pl)] * 2 - self.xlist = list(it.izip_longest(*args)) + self.xlist = list(it.zip_longest(*args)) elif self.control.xtype == "combo": self.xlist = 
list(it.combinations(pl, 2)) @@ -130,12 +129,8 @@ def plot(self): print(("pair is : ", xidx, yidx)) # sample rate - xsr = self.dio[xidx].get_properties(self.channel[xidx])[ - "samples_per_second" - ] - ysr = self.dio[yidx].get_properties(self.channel[yidx])[ - "samples_per_second" - ] + xsr = self.dio[xidx].get_properties(self.channel[xidx])["sample_rate"] + ysr = self.dio[yidx].get_properties(self.channel[yidx])["sample_rate"] if self.control.verbose: print(("sample rate, x: ", xsr, " y: ", ysr)) @@ -157,19 +152,19 @@ def plot(self): if self.control.start: dtst0 = dateutil.parser.parse(self.control.start) - st0 = ( - dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - ).total_seconds() - st0 = int(st0 * sr) + st0 = dtst0 - datetime.datetime( + 1970, 1, 1, tzinfo=datetime.timezone.utc + ) + st0 = drf.util.time_to_sample_ceil(st0, sr) else: st0 = int(b[0]) if self.control.end: dtst0 = dateutil.parser.parse(self.control.end) - et0 = ( - dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - ).total_seconds() - et0 = int(et0 * sr) + et0 = dtst0 - datetime.datetime( + 1970, 1, 1, tzinfo=datetime.timezone.utc + ) + et0 = drf.util.time_to_sample_ceil(et0, sr) else: et0 = int(b[1]) @@ -231,10 +226,10 @@ def plot(self): for p in np.arange(0, self.control.frames * 2, 2): sti_csd_data_coherence = np.zeros( - [self.control.num_fft, self.control.bins], np.float + [self.control.num_fft, self.control.bins], np.float64 ) sti_csd_data_phase = np.zeros( - [self.control.num_fft, self.control.bins], np.float + [self.control.num_fft, self.control.bins], np.float64 ) sti_times = np.zeros([self.control.bins], np.complex128) @@ -388,9 +383,9 @@ def plot(self): print("last ", start_sample) # create a time stamp - start_time = st0 / sr + start_time, picoseconds = drf.util.sample_to_time_floor(st0, sr) srt_time = time.gmtime(start_time) - sub_second = int(round((start_time - int(start_time)) * 100)) + sub_second = int(round(picoseconds / 1e10)) timestamp = 
"%d-%02d-%02d %02d:%02d:%02d.%02d UT" % ( srt_time[0], diff --git a/python/tools/drf_plot.py b/python/tools/drf_plot.py index faf9fd2..8c4b4eb 100644 --- a/python/tools/drf_plot.py +++ b/python/tools/drf_plot.py @@ -1182,8 +1182,8 @@ def usage(): print("loading metadata") drf_properties = drf.get_properties(chans[chidx]) - sfreq_ld = drf_properties["samples_per_second"] - sfreq = float(sfreq_ld) + sample_rate = drf_properties["sample_rate"] + sfreq = float(sample_rate) toffset = start_sample print(toffset) @@ -1191,7 +1191,7 @@ def usage(): if atime == 0: atime = ustart else: - atime = int(np.uint64(atime * sfreq_ld)) + atime = drf.util.time_to_sample_ceil(atime, sample_rate) sstart = atime + int(toffset) dlen = stop_sample - start_sample diff --git a/python/tools/drf_sound.py b/python/tools/drf_sound.py index aa083e9..42e8b98 100644 --- a/python/tools/drf_sound.py +++ b/python/tools/drf_sound.py @@ -15,6 +15,7 @@ directly to sounddevice or through a wave file save out. """ + from __future__ import absolute_import, division, print_function import datetime @@ -56,7 +57,7 @@ def makeasound(self): Iterate over the data set and output a sound through sounddevice. 
""" - sr = self.dio.get_properties(self.channel)["samples_per_second"] + sr = self.dio.get_properties(self.channel)["sample_rate"] if self.control.verbose: print("sample rate: ", sr) @@ -68,19 +69,15 @@ def makeasound(self): if self.control.start: dtst0 = dateutil.parser.parse(self.control.start) - st0 = ( - dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - ).total_seconds() - st0 = int(st0 * sr) + st0 = dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + st0 = drf.util.time_to_sample_ceil(st0, sr) else: st0 = int(bound[0]) if self.control.end: dtst0 = dateutil.parser.parse(self.control.end) - et0 = ( - dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - ).total_seconds() - et0 = int(et0 * sr) + et0 = dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + et0 = drf.util.time_to_sample_ceil(et0, sr) else: et0 = int(bound[1]) @@ -110,7 +107,7 @@ def makeasound(self): print("first ", start_sample) - audiostuff = np.zeros((blocks, reads_per_block * dsamps), dtype=np.float) + audiostuff = np.zeros((blocks, reads_per_block * dsamps), dtype=np.float64) if self.control.verbose: print( "processing info : ", @@ -132,7 +129,9 @@ def makeasound(self): ) if self.control.freqshift: - tvec = np.arange(len(data), dtype=np.float) / sr + start_sample / sr + tvec = np.arange(len(data), dtype=np.float64) / float(sr) + float( + start_sample / sr + ) f_osc = np.exp(1j * 2 * np.pi * self.control.freqshift * tvec) data_fs = data * f_osc else: @@ -146,7 +145,7 @@ def makeasound(self): start_sample += samples_per_stripe audiostuff_cent = audiostuff - audiostuff.flatten().mean() audiostuff_norm = audiostuff_cent / np.abs(audiostuff_cent.flatten()).max() - audiostuff_norm = audiostuff_norm.astype(float) + audiostuff_norm = audiostuff_norm.astype(np.float64) if self.control.outname: fname = os.path.splitext(self.control.outname)[0] ext = ".wav" diff --git a/python/tools/drf_sti.py b/python/tools/drf_sti.py index 
da9f699..0e31c2a 100644 --- a/python/tools/drf_sti.py +++ b/python/tools/drf_sti.py @@ -11,9 +11,8 @@ """Multithreaded variant by dsheen to handle much larger datasets smoothly 2025/06/04""" """Flag Added to use all samples in file and compute integration automatically 2025/07/14""" - -import datetime import argparse +import datetime import os import sys import time @@ -88,16 +87,11 @@ def __init__(self, opt): # open digital RF path self.dio = drf.DigitalRFReader(self.opt.path) - self.sr = self.dio.get_properties(self.channels[0])["samples_per_second"] + self.sr = self.dio.get_properties(self.channels[0])["sample_rate"] self.bounds = self.dio.get_bounds(self.channels[0]) - self.dt_start = datetime.datetime.fromtimestamp( - int(self.bounds[0] / self.sr), - tz=datetime.timezone.utc, - ) - self.dt_stop = datetime.datetime.fromtimestamp( - int(self.bounds[1] / self.sr), tz=datetime.timezone.utc - ) + self.dt_start = drf.util.sample_to_datetime(self.bounds[0], self.sr) + self.dt_stop = drf.util.sample_to_datetime(self.bounds[1], self.sr) print( f"bound times {self.dt_start.isoformat()} to {self.dt_stop.isoformat()} UTC" @@ -281,19 +275,15 @@ def plot(self): if self.opt.start: dtst0 = dateutil.parser.parse(self.opt.start) - st0 = ( - dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - ).total_seconds() - st0 = int(st0 * self.sr) + st0 = dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + st0 = drf.util.time_to_sample_ceil(st0, self.sr) else: st0 = int(b[0]) if self.opt.end: dtst0 = dateutil.parser.parse(self.opt.end) - et0 = ( - dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) - ).total_seconds() - et0 = int(et0 * self.sr) + et0 = dtst0 - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc) + et0 = drf.util.time_to_sample_ceil(et0, self.sr) else: et0 = int(b[1]) @@ -455,9 +445,9 @@ def plot(self): print("last {0}".format(start_sample)) # create a time stamp - start_time = int(st0 / self.sr) + start_time, 
picoseconds = drf.util.sample_to_time_floor(st0, self.sr) srt_time = time.gmtime(start_time) - sub_second = int(round((start_time - int(start_time)) * 100)) + sub_second = int(round(picoseconds / 1e10)) timestamp = "%d-%02d-%02d %02d:%02d:%02d.%02d UT" % ( srt_time[0], @@ -624,8 +614,8 @@ def parse_command_line(): dest="auto_length", action="store_true", default=False, - help="""Automatically determine length to use samples in time range - as efficiently as possible. (Default: False. Requires that + help="""Automatically determine length to use samples in time range + as efficiently as possible. (Default: False. Requires that -b and -i are specified)""", ) parser.add_argument( @@ -651,7 +641,7 @@ def parse_command_line(): action="store_true", default=False, help="""Automatically determine integration to use samples in time range - as efficiently as possible (Default: False. Requires that + as efficiently as possible (Default: False. Requires that -l and -b are specified)""", ) parser.add_argument( @@ -688,9 +678,9 @@ def parse_command_line(): dest="num_processes", default=1, type=int, - help="""Number of processes to use for computing the STI. + help="""Number of processes to use for computing the STI. If omitted defaults to 1 (single threaded). - setting processes to 0 will default to using a number of + setting processes to 0 will default to using a number of processes equal to the number of available cpu cores""", ) parser.add_argument( diff --git a/python/tools/thor.py b/python/tools/thor.py index 7cf6488..809c208 100644 --- a/python/tools/thor.py +++ b/python/tools/thor.py @@ -8,6 +8,7 @@ # The full license is in the LICENSE file, distributed with this software. 
# ---------------------------------------------------------------------------- """Record data from synchronized USRPs in Digital RF format.""" + from __future__ import absolute_import, division, print_function import argparse @@ -21,14 +22,13 @@ from fractions import Fraction from itertools import chain, cycle, islice, repeat from subprocess import call -from textwrap import dedent, fill, TextWrapper +from textwrap import TextWrapper, dedent, fill import digital_rf as drf import gr_digital_rf as gr_drf import numpy as np -from gnuradio import blocks +from gnuradio import blocks, gr, uhd from gnuradio import filter as grfilter -from gnuradio import gr, uhd try: from gnuradio import zeromq @@ -590,13 +590,11 @@ def _usrp_setup(self): # read back actual sample rate value samplerate = u.get_samp_rate() - # calculate longdouble precision/rational sample rate + # calculate rational sample rate # (integer division of clock rate) cr = op.clock_rates[0] srdec = int(round(cr / samplerate)) - samplerate_ld = np.longdouble(cr) / srdec - op.samplerate = samplerate_ld - op.samplerate_frac = Fraction(cr).limit_denominator() / srdec + op.samplerate = drf.util.get_samplerate_frac(cr, srdec) # set per-channel options # set command time so settings are synced @@ -747,26 +745,25 @@ def _zmq_pub_sink_setup(self): def _finalize_options(self): op = self.op - op.ch_samplerates_frac = [] + op.ch_samplerates = [] op.resampling_ratios = [] op.resampling_filter_taps = [] op.resampling_filter_delays = [] op.channelizer_filter_taps = [] op.channelizer_filter_delays = [] for ko, (osr, nsc) in enumerate(zip(op.ch_samplerates, op.ch_nsubchannels)): - # get output sample rate fraction - # (op.samplerate_frac final value is set in _usrp_setup + # get output resampling ratio + # (op.samplerate final value is set in _usrp_setup # so can't get output sample rate until after that is done) if osr is None: - ch_samplerate_frac = op.samplerate_frac + ratio = Fraction(1) else: - ch_samplerate_frac = 
Fraction(osr).limit_denominator() - op.ch_samplerates_frac.append(ch_samplerate_frac) - - # get resampling ratio - ratio = ch_samplerate_frac / op.samplerate_frac + ratio = (Fraction(osr) / op.samplerate).limit_denominator(2**16) op.resampling_ratios.append(ratio) + # get output samplerate fraction + op.ch_samplerates.append(op.samplerate * ratio) + # get resampling low-pass filter taps if ratio == 1: op.resampling_filter_taps.append(np.zeros(0)) @@ -917,15 +914,14 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): now = datetime.now(tz=timezone.utc) # launch on integer second by default for convenience (ceil + 2) lt = now.replace(microsecond=0) + timedelta(seconds=3) - ltts = (lt - drf.util.epoch).total_seconds() + lttd = lt - drf.util.epoch # adjust launch time forward so it falls on an exact sample since epoch - lt_rsamples = int(np.ceil(ltts * op.samplerate)) - ltts = lt_rsamples / op.samplerate + lt_rsamples = drf.util.time_to_sample_ceil(lttd, op.samplerate) lt = drf.util.sample_to_datetime(lt_rsamples, op.samplerate) if op.verbose: ltstr = lt.strftime("%a %b %d %H:%M:%S.%f %Y") msg = "Launch time: {0} ({1})\nSample index: {2}" - print(msg.format(ltstr, repr(ltts), lt_rsamples)) + print(msg.format(ltstr, repr(lt.timestamp()), lt_rsamples)) # command launch time ct_td = lt - drf.util.epoch ct_secs = ct_td.total_seconds() // 1.0 @@ -949,7 +945,7 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): mbnum = op.mboardnum_bychan[kr] # output settings that get modified depending on processing - ch_samplerate_frac = op.ch_samplerates_frac[ko] + ch_samplerate = op.ch_samplerates[ko] ch_centerfreq = op.ch_centerfreqs[ko] start_sample_adjust = 0 @@ -1008,7 +1004,7 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): # make frequency shift block if necessary if ch_centerfreq is not False: f_shift = ch_centerfreq - op.centerfreqs[kr] - phase_inc = -2 * np.pi * f_shift / ch_samplerate_frac + phase_inc = -2 * 
np.pi * f_shift / ch_samplerate rotator = blocks.rotator_cc(phase_inc) else: ch_centerfreq = op.centerfreqs[kr] @@ -1052,9 +1048,9 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): # modify output settings accordingly ch_centerfreq = ch_centerfreq + np.fft.fftfreq( - nsc, 1 / float(ch_samplerate_frac) + nsc, 1 / float(ch_samplerate) ) - ch_samplerate_frac = ch_samplerate_frac / nsc + ch_samplerate = ch_samplerate / nsc else: channelizer = None @@ -1074,10 +1070,9 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): convert = None # get start sample - ch_samplerate_ld = np.longdouble( - ch_samplerate_frac.numerator - ) / np.longdouble(ch_samplerate_frac.denominator) - start_sample = int(np.uint64(ltts * ch_samplerate_ld)) + start_sample_adjust + start_sample = ( + drf.util.time_to_sample_ceil(lttd, ch_samplerate) + start_sample_adjust + ) # create digital RF sink dst = gr_drf.digital_rf_channel_sink( @@ -1085,8 +1080,8 @@ def run(self, starttime=None, endtime=None, duration=None, period=10): dtype=op.ch_out_specs[ko]["dtype"], subdir_cadence_secs=op.subdir_cadence_s, file_cadence_millisecs=op.file_cadence_ms, - sample_rate_numerator=ch_samplerate_frac.numerator, - sample_rate_denominator=ch_samplerate_frac.denominator, + sample_rate_numerator=ch_samplerate.numerator, + sample_rate_denominator=ch_samplerate.denominator, start=start_sample, ignore_tags=False, is_complex=True,