diff --git a/wave_utils/include/wave/utils/data.hpp b/wave_utils/include/wave/utils/data.hpp index c8c9c2a3..082152b6 100644 --- a/wave_utils/include/wave/utils/data.hpp +++ b/wave_utils/include/wave/utils/data.hpp @@ -9,6 +9,7 @@ #include #include +#include #include #include "wave/utils/math.hpp" @@ -57,17 +58,27 @@ int mat2csv(std::string file_path, MatX data); * The entries must be separated by whitespace, and be in row-major order. */ template -Eigen::Matrix matrixFromStream(std::istream &in_stream) { +inline Eigen::Matrix matrixFromStream(std::istream &in) { Eigen::Matrix mat; for (auto i = 0; i < Rows; ++i) { for (auto j = 0; j < Cols; ++j) { - in_stream >> mat(i, j); + in >> mat(i, j); } } return mat; } +/** Reads a matrix from a string + * + * The entries must be separated by whitespace, and be in row-major order. + */ +template +inline Eigen::Matrix matrixFromString(std::string &in) { + auto iss = std::istringstream{in}; + return matrixFromStream(iss); +}; + /** @} group utils */ } // namespace wave diff --git a/wave_vision/include/wave/vision/dataset/VioDataset.hpp b/wave_vision/include/wave/vision/dataset/VioDataset.hpp index dc9349f3..12b86498 100644 --- a/wave_vision/include/wave/vision/dataset/VioDataset.hpp +++ b/wave_vision/include/wave/vision/dataset/VioDataset.hpp @@ -71,15 +71,78 @@ struct VioDataset { // Calibration - /** Camera intrinsic matrix */ - Mat3 camera_K; + /** Camera object containing intrinsic matrix */ + VoTestCamera camera; /** Camera transformation from IMU frame (expressed in IMU frame) */ Vec3 I_p_IC; Rotation R_IC; + /** Geographic datum (arbitrary for now, just used for output) */ + Vec3 lla_datum{43.472981, -80.540092, 0}; + + /* An arbitrary start time, used for output timestamps.txt + * @todo maybe we will care about absolute time in future + */ + std::chrono::system_clock::time_point start_time = + std::chrono::system_clock::now(); + /** Ground truth 3D landmark positions in the world frame. 
*/ LandmarkMap landmarks; + // I/O methods + + /** + * Writes dataset to the given directory. A format similar to kitti is used. + */ + void outputToDirectory(const std::string &output_dir) const; + + /** + * Write calibration files to the given directory. + * + * To be compatible with kitti this produces 3 files, + * - calib_cam_to_cam.txt + * - calib_imu_to_velo.txt + * - calib_velo_to_cam.txt + * + * though the transformation imu_to_velo is identity for now. + */ + void outputCalibration(const std::string &output_dir) const; + + /** Writes pose and inertial measurements to the given directory + * + * The output is in kitti format. It includes: + * - A file `timestamps.txt` with one row for each time step + * - A directory `data` containing a text file for each time step. The files + * are numbered with the format 0000000000.txt. + * + * Each data file has one row of information matching kitti's + * `dataformat.txt` (not included here). When writing, we fill only the + * fields `lat` `lon` `alt` `roll` `pitch` `yaw` (with ground truth), and + * `vf` `vl` `vu` `wf` `wl` `wu` (with simulated imu measurements). + */ + void outputPoses(const std::string &output_dir) const; + + /** Write feature observations to the given directory. + * + * The output is placed in a directory called `image_00` and includes + * + * - A file `timestamps.txt` with one row for each time step - A directory + * `features` containing a text file for each time step. The files are + * numbered with the format 0000000000.txt. + * + * Each observations file has one row for each feature measurement. Each row + * has three space-separated fields: landmark id, pixels x, and pixels y. + */ + void outputObserved(const std::string &output_dir) const; + + /** Reads a dataset from files in the given directory. + * + * The format must be the same as that created by outputToDirectory(), or + * a kitti dataset. 
+ */ + static VioDataset loadFromDirectory(const std::string &input_dir); + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW }; diff --git a/wave_vision/include/wave/vision/dataset/VoDataset.hpp b/wave_vision/include/wave/vision/dataset/VoDataset.hpp index bde2ac23..ee5350d4 100644 --- a/wave_vision/include/wave/vision/dataset/VoDataset.hpp +++ b/wave_vision/include/wave/vision/dataset/VoDataset.hpp @@ -108,7 +108,7 @@ struct VoDataset { */ void outputObserved(const std::string &output_dir); - /** Writes robot state group truth to the given file. + /** Writes robot state ground truth to the given file. * * The output is space-separated. Each row contains 8 values: the time, the * 3d position (x, y, z), and the quaternion orientation (x, y, z, w). diff --git a/wave_vision/include/wave/vision/dataset/VoTestCamera.hpp b/wave_vision/include/wave/vision/dataset/VoTestCamera.hpp index e2085ccf..27dcc8f5 100644 --- a/wave_vision/include/wave/vision/dataset/VoTestCamera.hpp +++ b/wave_vision/include/wave/vision/dataset/VoTestCamera.hpp @@ -46,6 +46,20 @@ class VoTestCamera { VoTestCamera(int image_width, int image_height, Mat3 K, double hz) : image_width{image_width}, image_height{image_height}, K{K}, hz{hz} {} + // Helper getters + double fx() const { + return K(0, 0); + } + double fy() const { + return K(1, 1); + } + double cx() const { + return K(0, 2); + } + double cy() const { + return K(1, 2); + } + /** Check whether camera is triggered at this time-step * @returns boolean to denote true or false */ diff --git a/wave_vision/src/dataset/VioDataset.cpp b/wave_vision/src/dataset/VioDataset.cpp index 4c0b9f96..ca2ecdff 100644 --- a/wave_vision/src/dataset/VioDataset.cpp +++ b/wave_vision/src/dataset/VioDataset.cpp @@ -1,3 +1,429 @@ #include "wave/vision/dataset/VioDataset.hpp" -namespace wave {} // namespace wave +#include // for localtime +#include // for put_time +#include // for create_directories +#include "wave/utils/config.hpp" +#include "wave/vision/dataset/VoDataset.hpp" + 
+namespace wave {
+
+// Helper functions used only in this file
+namespace {
+
+// Given a time point, produce a string matching "2011-09-26 14:02:22.484109563"
+std::string formatTimestamp(
+  const std::chrono::system_clock::time_point &system_time_point) {
+    auto t = std::chrono::system_clock::to_time_t(system_time_point);
+
+    // time_t does not have fractional seconds, so need to do this extra
+    auto ns_since_epoch = std::chrono::duration_cast<std::chrono::nanoseconds>(
+      system_time_point.time_since_epoch());
+    auto ns = ns_since_epoch.count() % static_cast<int64_t>(1e9);
+
+    std::stringstream ss;
+    ss << std::put_time(std::localtime(&t), "%Y-%m-%d %H:%M:%S.");  // localtime matches mktime in reader
+    ss << std::setw(9) << std::setfill('0') << ns;
+    return ss.str();
+}
+
+// Read a string matching "2011-09-26 14:02:22.484109563" from an input stream,
+// and produce a time point. Return true on success
+bool readTimepointFromStream(
+  std::istream &in, std::chrono::system_clock::time_point &time_point) {
+    // std::get_time does not have fractional seconds, so it's not trivial.
+ // First we parse the part up to the dot, then the fractional seconds + double fractional_secs; + std::tm tm; + in >> std::get_time(&tm, "%Y-%m-%d %H:%M:%S"); + in >> fractional_secs; + time_point = std::chrono::system_clock::from_time_t(std::mktime(&tm)); + + // Add fractional seconds + const auto d = + std::chrono::duration_cast( + std::chrono::duration(fractional_secs)); + time_point += d; + + // If the stream ended, we will return false + return in.good(); +} + + +// Data contained in the kitti format data files +struct OxtsData { + double lat, lon, alt, roll, pitch, yaw; + double vn, ve, vf, vl, vu, ax, ay, az; + double af, al, au, wx, wy, wz, wf, wl, wu; + double pos_accuracy, vel_accuracy; + int navstat, numsats, posmode, velmode, orimode; +}; + +std::istream &operator>>(std::istream &in, OxtsData &o) { + in >> o.lat >> o.lon >> o.alt; + in >> o.roll >> o.pitch >> o.yaw; + in >> o.vn >> o.ve >> o.vf; + in >> o.vl >> o.vu; + in >> o.ax >> o.ay >> o.az; + in >> o.af >> o.al >> o.au; + in >> o.wx >> o.wy >> o.wz; + in >> o.wf >> o.wl >> o.wu; + in >> o.pos_accuracy >> o.vel_accuracy; + in >> o.navstat >> o.numsats >> o.posmode >> o.velmode >> o.orimode; + return in; +} + +std::ostream &operator<<(std::ostream &os, const OxtsData &o) { + os << o.lat << " " << o.lon << " " << o.alt << " "; + os << o.roll << " " << o.pitch << " " << o.yaw << " "; + os << o.vn << " " << o.ve << " " << o.vf << " "; + os << o.vl << " " << o.vu << " "; + os << o.ax << " " << o.ay << " " << o.az << " "; + os << o.af << " " << o.al << " " << o.au << " "; + os << o.wx << " " << o.wy << " " << o.wz << " "; + os << o.wf << " " << o.wl << " " << o.wu << " "; + os << o.pos_accuracy << " " << o.vel_accuracy << " "; + os << o.navstat << " " << o.numsats << " " << o.posmode << " " << o.velmode + << " " << o.orimode; + return os; +} + +// Convert lat/lon coordinates to local cartsian using simple spherical model +// (good enough for points close to the origin) +// lla vectors are lat 
(degrees), lon (degrees), alt (m) +Vec3 enuFromLla(Vec3 lla_datum, Vec3 lla) { + const auto radius = 6378137; + const auto l = radius * M_PI / 180; + const auto x = (lla[1] - lla_datum[1]) * l * cos(lla[0] * M_PI / 180); + const auto y = (lla[0] - lla_datum[0]) * l; + return Vec3{x, y, lla[2]}; +} + +// Convert local cartesian to lat/lon using simple model +Vec3 llaFromEnu(Vec3 lla_datum, Vec3 enu) { + const auto radius = 6378137; + const auto l = radius * M_PI / 180; + const auto lat = lla_datum[0] + enu[1] / l; + const auto lon = lla_datum[1] + enu[0] / cos(lat * M_PI / 180) / l; + return Vec3{lat, lon, enu[2]}; +} + +// For each time in the container, write a timestamp +void writeTimestampsToFile( + const VioDataset::PoseContainer &container, + const std::chrono::system_clock::time_point &start_time, + const std::string &output_path) { + std::ofstream timestamps_file{output_path}; + + // Let the first measurement in the container occur at that start_time + // (with the current setup, we have to convert from steady_clock to + // system_clock values - todo?) + const auto steady_start_time = container.begin()->time_point; + + for (const auto &meas : container) { + auto time_point = start_time + (meas.time_point - steady_start_time); + timestamps_file << formatTimestamp(time_point) << std::endl; + } +} + +// load calibration files into dataset +void loadCalibration(const std::string &input_dir, VioDataset &dataset) { + // The dataset files happen to be valid yaml with the "name: value" format, + // but the matrix values are not formatted as arrays. To yaml, they are + // strings. + // + // As a quick solution, use the yaml Parser to read strings first. Then + // re-parse each row as a matrix. We only care about a few fields for now. 
+ std::string string_S_rect, string_P_rect; + + wave::ConfigParser parser; + parser.addParam("S_rect_00", &string_S_rect); + parser.addParam("P_rect_00", &string_P_rect); + parser.load(input_dir + "/calib_cam_to_cam.txt"); + + const auto camera_P = matrixFromString<3, 4>(string_P_rect); + const auto image_dims = matrixFromString<2, 1>(string_S_rect); + dataset.camera.K = camera_P.leftCols<3>(); + dataset.camera.image_width = static_cast(image_dims.x()); + dataset.camera.image_height = static_cast(image_dims.y()); + + // Now read calib_imu_to_velo and calib_velo_to_cam the same way + std::string string_R_VI, string_T_VI, string_R_CV, string_T_CV; + parser = wave::ConfigParser{}; + parser.addParam("R", &string_R_VI); + parser.addParam("T", &string_T_VI); + parser.load(input_dir + "/calib_imu_to_velo.txt"); + + parser = wave::ConfigParser{}; + parser.addParam("R", &string_R_CV); + parser.addParam("T", &string_T_CV); + parser.load(input_dir + "/calib_velo_to_cam.txt"); + + const auto R_VI = matrixFromString<3, 3>(string_R_VI); + const auto R_CV = matrixFromString<3, 3>(string_R_CV); + const auto V_p_VI = matrixFromString<3, 1>(string_T_VI); + const auto C_p_CV = matrixFromString<3, 1>(string_T_CV); + + // Now calculate what we really want: calibration imu-to-cam. 
+    Mat3 R_IC = (R_CV * R_VI).transpose();
+    Vec3 C_p_CI = C_p_CV + R_CV * V_p_VI;
+
+    dataset.R_IC.setFromMatrix(R_IC);
+    dataset.I_p_IC = R_IC * -C_p_CI;
+}
+
+// Load landmarks into dataset
+void loadLandmarks(const std::string &input_dir, VioDataset &dataset) {
+    std::ifstream landmarks_file{input_dir + "/landmarks.txt"};
+    for (LandmarkId id; landmarks_file >> id;) {
+        auto landmark_pos = matrixFromStream<3, 1>(landmarks_file);
+        dataset.landmarks.emplace(id, landmark_pos);
+    }
+}
+
+// Produce a filename with the format "0000000000.txt"
+std::string paddedTxtFilename(int i) {
+    std::stringstream ss;
+    ss.fill('0');
+    ss.width(10);
+    ss << i << ".txt";
+    return ss.str();
+}
+
+// Load gps and imu measurements into dataset
+void loadPoses(const std::string &input_dir, VioDataset &dataset) {
+    const auto timestamps_filename = input_dir + "/oxts/timestamps.txt";
+    auto timestamps_file = std::ifstream{timestamps_filename};
+
+    if (!timestamps_file) {
+        throw std::runtime_error("Could not read " + timestamps_filename);
+    }
+
+    const auto data_dir = input_dir + "/oxts/data";
+    std::chrono::system_clock::time_point system_start_time, system_time_point;
+    const auto start_time = std::chrono::steady_clock::now();  // arbitrary
+
+    for (int i = 0; readTimepointFromStream(timestamps_file, system_time_point);
+         ++i) {
+        // load the file with format "0000000000.txt"
+        const auto filename = data_dir + "/" + paddedTxtFilename(i);
+        auto data_file = std::ifstream{filename};
+        OxtsData data;
+        data_file >> data;
+        if (!data_file) {
+            throw std::runtime_error("Could not read " + filename);
+        }
+
+        // Use the first position as the datum, and first time as the reference
+        if (i == 0) {
+            dataset.lla_datum << data.lat, data.lon, data.alt;
+            system_start_time = system_time_point;
+        }
+        const auto time_point =
+          start_time + (system_time_point - system_start_time);
+
+
+        VioDataset::PoseValue pose;
+        pose.G_p_GI =
+          enuFromLla(dataset.lla_datum, Vec3{data.lat, data.lon,
data.alt});
+        pose.R_GI.setFromEulerXYZ(Vec3{data.roll, data.pitch, data.yaw});
+        dataset.poses.emplace(time_point, VioDataset::PoseSensor::Pose, pose);
+
+        VioDataset::ImuValue imu;
+        imu.I_vel_GI << data.vf, data.vl, data.vu;
+        imu.I_ang_vel_GI << data.wf, data.wl, data.wu;
+        dataset.imu_measurements.emplace(
+          time_point, VioDataset::ImuSensor::Imu, imu);
+    }
+}
+
+// Load feature measurements into dataset
+void loadObserved(const std::string &input_dir, VioDataset &dataset) {
+    const auto timestamps_filename = input_dir + "/image_00/timestamps.txt";
+    auto timestamps_file = std::ifstream{timestamps_filename};
+
+    if (!timestamps_file) {
+        throw std::runtime_error("Could not read " + timestamps_filename);
+    }
+
+    const auto data_dir = input_dir + "/image_00/features";
+    std::chrono::system_clock::time_point system_start_time, system_time_point;
+    const auto start_time = std::chrono::steady_clock::now();  // arbitrary
+
+    for (ImageNum i = 0;
+         readTimepointFromStream(timestamps_file, system_time_point);
+         ++i) {
+        if (i == 0) {
+            // Use the first time as the reference
+            system_start_time = system_time_point;
+        }
+        const auto time_point =
+          start_time + (system_time_point - system_start_time);
+
+        // Load the file with format "0000000000.txt"
+        const auto filename = data_dir + "/" + paddedTxtFilename(i);
+        auto data_file = std::ifstream{filename};
+        if (!data_file) {
+            throw std::runtime_error("Could not read " + filename);
+        }
+
+        LandmarkId landmark_id;
+        Vec2 meas;
+        // Read each row
+        while (true) {
+            data_file >> landmark_id;
+            meas = matrixFromStream<2, 1>(data_file);
+            if (!data_file) {
+                break;  // couldn't read another row
+            }
+
+            dataset.feature_measurements.emplace(
+              time_point, VioDataset::Camera::Left, landmark_id, i, meas);
+        }
+    }
+}
+
+}  // namespace
+
+void VioDataset::outputToDirectory(const std::string &output_dir) const {
+    boost::filesystem::create_directories(output_dir);
+
+    // Landmarks - not in kitti.
Output using existing vo format + VoDataset vo; + vo.landmarks = this->landmarks; + vo.outputLandmarks(output_dir + "/landmarks.txt"); + + this->outputCalibration(output_dir); + this->outputPoses(output_dir + "/oxts"); + this->outputObserved(output_dir); +} + +// Output calib_cam_to_cam.txt +// Most of this is just placeholder values for now, since our dataset class +// doesn't hold any information about distortion or calibration +void VioDataset::outputCalibration(const std::string &output_dir) const { + std::ofstream cam_file{output_dir + "/calib_cam_to_cam.txt"}; + auto fmt = Eigen::IOFormat{7, Eigen::DontAlignCols, " ", " "}; + + // We ignore the calib_time field for now + cam_file << "calib_time: 01-Jan-2000 12:00:00\n"; + // We ignore the corner_dist field for now (it refers to checkerboard size) + cam_file << "corner_dist: 0.000000e+00\n"; + + // Write only a few ow the parameters for one camera, for now + // S = size of images + Vec2 camera_S{this->camera.image_width, this->camera.image_height}; + cam_file << "S_rect_00: " << camera_S.format(fmt) << std::endl; + + // P = projection matrix + // For the first camera, this is just K with a zero fourth column (for + // additional cameras, P would include the transformation from the first) + Eigen::Matrix camera_P; + camera_P << camera.K, Vec3::Zero(); + cam_file << "P_rect_00: " << camera_P.format(fmt) << std::endl; + // That's it for now. + + // We have calib cam-to-imu but need to output imu_to_velo and velo_to_cam. + // For simplicity, choose velo frame equal to imu frame. 
+ std::ofstream imu_to_velo_file{output_dir + "/calib_imu_to_velo.txt"}; + imu_to_velo_file << "R: " << Mat3::Identity().format(fmt) << std::endl; + imu_to_velo_file << "T: " << Vec3::Zero().format(fmt) << std::endl; + + // Now velo-to-cam == imu-to-cam + Mat3 R_CI = this->R_IC.toRotationMatrix().inverse(); + Vec3 C_p_CI = R_CI * -this->I_p_IC; + std::ofstream velo_to_cam_file{output_dir + "/calib_velo_to_cam.txt"}; + velo_to_cam_file << "R: " << R_CI.format(fmt) << std::endl; + velo_to_cam_file << "T: " << C_p_CI.format(fmt) << std::endl; +} + +void VioDataset::outputPoses(const std::string &output_dir) const { + boost::filesystem::create_directories(output_dir + "/data"); + + writeTimestampsToFile( + this->poses, this->start_time, output_dir + "/timestamps.txt"); + + int i = 0; + auto imu_iter = this->imu_measurements.cbegin(); + auto pose_iter = this->poses.cbegin(); + + // Note we assume they are the same length. + while (imu_iter != this->imu_measurements.cend() && + pose_iter != this->poses.cend()) { + std::ofstream data_file{output_dir + "/data/" + paddedTxtFilename(i)}; + + // We only write to some of the values right now. The others are zero. 
+ auto data = OxtsData{}; + + const PoseValue &v = pose_iter->value; + auto lla = llaFromEnu(this->lla_datum, v.G_p_GI); + data.lat = lla[0]; + data.lon = lla[1]; + data.alt = lla[2]; + + Vec3 euler = v.R_GI.toRotationMatrix().eulerAngles(0, 1, 2); + data.roll = euler[0]; + data.pitch = euler[1]; + data.yaw = euler[2]; + + ImuValue m = imu_iter->value; + data.vf = m.I_vel_GI[0]; + data.vl = m.I_vel_GI[1]; + data.vu = m.I_vel_GI[2]; + data.wf = m.I_ang_vel_GI[0]; + data.wl = m.I_ang_vel_GI[1]; + data.wu = m.I_ang_vel_GI[2]; + + data_file << std::setprecision(12) << data << std::endl; + + ++pose_iter; + ++imu_iter; + ++i; + } +} + +void VioDataset::outputObserved(const std::string &output_dir) const { + boost::filesystem::create_directories(output_dir + "/image_00/features"); + auto timestamps_file = + std::ofstream{output_dir + "/image_00/timestamps.txt"}; + + // Let the first measurement in the container occur at that start_time + // (with the current setup, we have to convert from steady_clock to + // system_clock values) + const auto steady_start_time = + this->feature_measurements.cbegin()->time_point; + + // Loop through all measurements, starting a new file for each new timestamp + int i = 0; + auto time_point = this->start_time; + std::ofstream data_file; + for (const auto &meas : this->feature_measurements) { + const auto new_time_point = + start_time + (meas.time_point - steady_start_time); + if (i == 0 || new_time_point != time_point) { + // Write a timestamp + time_point = new_time_point; + timestamps_file << formatTimestamp(time_point) << std::endl; + // Open the next features file + data_file = std::ofstream{output_dir + "/image_00/features/" + + paddedTxtFilename(i++)}; + } + + data_file << meas.landmark_id << " "; + data_file << meas.value.x() << " " << meas.value.y() << std::endl; + } +} + + +VioDataset VioDataset::loadFromDirectory(const std::string &input_dir) { + VioDataset dataset; + + loadCalibration(input_dir, dataset); + 
loadLandmarks(input_dir, dataset); + loadPoses(input_dir, dataset); + loadObserved(input_dir, dataset); + + return dataset; +} + +} // namespace wave diff --git a/wave_vision/src/dataset/VioDatasetGenerator.cpp b/wave_vision/src/dataset/VioDatasetGenerator.cpp index 763473f3..3f6ff4cf 100644 --- a/wave_vision/src/dataset/VioDatasetGenerator.cpp +++ b/wave_vision/src/dataset/VioDatasetGenerator.cpp @@ -29,7 +29,7 @@ VioDataset::PoseValue poseFromVoState(const VoInstant &state) { return pose; } -// Get a TimePoint given a start time and duration in seconds +// Get a TimePoint given a start time and duration in seconds TimePoint timePointAfterStartTime(TimePoint time_start, double dt_seconds) { const auto d = std::chrono::duration(dt_seconds); return time_start + std::chrono::duration_cast(d); @@ -95,6 +95,7 @@ void addImuStatesToDataset(TimePoint time_start, } // namespace + VioDataset VioDatasetGenerator::generate() { VioDataset dataset; @@ -108,18 +109,27 @@ VioDataset VioDatasetGenerator::generate() { // Handle measurements for (auto i = 0u; i < vo.states.size(); ++i) { - addVoStateToDataset(i, time_start, vo.states[i], dataset); + const auto time_point = + timePointAfterStartTime(time_start, vo.states[i].time); + + addVoStateToDataset(i, time_point, vo.states[i], dataset); // Calculate imu measurements - // (the last one is missing) - if (i > 0) { + // (use forward difference) + if (i + 1 < vo.states.size()) { + addImuStatesToDataset( + time_point, vo.states[i], vo.states[i + 1], dataset); + } else { + // for simplicity just copy the second-last imu reading as the last addImuStatesToDataset( - time_start, vo.states[i - 1], vo.states[i], dataset); + time_point, vo.states[i - 1], vo.states[i], dataset); } } // Handle calibration - dataset.camera_K = vo.camera_K; + dataset.camera.K = vo.camera_K; + dataset.camera.image_width = 2 * dataset.camera.cx(); + dataset.camera.image_height = 2 * dataset.camera.cy(); // In VoDatasetGenerator, the transformation from body to 
camera is just a // rotation from NWU to EDN diff --git a/wave_vision/src/dataset/VoDataset.cpp b/wave_vision/src/dataset/VoDataset.cpp index 0a422c8a..8c1123b5 100644 --- a/wave_vision/src/dataset/VoDataset.cpp +++ b/wave_vision/src/dataset/VoDataset.cpp @@ -227,12 +227,10 @@ VoDataset VoDatasetGenerator::generate() { // simulate synthetic VO dataset double dt = 0.01; - TwoWheelRobot2DModel robot{Vec3{0.0, 0.0, 0.0}}; + Vec3 pose2d{0.0, 0.0, 0.0}; + TwoWheelRobot2DModel robot{pose2d}; for (int i = 0; i < 300; i++) { - // update state - Vec3 pose2d = robot.update(u, dt); - // convert 2d pose to 3d pose (pose of Body in Global frame) auto G_p_GB = Vec3{pose2d.x(), pose2d.y(), 0}; auto q_GB = Quaternion{Eigen::AngleAxisd{pose2d.z(), Vec3::UnitZ()}}; @@ -245,14 +243,23 @@ VoDataset VoDatasetGenerator::generate() { Eigen::AngleAxisd(-M_PI_2, Vec3::UnitX()); Quaternion q_GC = q_GB * q_BC; - int retval = this->camera.observeLandmarks( - dt, dataset.landmarks, q_GC, G_p_GB, instant.features_observed); + // We want to force observations on the zeroth frame. + double force = (i == 0 ? 
1.0 : 0); + + int retval = this->camera.observeLandmarks(dt + force, + dataset.landmarks, + q_GC, + G_p_GB, + instant.features_observed); if (retval == 0) { instant.time = i * dt; instant.robot_G_p_GB = G_p_GB; instant.robot_q_GB = q_GB; dataset.states.push_back(instant); } + + // update state + pose2d = robot.update(u, dt); } return dataset; diff --git a/wave_vision/tests/dataset_tests/vio_dataset_tests.cpp b/wave_vision/tests/dataset_tests/vio_dataset_tests.cpp index ebfab754..d904a9b1 100644 --- a/wave_vision/tests/dataset_tests/vio_dataset_tests.cpp +++ b/wave_vision/tests/dataset_tests/vio_dataset_tests.cpp @@ -6,6 +6,7 @@ namespace wave { const auto test_config_file = "tests/data/vo_test.yaml"; +const auto test_output_dir = "/tmp/dataset_test"; TEST(VioDataset, constructor) { VioDataset dataset{}; @@ -21,4 +22,70 @@ TEST(VioDataset, generate) { EXPECT_FALSE(dataset.poses.empty()); } +TEST(VioDataset, writeAndReadToFile) { + // To test both operations, we write to files, read them, and ensure the + // resulting dataset is the one we started with + const auto tol = 1e-4; // Required precision for Eigen's isApprox + + VioDatasetGenerator generator; + + remove_dir(test_output_dir); + generator.configure(test_config_file); + auto dataset = generator.generate(); + + // Write + dataset.outputToDirectory(test_output_dir); + + // Read + auto input = VioDataset::loadFromDirectory(test_output_dir); + + // Compare landmarks + for (const auto &l : dataset.landmarks) { + const auto id = l.first; + ASSERT_TRUE(input.landmarks.count(id)); + EXPECT_PRED3(VectorsNearPrec, l.second, input.landmarks[id], tol); + } + + // Compare calibration + EXPECT_PRED2(MatricesNear, dataset.camera.K, input.camera.K); + EXPECT_PRED2(VectorsNear, dataset.I_p_IC, input.I_p_IC); + EXPECT_PRED2(MatricesNear, + dataset.R_IC.toRotationMatrix(), + input.R_IC.toRotationMatrix()); + + // Compare poses + ASSERT_EQ(dataset.poses.size(), input.poses.size()); + for (auto it = dataset.poses.cbegin(), in_it 
= input.poses.cbegin(); + it != dataset.poses.cend(); + ++it, ++in_it) { + const auto &a = it->value; + const auto &b = in_it->value; + EXPECT_PRED3(VectorsNearPrec, a.G_p_GI, b.G_p_GI, tol); + EXPECT_PRED2( + MatricesNear, a.R_GI.toRotationMatrix(), b.R_GI.toRotationMatrix()); + } + + ASSERT_EQ(dataset.imu_measurements.size(), input.imu_measurements.size()); + for (auto it = dataset.imu_measurements.cbegin(), + in_it = input.imu_measurements.cbegin(); + it != dataset.imu_measurements.cend(); + ++it, ++in_it) { + const auto &a = it->value; + const auto &b = in_it->value; + EXPECT_PRED3(VectorsNearPrec, a.I_vel_GI, b.I_vel_GI, tol); + EXPECT_PRED3(VectorsNearPrec, a.I_ang_vel_GI, b.I_ang_vel_GI, tol); + } + + // Compare feature measurements + ASSERT_EQ(dataset.feature_measurements.size(), + input.feature_measurements.size()); + for (auto it = dataset.feature_measurements.cbegin(), + in_it = input.feature_measurements.cbegin(); + it != dataset.feature_measurements.cend(); + ++it, ++in_it) { + EXPECT_EQ(it->landmark_id, in_it->landmark_id); + EXPECT_PRED3(VectorsNearPrec, it->value, in_it->value, tol); + } +} + } // wave namespace