diff --git a/.gitmodules b/.gitmodules index 5b8b83f..5015624 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "third_party/tomlplusplus"] path = third_party/tomlplusplus url = https://github.com/marzer/tomlplusplus.git +[submodule "third_party/nlohmann"] + path = third_party/nlohmann + url = https://github.com/nlohmann/json.git diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..c1dfc2a --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,58 @@ +# Logloader + +## Architecture + +- `LogLoader` orchestrates download (MAVSDK) + upload (all backends) +- `UploadBackend` base class with SQLite DB tracking +- `FlightReviewBackend` extends UploadBackend - HTTP multipart upload to flight review servers +- `RobotoBackend` extends UploadBackend - RobotoAI API + S3 upload with SigV4 signing +- Config at `~/.local/share/logloader/config.toml` +- Local server is always Flight Review (installed with ARK OS) + +## Build + +- `make` in this directory (cmake under the hood) +- C++20, `-Wall -Wextra -Werror -Wpedantic` +- MAVSDK is NOT installed on dev machines - it's an embedded target dependency + +## Pending Refactoring + +The following issues were identified during code review and should be addressed: + +### Bugs (fix first) + +1. **Data race on `_should_exit` in UploadBackend** (`UploadBackend.hpp:69`): `_should_exit` is a plain `bool` written from the main thread via `stop()` and read from the upload thread. Make it `std::atomic`. + +2. **Data race on `_loop_disabled` in LogLoader** (`LogLoader.hpp:65`): Same issue — plain `bool` accessed from multiple threads. Make it `std::atomic`. + +3. **`upload_pending_logs` returns early instead of continuing** (`LogLoader.cpp:367-375`): If one log entry has an empty UUID or missing filepath, the function `return`s, which kills the entire upload queue for that backend. Change `return` to `continue`. + +4. **Unsafe `getenv("HOME")`** (`main.cpp:21`): `getenv("HOME")` can return `nullptr` (e.g. systemd service). 
`std::string(nullptr)` is UB. Guard it. + +5. **Hardcoded `substr(8)` in RobotoBackend** (`RobotoBackend.cpp:118` and throughout): Assumes URL starts with exactly `https://`. Extract the host at construction time, similar to `FlightReviewBackend::sanitize_url_and_determine_protocol()`. + +6. **Missing space in log message** (`LogLoader.cpp:176`): `"Received " << size << "log entries"` — missing space before "log". + +### Restructure: Separate download tracking from upload backends + +**Problem:** `UploadBackend` currently owns download-related methods (`num_logs_to_download`, `get_next_log_to_download`, `update_download_status`, `add_log_entry`). Download is a single operation but is tracked redundantly in 3 separate databases. Every download-related call must be replicated across all backends: +```cpp +_local_server->add_log_entry(entry); +_remote_server->add_log_entry(entry); +if (_roboto_backend) _roboto_backend->add_log_entry(entry); +``` +This pattern repeats in `request_log_entries` and `download_next_log`. + +**Solution:** Move download tracking to `LogLoader` with a single `logs.db`. Each `UploadBackend` only tracks its own upload state. Remove `add_log_entry`, `update_download_status`, `num_logs_to_download`, `get_next_log_to_download` from `UploadBackend`. + +### Restructure: Simplify `upload_log` + +**Problem:** `UploadBackend::upload_log()` (`UploadBackend.cpp:177-252`) reverse-engineers the UUID from the filename by parsing out the id/date, reading file size from disk, fabricating a `mavsdk::LogFiles::Entry`, and hashing. This is fragile — if `fs::file_size()` differs from the original MAVSDK-reported `size_bytes`, the UUID won't match and the upload silently fails. + +**Solution:** The caller (`upload_pending_logs`) already has the UUID. Pass it through, or restructure so `upload_log` receives the UUID directly. The filename-parsing logic can be removed entirely. 
+ +### Nice-to-have + +- **No HTTP timeouts in FlightReviewBackend**: httplib clients have no timeout. A hanging server blocks the upload thread forever. Add `set_read_timeout()`. +- **New SSL client per API call in RobotoBackend**: 5 TLS handshakes per upload. Consider reusing a single client across the upload flow. +- **Redundant settings storage**: Both `_settings` and inherited `_base_settings` store `logs_directory`, `db_path`, `upload_enabled`. Pick one place. diff --git a/CMakeLists.txt b/CMakeLists.txt index 6529639..673a625 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,11 +28,14 @@ find_package(MAVSDK REQUIRED) include_directories(third_party/cpp-httplib/) include_directories(third_party/tomlplusplus/) +include_directories(third_party/nlohmann/include/) include_directories(${SQLite3_INCLUDE_DIRS}) add_executable(${PROJECT_NAME} src/main.cpp - src/ServerInterface.cpp + src/UploadBackend.cpp + src/FlightReviewBackend.cpp + src/RobotoBackend.cpp src/LogLoader.cpp) target_link_libraries(${PROJECT_NAME} diff --git a/config.toml b/config.toml index ff208c2..9402a9f 100644 --- a/config.toml +++ b/config.toml @@ -4,3 +4,9 @@ remote_server = "https://review.px4.io" email = "" upload_enabled = false public_logs = false + +[roboto] +upload_enabled = false +api_url = "https://api.roboto.ai" +api_token = "" +device_id = "" diff --git a/src/FlightReviewBackend.cpp b/src/FlightReviewBackend.cpp new file mode 100644 index 0000000..299bc45 --- /dev/null +++ b/src/FlightReviewBackend.cpp @@ -0,0 +1,144 @@ +#include "FlightReviewBackend.hpp" +#include "Log.hpp" + +#include +#include +#include +#include +#define CPPHTTPLIB_OPENSSL_SUPPORT +#include + +namespace fs = std::filesystem; + +FlightReviewBackend::FlightReviewBackend(const FlightReviewBackend::Settings& settings) + : UploadBackend(settings.db_path, settings.upload_enabled) + , _settings(settings) +{ + sanitize_url_and_determine_protocol(); +} + +void 
FlightReviewBackend::sanitize_url_and_determine_protocol() +{ + std::string url = _settings.server_url; + std::string sanitized_url; + Protocol protocol; + + std::string http_prefix = "http://"; + std::string https_prefix = "https://"; + + size_t pos = std::string::npos; + + if ((pos = url.find(https_prefix)) != std::string::npos) { + sanitized_url = url.substr(pos + https_prefix.length()); + protocol = Protocol::Https; + + } else if ((pos = url.find(http_prefix)) != std::string::npos) { + sanitized_url = url.substr(pos + http_prefix.length()); + protocol = Protocol::Http; + + } else { + sanitized_url = url; + protocol = Protocol::Https; + } + + _settings.server_url = sanitized_url; + _protocol = protocol; +} + +FlightReviewBackend::UploadResult FlightReviewBackend::upload(const std::string& filepath) +{ + // Skip files that are in progress (have a .lock file) + if (fs::exists(filepath + ".lock")) { + return {false, 0, "File is locked (currently being downloaded)"}; + } + + // Skip files that don't exist + if (!fs::exists(filepath)) { + return {false, 404, "Log file does not exist: " + filepath}; + } + + // Skip files with size zero + if (fs::file_size(filepath) == 0) { + return {false, 0, "Skipping zero-size log file: " + filepath}; + } + + if (!server_reachable()) { + return {false, 0, "Server unreachable: " + _settings.server_url}; + } + + std::ifstream file(filepath, std::ios::binary); + + if (!file) { + return {false, 0, "Could not open file: " + filepath}; + } + + // Build multi-part form data + httplib::MultipartFormDataItems items = { + {"type", _settings.public_logs ? "flightreport" : "personal", "", ""}, // NOTE: backend logic is funky + {"description", "Uploaded by logloader", "", ""}, + {"feedback", "", "", ""}, + {"email", _settings.user_email, "", ""}, + {"source", "auto", "", ""}, + {"videoUrl", "", "", ""}, + {"rating", "", "", ""}, + {"windSpeed", "", "", ""}, + {"public", _settings.public_logs ? 
"true" : "false", "", ""}, + }; + + std::string content((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + items.push_back({"filearg", content, filepath, "application/octet-stream"}); + + LOG("Uploading " << fs::path(filepath).filename().string() << " to " << _settings.server_url); + + // Post multi-part form + httplib::Result res; + + if (_protocol == Protocol::Https) { + httplib::SSLClient cli(_settings.server_url); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + res = cli.Post("/upload", items); + + } else { + httplib::Client cli(_settings.server_url); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + res = cli.Post("/upload", items); + } + + if (res && res->status == 302) { + return {true, 302, "Success: " + _settings.server_url + res->get_header_value("Location")}; + + } else if (res && res->status == 400) { + return {false, 400, "Bad Request - Will not retry"}; + + } else { + return {false, res ? res->status : 0, "Will retry later"}; + } +} + +bool FlightReviewBackend::server_reachable() +{ + httplib::Result res; + + if (_protocol == Protocol::Https) { + httplib::SSLClient cli(_settings.server_url); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + res = cli.Get("/"); + + } else { + httplib::Client cli(_settings.server_url); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + res = cli.Get("/"); + } + + bool success = res && res->status == 200; + + if (!success) { + LOG("Connection to " << _settings.server_url << " failed: " << (res ? 
std::to_string(res->status) : "No response")); + } + + return success; +} diff --git a/src/FlightReviewBackend.hpp b/src/FlightReviewBackend.hpp new file mode 100644 index 0000000..ebca0e5 --- /dev/null +++ b/src/FlightReviewBackend.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include "UploadBackend.hpp" + +class FlightReviewBackend : public UploadBackend +{ +public: + struct Settings { + std::string server_url; + std::string user_email; + std::string db_path; + bool upload_enabled {}; + bool public_logs {}; + }; + + FlightReviewBackend(const Settings& settings); + +protected: + UploadResult upload(const std::string& filepath) override; + +private: + enum class Protocol { + Http, + Https + }; + + void sanitize_url_and_determine_protocol(); + bool server_reachable(); + + Settings _settings; + Protocol _protocol {Protocol::Https}; +}; diff --git a/src/LogLoader.cpp b/src/LogLoader.cpp index 357b0fe..c06ec1a 100644 --- a/src/LogLoader.cpp +++ b/src/LogLoader.cpp @@ -3,8 +3,8 @@ #include #include #include -#include #include +#include namespace fs = std::filesystem; @@ -19,34 +19,55 @@ LogLoader::LogLoader(const LogLoader::Settings& settings) _logs_directory = _settings.application_directory + "logs/"; - // Setup local server interface - ServerInterface::Settings local_server_settings = { + // Initialize downloads database + if (!init_downloads_db()) { + std::cerr << "Failed to initialize downloads database" << std::endl; + } + + // Setup local flight review backend + FlightReviewBackend::Settings local_server_settings = { .server_url = settings.local_server, .user_email = "", - .logs_directory = _logs_directory, .db_path = _settings.application_directory + "local_server.db", .upload_enabled = true, // Always upload to local server .public_logs = true, // Public required true for searching using Web UI }; - // Setup remote server interface - ServerInterface::Settings remote_server_settings = { + // Setup remote flight review backend + FlightReviewBackend::Settings 
remote_server_settings = { .server_url = settings.remote_server, .user_email = settings.email, - .logs_directory = _logs_directory, .db_path = _settings.application_directory + "remote_server.db", .upload_enabled = settings.upload_enabled, .public_logs = settings.public_logs, }; - _local_server = std::make_shared(local_server_settings); - _remote_server = std::make_shared(remote_server_settings); + _local_server = std::make_shared(local_server_settings); + _remote_server = std::make_shared(remote_server_settings); + + // Setup Roboto backend + if (!settings.roboto_api_url.empty() && !settings.roboto_api_token.empty()) { + RobotoBackend::Settings roboto_settings = { + .api_url = settings.roboto_api_url, + .api_token = settings.roboto_api_token, + .device_id = settings.roboto_device_id, + .db_path = _settings.application_directory + "roboto.db", + .upload_enabled = settings.roboto_upload_enabled, + }; + + _roboto_backend = std::make_shared(roboto_settings); + } std::cout << std::fixed << std::setprecision(8); fs::create_directories(_logs_directory); } +LogLoader::~LogLoader() +{ + close_downloads_db(); +} + void LogLoader::stop() { { @@ -56,6 +77,237 @@ void LogLoader::stop() _exit_cv.notify_all(); } +// --- Download tracking database --- + +bool LogLoader::init_downloads_db() +{ + std::string db_path = _settings.application_directory + "downloads.db"; + int rc = sqlite3_open(db_path.c_str(), &_downloads_db); + + if (rc != SQLITE_OK) { + std::cerr << "Cannot open downloads database: " << sqlite3_errmsg(_downloads_db) << std::endl; + sqlite3_close(_downloads_db); + _downloads_db = nullptr; + return false; + } + + const char* create_table = + "CREATE TABLE IF NOT EXISTS logs (" + " uuid TEXT PRIMARY KEY," + " id INTEGER," + " date TEXT," + " size_bytes INTEGER," + " downloaded INTEGER DEFAULT 0" + ");"; + + char* error_msg = nullptr; + rc = sqlite3_exec(_downloads_db, create_table, nullptr, nullptr, &error_msg); + + if (rc != SQLITE_OK) { + std::cerr << "SQL error 
creating downloads table: " << error_msg << std::endl; + sqlite3_free(error_msg); + return false; + } + + return true; +} + +void LogLoader::close_downloads_db() +{ + if (_downloads_db) { + sqlite3_close(_downloads_db); + _downloads_db = nullptr; + } +} + +std::string LogLoader::generate_uuid(const mavsdk::LogFiles::Entry& entry) +{ + // Create a unique identifier based on date and size + std::stringstream ss; + ss << entry.date << "_" << entry.size_bytes; + + // Use a simple hash for the UUID + std::hash hasher; + size_t hash = hasher(ss.str()); + + ss.str(""); + ss << std::hex << std::setw(16) << std::setfill('0') << hash; + return ss.str(); +} + +bool LogLoader::add_log_entry(const mavsdk::LogFiles::Entry& entry) +{ + std::string uuid = generate_uuid(entry); + + // Check if already exists + sqlite3_stmt* stmt; + std::string check_query = "SELECT COUNT(*) FROM logs WHERE uuid = ?"; + + if (sqlite3_prepare_v2(_downloads_db, check_query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing add_log_entry check: " << sqlite3_errmsg(_downloads_db) << std::endl; + return false; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + + bool exists = false; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + exists = sqlite3_column_int(stmt, 0) > 0; + } + + sqlite3_finalize(stmt); + + if (exists) { + return true; + } + + // Insert + std::string insert_query = + "INSERT INTO logs (uuid, id, date, size_bytes, downloaded) " + "VALUES (?, ?, ?, ?, 0)"; + + if (sqlite3_prepare_v2(_downloads_db, insert_query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing add_log_entry insert: " << sqlite3_errmsg(_downloads_db) << std::endl; + return false; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + sqlite3_bind_int(stmt, 2, entry.id); + sqlite3_bind_text(stmt, 3, entry.date.c_str(), -1, SQLITE_STATIC); + sqlite3_bind_int(stmt, 4, entry.size_bytes); + + bool success = sqlite3_step(stmt) == 
SQLITE_DONE; + sqlite3_finalize(stmt); + + return success; +} + +bool LogLoader::update_download_status(const std::string& uuid, bool downloaded) +{ + std::string query = "UPDATE logs SET downloaded = ? WHERE uuid = ?"; + sqlite3_stmt* stmt; + + if (sqlite3_prepare_v2(_downloads_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing update_download_status: " << sqlite3_errmsg(_downloads_db) << std::endl; + return false; + } + + sqlite3_bind_int(stmt, 1, downloaded ? 1 : 0); + sqlite3_bind_text(stmt, 2, uuid.c_str(), -1, SQLITE_STATIC); + + bool success = sqlite3_step(stmt) == SQLITE_DONE; + sqlite3_finalize(stmt); + + return success; +} + +uint32_t LogLoader::num_logs_to_download() +{ + sqlite3_stmt* stmt; + std::string query = "SELECT COUNT(*) FROM logs WHERE downloaded = 0"; + + if (sqlite3_prepare_v2(_downloads_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing num_logs_to_download: " << sqlite3_errmsg(_downloads_db) << std::endl; + return 0; + } + + uint32_t count = 0; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + count = sqlite3_column_int(stmt, 0); + } + + sqlite3_finalize(stmt); + return count; +} + +LogLoader::DownloadEntry LogLoader::get_next_log_to_download() +{ + DownloadEntry empty{}; + + sqlite3_stmt* stmt; + std::string query = + "SELECT uuid, id, date, size_bytes FROM logs " + "WHERE downloaded = 0 " + "ORDER BY date DESC, size_bytes DESC LIMIT 1"; + + if (sqlite3_prepare_v2(_downloads_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing get_next_log_to_download: " << sqlite3_errmsg(_downloads_db) << std::endl; + return empty; + } + + DownloadEntry entry{}; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + const unsigned char* uuid_text = sqlite3_column_text(stmt, 0); + + if (uuid_text != nullptr) { + entry.uuid = reinterpret_cast(uuid_text); + } + + entry.id = sqlite3_column_int(stmt, 1); + + const unsigned char* date_text = 
sqlite3_column_text(stmt, 2); + + if (date_text != nullptr) { + entry.date = reinterpret_cast(date_text); + } + + entry.size_bytes = sqlite3_column_int(stmt, 3); + } + + sqlite3_finalize(stmt); + return entry; +} + +std::string LogLoader::filepath_from_entry(const mavsdk::LogFiles::Entry& entry) const +{ + std::ostringstream ss; + ss << _logs_directory << "LOG" << std::setfill('0') << std::setw(4) << entry.id << "_" << entry.date << ".ulg"; + return ss.str(); +} + +std::string LogLoader::filepath_from_uuid(const std::string& uuid) const +{ + sqlite3_stmt* stmt; + std::string query = "SELECT id, date FROM logs WHERE uuid = ?"; + + if (sqlite3_prepare_v2(_downloads_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing filepath_from_uuid: " << sqlite3_errmsg(_downloads_db) << std::endl; + return ""; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + + std::string filepath; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + int id = sqlite3_column_int(stmt, 0); + const unsigned char* date_text = sqlite3_column_text(stmt, 1); + + if (date_text != nullptr) { + std::string date = reinterpret_cast(date_text); + std::ostringstream ss; + ss << _logs_directory << "LOG" << std::setfill('0') << std::setw(4) << id << "_" << date << ".ulg"; + filepath = ss.str(); + } + } + + sqlite3_finalize(stmt); + return filepath; +} + +void LogLoader::register_log_with_backends(const std::string& uuid) +{ + _local_server->register_log(uuid); + _remote_server->register_log(uuid); + + if (_roboto_backend) _roboto_backend->register_log(uuid); +} + +// --- MAVSDK connection --- + bool LogLoader::wait_for_mavsdk_connection(double timeout_ms) { LOG("Connecting to " << _settings.mavsdk_connection_url); @@ -84,6 +336,8 @@ bool LogLoader::wait_for_mavsdk_connection(double timeout_ms) return true; } +// --- Main loop --- + void LogLoader::run() { auto upload_thread = std::thread(&LogLoader::upload_logs_thread, this); @@ -99,6 +353,9 @@ void 
LogLoader::run() _loop_disabled = true; _remote_server->stop(); _local_server->stop(); + + if (_roboto_backend) _roboto_backend->stop(); + std::this_thread::sleep_for(std::chrono::seconds(1)); continue; @@ -106,6 +363,9 @@ void LogLoader::run() _loop_disabled = false; _remote_server->start(); _local_server->start(); + + if (_roboto_backend) _roboto_backend->start(); + // Stall for a few seconds to allow logger to finish writing std::this_thread::sleep_for(std::chrono::seconds(3)); } @@ -119,14 +379,14 @@ void LogLoader::run() continue; } - uint32_t total_to_download = _local_server->num_logs_to_download(); + uint32_t total_to_download = num_logs_to_download(); uint32_t num_remaining = total_to_download; while (!_should_exit && num_remaining) { // Download logs until we should exit or there are none left to download LOG("Downloading log " << total_to_download - num_remaining + 1 << "/" << total_to_download); download_next_log(); - num_remaining = _local_server->num_logs_to_download(); + num_remaining = num_logs_to_download(); } // Periodically request log list @@ -140,6 +400,8 @@ void LogLoader::run() upload_thread.join(); } +// --- Download --- + bool LogLoader::request_log_entries() { LOG_DEBUG("Requesting log entries..."); @@ -154,7 +416,7 @@ bool LogLoader::request_log_entries() auto request_end = std::chrono::high_resolution_clock::now(); std::chrono::duration request_duration = request_end - request_start; - LOG_DEBUG("Received " << _log_entries.size() << "log entries in " << request_duration.count() << " seconds"); + LOG_DEBUG("Received " << _log_entries.size() << " log entries in " << request_duration.count() << " seconds"); if (entries_result.first != mavsdk::LogFiles::Result::Success) { LOG("Error getting log entries"); @@ -165,14 +427,13 @@ bool LogLoader::request_log_entries() auto db_start = std::chrono::high_resolution_clock::now(); for (const auto& entry : _log_entries) { - _local_server->add_log_entry(entry); - _remote_server->add_log_entry(entry); + 
add_log_entry(entry); } auto db_end = std::chrono::high_resolution_clock::now(); std::chrono::duration db_duration = db_end - db_start; - LOG_DEBUG("Added log entries to databases in " << db_duration.count() << " seconds"); + LOG_DEBUG("Added log entries to database in " << db_duration.count() << " seconds"); LOG_DEBUG("Total processing time: " << (request_duration + db_duration).count() << " seconds"); return true; @@ -180,8 +441,7 @@ bool LogLoader::request_log_entries() void LogLoader::download_next_log() { - // Get one undownloaded log, use the local server for query - ServerInterface::DatabaseEntry db_entry = _local_server->get_next_log_to_download(); + DownloadEntry db_entry = get_next_log_to_download(); if (db_entry.uuid.empty()) { return; @@ -189,14 +449,12 @@ void LogLoader::download_next_log() // Find the corresponding log entry in the list from the vehicle for (const auto& entry : _log_entries) { - // Match by UUID (which is based on date and size) - std::string uuid = ServerInterface::generate_uuid(entry); + std::string uuid = generate_uuid(entry); if (uuid == db_entry.uuid) { if (download_log(entry)) { - // Update downloaded status in both databases - _local_server->update_download_status(uuid, true); - _remote_server->update_download_status(uuid, true); + update_download_status(uuid, true); + register_log_with_backends(uuid); } return; @@ -205,18 +463,15 @@ void LogLoader::download_next_log() // Couldn't find matching entry in _log_entries // This could happen if the log is no longer available on the vehicle - // Mark it as processed to avoid trying again in both databases - _local_server->update_download_status(db_entry.uuid, true); - _remote_server->update_download_status(db_entry.uuid, true); - - return; + // Mark it as downloaded to avoid trying again + update_download_status(db_entry.uuid, true); } bool LogLoader::download_log(const mavsdk::LogFiles::Entry& entry) { auto prom = std::promise {}; auto future_result = prom.get_future(); - auto 
download_path = _local_server->filepath_from_entry(entry); + auto download_path = filepath_from_entry(entry); // Check and delete file if it already exists. This can occur due to partial download. if (fs::exists(download_path)) { @@ -286,6 +541,8 @@ bool LogLoader::download_log(const mavsdk::LogFiles::Entry& entry) return success; } +// --- Upload --- + void LogLoader::upload_logs_thread() { while (!_should_exit) { @@ -294,7 +551,7 @@ void LogLoader::upload_logs_thread() continue; } - // Query the number of pending log uploads for both servers + // Query the number of pending log uploads for all backends uint32_t num_logs_local = _local_server->num_logs_to_upload(); uint32_t num_logs_remote = _remote_server->num_logs_to_upload(); @@ -310,6 +567,16 @@ void LogLoader::upload_logs_thread() upload_pending_logs(_remote_server); } + // Process uploads for Roboto + if (!_should_exit && _roboto_backend) { + uint32_t num_logs_roboto = _roboto_backend->num_logs_to_upload(); + + if (num_logs_roboto) { + LOG_DEBUG("Uploading " << num_logs_roboto << " logs to ROBOTO"); + upload_pending_logs(_roboto_backend); + } + } + if (!_should_exit) { std::unique_lock lock(_exit_cv_mutex); _exit_cv.wait_for(lock, std::chrono::seconds(10), [this] { return _should_exit.load(); }); @@ -319,27 +586,29 @@ void LogLoader::upload_logs_thread() LOG_DEBUG("upload_logs_thread exiting"); } -void LogLoader::upload_pending_logs(std::shared_ptr server) +void LogLoader::upload_pending_logs(std::shared_ptr backend) { - // Upload all pending logs for this server - while (!_should_exit && server->num_logs_to_upload()) { + // Upload all pending logs for this backend + while (!_should_exit && backend->num_logs_to_upload()) { // Get one log at a time to upload - ServerInterface::DatabaseEntry log_entry = server->get_next_log_to_upload(); + std::string uuid = backend->get_next_log_to_upload(); - if (log_entry.uuid.empty()) { + if (uuid.empty()) { LOG("Log with empty uuid!"); - return; + break; } - std::string 
filepath = server->filepath_from_uuid(log_entry.uuid); + std::string filepath = filepath_from_uuid(uuid); if (filepath.empty()) { - LOG("Could not determine file path for UUID: " << log_entry.uuid); - return; + LOG("Could not determine file path for UUID: " << uuid); + // Blacklist to prevent infinite retry + backend->add_to_blacklist(uuid, "Could not determine file path"); + continue; } - ServerInterface::UploadResult result = server->upload_log(filepath); + UploadBackend::UploadResult result = backend->upload_log(filepath, uuid); if (result.success) { LOG("Log upload SUCCESS: " << result.message); diff --git a/src/LogLoader.hpp b/src/LogLoader.hpp index 0f2df34..275e459 100644 --- a/src/LogLoader.hpp +++ b/src/LogLoader.hpp @@ -5,8 +5,10 @@ #include #include #include +#include -#include "ServerInterface.hpp" +#include "FlightReviewBackend.hpp" +#include "RobotoBackend.hpp" class LogLoader { @@ -19,15 +21,40 @@ class LogLoader std::string application_directory; bool upload_enabled; bool public_logs; + // Roboto settings + std::string roboto_api_url; + std::string roboto_api_token; + std::string roboto_device_id; + bool roboto_upload_enabled; }; LogLoader(const Settings& settings); + ~LogLoader(); void run(); void stop(); bool wait_for_mavsdk_connection(double timeout_ms); private: + // Download tracking (single database for all backends) + bool init_downloads_db(); + void close_downloads_db(); + static std::string generate_uuid(const mavsdk::LogFiles::Entry& entry); + bool add_log_entry(const mavsdk::LogFiles::Entry& entry); + bool update_download_status(const std::string& uuid, bool downloaded); + uint32_t num_logs_to_download(); + + struct DownloadEntry { + std::string uuid; + uint32_t id; + std::string date; + uint32_t size_bytes; + }; + + DownloadEntry get_next_log_to_download(); + std::string filepath_from_entry(const mavsdk::LogFiles::Entry& entry) const; + std::string filepath_from_uuid(const std::string& uuid) const; + // Download bool 
request_log_entries(); void download_next_log(); @@ -35,14 +62,21 @@ class LogLoader // Upload void upload_logs_thread(); - void upload_pending_logs(std::shared_ptr server); + void upload_pending_logs(std::shared_ptr backend); + + // Register a downloaded log with all upload backends + void register_log_with_backends(const std::string& uuid); Settings _settings; std::string _logs_directory; - // Server objects (each with its own database) - std::shared_ptr _local_server; - std::shared_ptr _remote_server; + // Downloads database + sqlite3* _downloads_db = nullptr; + + // Upload backends (each with its own database) + std::shared_ptr _local_server; + std::shared_ptr _remote_server; + std::shared_ptr _roboto_backend; std::shared_ptr _mavsdk; std::shared_ptr _telemetry; @@ -55,5 +89,5 @@ class LogLoader std::condition_variable _exit_cv; std::mutex _exit_cv_mutex; - bool _loop_disabled = false; + std::atomic _loop_disabled{false}; }; diff --git a/src/RobotoBackend.cpp b/src/RobotoBackend.cpp new file mode 100644 index 0000000..f759da6 --- /dev/null +++ b/src/RobotoBackend.cpp @@ -0,0 +1,495 @@ +#include "RobotoBackend.hpp" +#include "Log.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#define CPPHTTPLIB_OPENSSL_SUPPORT +#include +#include + +#include +#include +#include + +namespace fs = std::filesystem; +using json = nlohmann::json; + +RobotoBackend::RobotoBackend(const RobotoBackend::Settings& settings) + : UploadBackend(settings.db_path, settings.upload_enabled) + , _settings(settings) +{ + // Strip scheme from API URL to get host for httplib + std::string url = _settings.api_url; + const std::string https_prefix = "https://"; + const std::string http_prefix = "http://"; + + if (url.find(https_prefix) == 0) { + _api_host = url.substr(https_prefix.size()); + + } else if (url.find(http_prefix) == 0) { + _api_host = url.substr(http_prefix.size()); + + } else { + _api_host = url; + } +} + +UploadBackend::UploadResult 
RobotoBackend::upload(const std::string& filepath) +{ + // Skip files that are in progress (have a .lock file) + if (fs::exists(filepath + ".lock")) { + return {false, 0, "File is locked (currently being downloaded)"}; + } + + // Skip files that don't exist + if (!fs::exists(filepath)) { + return {false, 404, "Log file does not exist: " + filepath}; + } + + // Skip files with size zero + auto file_size = fs::file_size(filepath); + + if (file_size == 0) { + return {false, 0, "Skipping zero-size log file: " + filepath}; + } + + std::string filename = fs::path(filepath).filename().string(); + + LOG("Uploading " << filename << " to Roboto"); + + // Step 1: Create dataset + DatasetInfo dataset; + + if (!create_dataset(filename, dataset)) { + return {false, 0, "Failed to create Roboto dataset"}; + } + + LOG_DEBUG("Created Roboto dataset: " << dataset.dataset_id); + + // Step 2: Begin upload transaction + UploadTransactionInfo txn; + + if (!begin_upload(dataset.dataset_id, filename, file_size, txn)) { + return {false, 0, "Failed to begin Roboto upload transaction"}; + } + + LOG_DEBUG("Upload transaction: " << txn.transaction_id); + + // Step 3: Get S3 credentials + S3Credentials creds; + + if (!get_credentials(txn.transaction_id, creds)) { + return {false, 0, "Failed to get Roboto upload credentials"}; + } + + LOG_DEBUG("Got S3 credentials for bucket: " << creds.bucket << " region: " << creds.region); + + // Step 4: Upload file to S3 + if (!upload_to_s3(filepath, creds, txn.s3_uri)) { + return {false, 0, "Failed to upload file to S3"}; + } + + LOG_DEBUG("File uploaded to S3"); + + // Step 5: Report progress + if (!report_progress(txn.transaction_id, txn.s3_uri)) { + return {false, 0, "Failed to report upload progress"}; + } + + // Step 6: Complete transaction + if (!complete_upload(txn.transaction_id)) { + return {false, 0, "Failed to complete upload transaction"}; + } + + LOG("Uploaded " << filename << " to Roboto dataset " << dataset.dataset_id); + return {true, 200, 
"Uploaded to Roboto dataset " + dataset.dataset_id}; +} + +bool RobotoBackend::create_dataset(const std::string& filename, DatasetInfo& out) +{ + json body; + body["name"] = filename; + body["tags"] = json::array({"auto-upload", "logloader"}); + + json metadata; + metadata["source"] = "ark-logloader"; + + if (!_settings.device_id.empty()) { + metadata["device_id"] = _settings.device_id; + body["device_id"] = _settings.device_id; + } + + body["metadata"] = metadata; + + httplib::SSLClient cli(_api_host); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + cli.set_bearer_token_auth(_settings.api_token); + + auto res = cli.Post("/v1/datasets", + body.dump(), + "application/json"); + + if (!res) { + LOG("Roboto API unreachable"); + return false; + } + + if (res->status == 401 || res->status == 403) { + LOG("Roboto API auth failed: " << res->status); + return false; + } + + if (res->status < 200 || res->status >= 300) { + LOG("Roboto create dataset failed: " << res->status << " " << res->body); + return false; + } + + try { + auto resp = json::parse(res->body); + out.dataset_id = resp["data"]["dataset_id"].get(); + return true; + + } catch (const json::exception& e) { + LOG("Failed to parse create dataset response: " << e.what()); + return false; + } +} + +bool RobotoBackend::begin_upload(const std::string& dataset_id, const std::string& filename, + size_t file_size, UploadTransactionInfo& out) +{ + json association; + association["association_id"] = dataset_id; + association["association_type"] = "dataset"; + + json manifest; + manifest[filename] = file_size; + + json body; + body["association"] = association; + body["origination"] = "ark-logloader"; + body["resource_manifest"] = manifest; + + httplib::SSLClient cli(_api_host); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + cli.set_bearer_token_auth(_settings.api_token); + + auto res = cli.Post("/v1/files/upload", + body.dump(), + "application/json"); + + if (!res || res->status < 200 || 
     res->status >= 300) { + LOG("Roboto begin upload failed: " << (res ? std::to_string(res->status) + " " + res->body : "No response")); + return false; + } + + try { + auto resp = json::parse(res->body); + out.transaction_id = resp["data"]["transaction_id"].get<std::string>(); + auto& mappings = resp["data"]["upload_mappings"]; + + if (mappings.contains(filename)) { + out.s3_uri = mappings[filename].get<std::string>(); + + } else if (!mappings.empty()) { + // Take the first mapping + out.s3_uri = mappings.begin().value().get<std::string>(); + + } else { + LOG("No upload mappings in response"); + return false; + } + + return true; + + } catch (const json::exception& e) { + LOG("Failed to parse begin upload response: " << e.what()); + return false; + } +} + +bool RobotoBackend::get_credentials(const std::string& transaction_id, S3Credentials& out) +{ + httplib::SSLClient cli(_api_host); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + cli.set_bearer_token_auth(_settings.api_token); + + auto res = cli.Get(("/v1/files/upload/" + transaction_id + "/credentials").c_str()); + + if (!res || res->status < 200 || res->status >= 300) { + LOG("Roboto get credentials failed: " << (res ?
     std::to_string(res->status) + " " + res->body : "No response")); + return false; + } + + try { + auto resp = json::parse(res->body); + auto& creds_list = resp["data"]; + + if (creds_list.empty()) { + LOG("No credentials in response"); + return false; + } + + auto& creds = creds_list[0]; + out.access_key_id = creds["access_key_id"].get<std::string>(); + out.secret_access_key = creds["secret_access_key"].get<std::string>(); + out.session_token = creds["session_token"].get<std::string>(); + out.bucket = creds["bucket"].get<std::string>(); + out.region = creds["region"].get<std::string>(); + return true; + + } catch (const json::exception& e) { + LOG("Failed to parse credentials response: " << e.what()); + return false; + } +} + +bool RobotoBackend::upload_to_s3(const std::string& filepath, const S3Credentials& creds, + const std::string& s3_uri) +{ + // Parse S3 URI + std::string bucket, key; + + if (!parse_s3_uri(s3_uri, bucket, key)) { + LOG("Failed to parse S3 URI: " << s3_uri); + return false; + } + + // Read file + std::ifstream file(filepath, std::ios::binary); + + if (!file) { + LOG("Could not open file: " << filepath); + return false; + } + + std::string content((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>()); + + // Compute payload hash + std::string payload_hash = sha256_hex(content.c_str(), content.size()); + + // Get current UTC time + auto now = std::chrono::system_clock::now(); + auto now_t = std::chrono::system_clock::to_time_t(now); + std::tm utc_tm; + gmtime_r(&now_t, &utc_tm); + + char date_iso[17]; // YYYYMMDDTHHMMSSZ + strftime(date_iso, sizeof(date_iso), "%Y%m%dT%H%M%SZ", &utc_tm); + + char date_stamp[9]; // YYYYMMDD + strftime(date_stamp, sizeof(date_stamp), "%Y%m%d", &utc_tm); + + // Build SigV4 Authorization header + std::string auth_header = sign_s3_request( + "PUT", bucket, key, creds.region, creds, + payload_hash, date_iso, date_stamp); + + // Construct S3 endpoint host + std::string s3_host = bucket + ".s3."
+ creds.region + ".amazonaws.com"; + + httplib::SSLClient s3_cli(s3_host); + s3_cli.set_read_timeout(300); // 5 minutes for large files + + httplib::Headers headers = { + {"Authorization", auth_header}, + {"x-amz-date", date_iso}, + {"x-amz-content-sha256", payload_hash}, + {"x-amz-security-token", creds.session_token}, + {"Host", s3_host} + }; + + std::string path = "/" + key; + auto res = s3_cli.Put(path, headers, content, "application/octet-stream"); + + if (!res) { + LOG("S3 upload failed: no response"); + return false; + } + + if (res->status == 200 || res->status == 204) { + return true; + } + + LOG("S3 upload failed: " << res->status << " " << res->body); + return false; +} + +bool RobotoBackend::report_progress(const std::string& transaction_id, const std::string& s3_uri) +{ + json body; + body["manifest_items"] = json::array({s3_uri}); + + httplib::SSLClient cli(_api_host); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + cli.set_bearer_token_auth(_settings.api_token); + + auto res = cli.Put(("/v1/files/upload/" + transaction_id + "/progress").c_str(), + body.dump(), + "application/json"); + + if (!res || res->status < 200 || res->status >= 300) { + LOG("Roboto report progress failed: " << (res ? std::to_string(res->status) + " " + res->body : "No response")); + return false; + } + + return true; +} + +bool RobotoBackend::complete_upload(const std::string& transaction_id) +{ + httplib::SSLClient cli(_api_host); + cli.set_connection_timeout(10); + cli.set_read_timeout(30); + cli.set_bearer_token_auth(_settings.api_token); + + auto res = cli.Put(("/v1/files/upload/" + transaction_id + "/complete").c_str(), + "", + "application/json"); + + if (!res || res->status < 200 || res->status >= 300) { + LOG("Roboto complete upload failed: " << (res ? 
std::to_string(res->status) + " " + res->body : "No response")); + return false; + } + + return true; +} + +// --- S3 Helpers --- + +bool RobotoBackend::parse_s3_uri(const std::string& uri, std::string& bucket, std::string& key) +{ + const std::string prefix = "s3://"; + + if (uri.substr(0, prefix.size()) != prefix) { + return false; + } + + auto rest = uri.substr(prefix.size()); + auto slash = rest.find('/'); + + if (slash == std::string::npos) { + return false; + } + + bucket = rest.substr(0, slash); + key = rest.substr(slash + 1); + return true; +} + +std::string RobotoBackend::sign_s3_request(const std::string& method, + const std::string& bucket, + const std::string& key, + const std::string& region, + const S3Credentials& creds, + const std::string& payload_hash, + const std::string& date_iso, + const std::string& date_stamp) +{ + std::string s3_host = bucket + ".s3." + region + ".amazonaws.com"; + std::string canonical_uri = "/" + key; + std::string canonical_querystring; + + // Canonical headers (must be sorted) + std::string canonical_headers = + "host:" + s3_host + "\n" + "x-amz-content-sha256:" + payload_hash + "\n" + "x-amz-date:" + date_iso + "\n" + "x-amz-security-token:" + creds.session_token + "\n"; + + std::string signed_headers = "host;x-amz-content-sha256;x-amz-date;x-amz-security-token"; + + // Canonical request + std::string canonical_request = + method + "\n" + + canonical_uri + "\n" + + canonical_querystring + "\n" + + canonical_headers + "\n" + + signed_headers + "\n" + + payload_hash; + + // Credential scope + std::string credential_scope = date_stamp + std::string("/") + region + "/s3/aws4_request"; + + // String to sign + std::string string_to_sign = + "AWS4-HMAC-SHA256\n" + + date_iso + std::string("\n") + + credential_scope + "\n" + + sha256_hex(canonical_request); + + // Derive signing key + std::string k_date = hmac_sha256_raw("AWS4" + creds.secret_access_key, date_stamp); + std::string k_region = hmac_sha256_raw(k_date, region); + 
     std::string k_service = hmac_sha256_raw(k_region, "s3"); + std::string k_signing = hmac_sha256_raw(k_service, "aws4_request"); + + // Compute signature + std::string signature = hmac_sha256_hex(k_signing, string_to_sign); + + // Build Authorization header + std::string authorization = + "AWS4-HMAC-SHA256 Credential=" + creds.access_key_id + "/" + credential_scope + + ", SignedHeaders=" + signed_headers + + ", Signature=" + signature; + + return authorization; +} + +// --- Crypto Helpers --- + +std::string RobotoBackend::sha256_hex(const std::string& data) +{ + return sha256_hex(data.c_str(), data.size()); +} + +std::string RobotoBackend::sha256_hex(const char* data, size_t len) +{ + unsigned char hash[SHA256_DIGEST_LENGTH]; + SHA256(reinterpret_cast<const unsigned char*>(data), len, hash); + return to_hex(hash, SHA256_DIGEST_LENGTH); +} + +std::string RobotoBackend::hmac_sha256_raw(const std::string& key, const std::string& data) +{ + unsigned char result[EVP_MAX_MD_SIZE]; + unsigned int result_len = 0; + HMAC(EVP_sha256(), + key.c_str(), static_cast<int>(key.size()), + reinterpret_cast<const unsigned char*>(data.c_str()), data.size(), + result, &result_len); + return std::string(reinterpret_cast<const char*>(result), result_len); +} + +std::string RobotoBackend::hmac_sha256_hex(const std::string& key, const std::string& data) +{ + unsigned char result[EVP_MAX_MD_SIZE]; + unsigned int result_len = 0; + HMAC(EVP_sha256(), + key.c_str(), static_cast<int>(key.size()), + reinterpret_cast<const unsigned char*>(data.c_str()), data.size(), + result, &result_len); + return to_hex(result, result_len); +} + +std::string RobotoBackend::to_hex(const unsigned char* data, size_t len) +{ + std::ostringstream ss; + ss << std::hex << std::setfill('0'); + + for (size_t i = 0; i < len; i++) { + ss << std::setw(2) << static_cast<int>(data[i]); + } + + return ss.str(); +} diff --git a/src/RobotoBackend.hpp b/src/RobotoBackend.hpp new file mode 100644 index 0000000..bdd7b3f --- /dev/null +++ b/src/RobotoBackend.hpp @@ -0,0 +1,70 @@ +#pragma once + +#include "UploadBackend.hpp"
+#include <string> + +class RobotoBackend : public UploadBackend +{ +public: + struct Settings { + std::string api_url; // e.g., "https://api.roboto.ai" + std::string api_token; // Bearer token + std::string device_id; // Device ID for dataset metadata + std::string db_path; + bool upload_enabled {}; + }; + + RobotoBackend(const Settings& settings); + +protected: + UploadResult upload(const std::string& filepath) override; + +private: + struct DatasetInfo { + std::string dataset_id; + }; + + struct UploadTransactionInfo { + std::string transaction_id; + std::string s3_uri; + }; + + struct S3Credentials { + std::string access_key_id; + std::string secret_access_key; + std::string session_token; + std::string bucket; + std::string region; + }; + + // Roboto API steps + bool create_dataset(const std::string& filename, DatasetInfo& out); + bool begin_upload(const std::string& dataset_id, const std::string& filename, + size_t file_size, UploadTransactionInfo& out); + bool get_credentials(const std::string& transaction_id, S3Credentials& out); + bool upload_to_s3(const std::string& filepath, const S3Credentials& creds, + const std::string& s3_uri); + bool report_progress(const std::string& transaction_id, const std::string& s3_uri); + bool complete_upload(const std::string& transaction_id); + + // S3 helpers + static bool parse_s3_uri(const std::string& uri, std::string& bucket, std::string& key); + static std::string sign_s3_request(const std::string& method, + const std::string& bucket, + const std::string& key, + const std::string& region, + const S3Credentials& creds, + const std::string& payload_hash, + const std::string& date_iso, + const std::string& date_stamp); + + // Crypto helpers + static std::string sha256_hex(const std::string& data); + static std::string sha256_hex(const char* data, size_t len); + static std::string hmac_sha256_raw(const std::string& key, const std::string& data); + static std::string hmac_sha256_hex(const std::string& key, const std::string&
data); + static std::string to_hex(const unsigned char* data, size_t len); + + Settings _settings; + std::string _api_host; // Host extracted from api_url (scheme stripped) +}; diff --git a/src/ServerInterface.cpp b/src/ServerInterface.cpp deleted file mode 100644 index 554893b..0000000 --- a/src/ServerInterface.cpp +++ /dev/null @@ -1,600 +0,0 @@ -#include "ServerInterface.hpp" -#include "Log.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#define CPPHTTPLIB_OPENSSL_SUPPORT -#include - -namespace fs = std::filesystem; - -ServerInterface::ServerInterface(const ServerInterface::Settings& settings) - : _settings(settings) -{ - // Sanitize the URL to strip off the prefix - sanitize_url_and_determine_protocol(); - - // Initialize the database - if (!init_database()) { - std::cerr << "Failed to initialize database for server: " << _settings.server_url << std::endl; - } -} - -ServerInterface::~ServerInterface() -{ - close_database(); -} - -void ServerInterface::sanitize_url_and_determine_protocol() -{ - std::string url = _settings.server_url; - std::string sanitized_url; - Protocol protocol; - - std::string http_prefix = "http://"; - std::string https_prefix = "https://"; - - size_t pos = std::string::npos; - - if ((pos = url.find(https_prefix)) != std::string::npos) { - sanitized_url = url.substr(pos + https_prefix.length()); - protocol = Protocol::Https; - - } else if ((pos = url.find(http_prefix)) != std::string::npos) { - sanitized_url = url.substr(pos + http_prefix.length()); - protocol = Protocol::Http; - - } else { - sanitized_url = url; - protocol = Protocol::Https; - } - - _settings.server_url = sanitized_url; - _protocol = protocol; -} - -void ServerInterface::start() -{ - _should_exit = false; -} - -void ServerInterface::stop() -{ - _should_exit = true; -} - -std::string ServerInterface::generate_uuid(const mavsdk::LogFiles::Entry& entry) -{ - // Create a unique identifier based on date and size - std::stringstream ss; - 
ss << entry.date << "_" << entry.size_bytes; - - // Use a simple hash for the UUID - std::hash hasher; - size_t hash = hasher(ss.str()); - - ss.str(""); - ss << std::hex << std::setw(16) << std::setfill('0') << hash; - return ss.str(); -} - -bool ServerInterface::add_log_entry(const mavsdk::LogFiles::Entry& entry) -{ - std::string uuid = generate_uuid(entry); - - // Check if the log already exists - sqlite3_stmt* stmt; - std::string check_query = "SELECT COUNT(*) FROM logs WHERE uuid = ?"; - - if (sqlite3_prepare_v2(_db, check_query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing add_log_entry check: " << sqlite3_errmsg(_db) << std::endl; - return false; - } - - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - - bool exists = false; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - exists = sqlite3_column_int(stmt, 0) > 0; - } - - sqlite3_finalize(stmt); - - if (exists) { - return true; // Already exists, no need to add - } - - // Insert the log - std::string insert_query = - "INSERT INTO logs (uuid, id, date, size_bytes, downloaded, uploaded) " - "VALUES (?, ?, ?, ?, 0, 0)"; - - if (sqlite3_prepare_v2(_db, insert_query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing add_log_entry insert: " << sqlite3_errmsg(_db) << std::endl; - return false; - } - - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - sqlite3_bind_int(stmt, 2, entry.id); - sqlite3_bind_text(stmt, 3, entry.date.c_str(), -1, SQLITE_STATIC); - sqlite3_bind_int(stmt, 4, entry.size_bytes); - - bool success = sqlite3_step(stmt) == SQLITE_DONE; - sqlite3_finalize(stmt); - - return success; -} - -bool ServerInterface::update_download_status(const std::string& uuid, bool downloaded) -{ - std::string query = "UPDATE logs SET downloaded = ? 
WHERE uuid = ?"; - sqlite3_stmt* stmt; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing update_download_status: " << sqlite3_errmsg(_db) << std::endl; - return false; - } - - sqlite3_bind_int(stmt, 1, downloaded ? 1 : 0); - sqlite3_bind_text(stmt, 2, uuid.c_str(), -1, SQLITE_STATIC); - - bool success = sqlite3_step(stmt) == SQLITE_DONE; - sqlite3_finalize(stmt); - - return success; -} - -uint32_t ServerInterface::num_logs_to_upload() -{ - if (!_settings.upload_enabled || _should_exit) { - return false; - } - - sqlite3_stmt* stmt; - std::string query = - "SELECT COUNT(*) FROM logs " - "WHERE downloaded = 1 AND uploaded = 0 " - "AND uuid NOT IN (SELECT uuid FROM blacklist)"; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing has_logs_to_upload: " << sqlite3_errmsg(_db) << std::endl; - return false; - } - - uint32_t log_count = 0; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - log_count = sqlite3_column_int(stmt, 0); - } - - sqlite3_finalize(stmt); - return log_count; -} - -ServerInterface::DatabaseEntry ServerInterface::get_next_log_to_upload() -{ - DatabaseEntry empty_entry; - empty_entry.uuid = ""; // Empty UUID indicates not found - - if (!_settings.upload_enabled || _should_exit) { - return empty_entry; - } - - sqlite3_stmt* stmt; - std::string query = - "SELECT uuid, id, date, size_bytes, downloaded, uploaded FROM logs " - "WHERE downloaded = 1 AND uploaded = 0 " - "AND uuid NOT IN (SELECT uuid FROM blacklist) " - "ORDER BY date DESC, size_bytes DESC LIMIT 1"; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing get_next_log_to_upload: " << sqlite3_errmsg(_db) << std::endl; - return empty_entry; - } - - DatabaseEntry entry = empty_entry; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - entry = row_to_db_entry(stmt); - } - - sqlite3_finalize(stmt); - return entry; -} 
- -ServerInterface::UploadResult ServerInterface::upload_log(const std::string& filepath) -{ - if (!_settings.upload_enabled || _should_exit) { - return {false, 0, "Upload disabled or shutting down"}; - } - - // Extract UUID from filename - std::string filename = fs::path(filepath).filename().string(); - std::string uuid; - - // Parse the ID and date from filename (assuming format like LOG0001_2023-04-15T12:34:56Z.ulg) - size_t underscore_pos = filename.find('_'); - size_t dot_pos = filename.find_last_of('.'); - - if (underscore_pos != std::string::npos && dot_pos != std::string::npos) { - std::string id_part = filename.substr(3, underscore_pos - 3); // Skip "LOG" prefix - std::string date_part = filename.substr(underscore_pos + 1, dot_pos - underscore_pos - 1); - - uint32_t id = std::stoi(id_part); - uint32_t size = fs::exists(filepath) ? fs::file_size(filepath) : 0; - - // Create a log entry and generate UUID - mavsdk::LogFiles::Entry entry; - entry.id = id; - entry.date = date_part; - entry.size_bytes = size; - - uuid = generate_uuid(entry); - - // Add to database if not already there - sqlite3_stmt* stmt; - std::string check_query = "SELECT COUNT(*) FROM logs WHERE uuid = ?"; - - if (sqlite3_prepare_v2(_db, check_query.c_str(), -1, &stmt, nullptr) == SQLITE_OK) { - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - - if (sqlite3_step(stmt) == SQLITE_ROW && sqlite3_column_int(stmt, 0) == 0) { - // Log doesn't exist, add it - add_log_entry(entry); - update_download_status(uuid, true); // Mark as downloaded since we have the file - } - - sqlite3_finalize(stmt); - } - } - - if (uuid.empty()) { - return {false, 0, "Could not determine UUID from filename"}; - } - - // Check if already blacklisted - if (is_blacklisted(uuid)) { - return {false, 400, "Log is blacklisted"}; - } - - // Perform the upload - UploadResult result = upload(filepath); - - // Update database with result - if (result.success) { - std::string query = "UPDATE logs SET uploaded = 1 WHERE 
uuid = ?"; - sqlite3_stmt* stmt; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) == SQLITE_OK) { - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - sqlite3_step(stmt); - sqlite3_finalize(stmt); - } - - } else if (result.status_code == 400) { - // Permanent failure - add to blacklist - add_to_blacklist(uuid, "HTTP 400: Bad Request"); - } - - return result; -} - -bool ServerInterface::is_blacklisted(const std::string& uuid) -{ - std::string query = "SELECT COUNT(*) FROM blacklist WHERE uuid = ?"; - sqlite3_stmt* stmt; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing is_blacklisted: " << sqlite3_errmsg(_db) << std::endl; - return false; - } - - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - - bool blacklisted = false; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - blacklisted = sqlite3_column_int(stmt, 0) > 0; - } - - sqlite3_finalize(stmt); - return blacklisted; -} - -uint32_t ServerInterface::num_logs_to_download() -{ - sqlite3_stmt* stmt; - std::string query = - "SELECT COUNT(*) FROM logs " - "WHERE downloaded = 0"; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing num_logs_to_download: " << sqlite3_errmsg(_db) << std::endl; - return 0; - } - - uint32_t log_count = 0; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - log_count = sqlite3_column_int(stmt, 0); - } - - sqlite3_finalize(stmt); - return log_count; -} - -ServerInterface::DatabaseEntry ServerInterface::get_next_log_to_download() -{ - DatabaseEntry empty_entry; - empty_entry.uuid = ""; // Empty UUID indicates not found - - sqlite3_stmt* stmt; - std::string query = - "SELECT uuid, id, date, size_bytes, downloaded, uploaded " - "FROM logs WHERE downloaded = 0 " - "ORDER BY date DESC, size_bytes DESC LIMIT 1"; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing 
get_next_log_to_download: " << sqlite3_errmsg(_db) << std::endl; - return empty_entry; - } - - DatabaseEntry entry = empty_entry; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - entry = row_to_db_entry(stmt); - } - - sqlite3_finalize(stmt); - return entry; -} - -std::string ServerInterface::filepath_from_entry(const mavsdk::LogFiles::Entry& entry) const -{ - std::ostringstream ss; - ss << _settings.logs_directory << "LOG" << std::setfill('0') << std::setw(4) << entry.id << "_" << entry.date << ".ulg"; - return ss.str(); -} - -std::string ServerInterface::filepath_from_uuid(const std::string& uuid) const -{ - // Look up the log entry by UUID - sqlite3_stmt* stmt; - std::string query = - "SELECT id, date FROM logs WHERE uuid = ?"; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing filepath_from_uuid: " << sqlite3_errmsg(_db) << std::endl; - return ""; - } - - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - - std::string filepath; - - if (sqlite3_step(stmt) == SQLITE_ROW) { - int id = sqlite3_column_int(stmt, 0); - const unsigned char* date_text = sqlite3_column_text(stmt, 1); - - if (date_text != nullptr) { - std::string date = reinterpret_cast(date_text); - std::ostringstream ss; - ss << _settings.logs_directory << "LOG" << std::setfill('0') << std::setw(4) << id << "_" << date << ".ulg"; - filepath = ss.str(); - } - } - - sqlite3_finalize(stmt); - return filepath; -} - -ServerInterface::UploadResult ServerInterface::upload(const std::string& filepath) -{ - // Skip files that are in progress (have a .lock file) - if (fs::exists(filepath + ".lock")) { - return {false, 0, "File is locked (currently being downloaded)"}; - } - - // Skip files that don't exist - if (!fs::exists(filepath)) { - return {false, 404, "Log file does not exist: " + filepath}; - } - - // Skip files with size zero - if (fs::file_size(filepath) == 0) { - return {false, 0, "Skipping zero-size log file: " + filepath}; - 
} - - if (!server_reachable()) { - return {false, 0, "Server unreachable: " + _settings.server_url}; - } - - std::ifstream file(filepath, std::ios::binary); - - if (!file) { - return {false, 0, "Could not open file: " + filepath}; - } - - // Build multi-part form data - httplib::MultipartFormDataItems items = { - {"type", _settings.public_logs ? "flightreport" : "personal", "", ""}, // NOTE: backend logic is funky - {"description", "Uploaded by logloader", "", ""}, - {"feedback", "", "", ""}, - {"email", _settings.user_email, "", ""}, - {"source", "auto", "", ""}, - {"videoUrl", "", "", ""}, - {"rating", "", "", ""}, - {"windSpeed", "", "", ""}, - {"public", _settings.public_logs ? "true" : "false", "", ""}, - }; - - std::string content((std::istreambuf_iterator(file)), std::istreambuf_iterator()); - items.push_back({"filearg", content, filepath, "application/octet-stream"}); - - LOG("Uploading " << fs::path(filepath).filename().string() << " to " << _settings.server_url); - - // Post multi-part form - httplib::Result res; - - if (_protocol == Protocol::Https) { - httplib::SSLClient cli(_settings.server_url); - res = cli.Post("/upload", items); - - } else { - httplib::Client cli(_settings.server_url); - res = cli.Post("/upload", items); - } - - if (res && res->status == 302) { - return {true, 302, "Success: " + _settings.server_url + res->get_header_value("Location")}; - - } else if (res && res->status == 400) { - return {false, 400, "Bad Request - Will not retry"}; - - } else { - return {false, res ? res->status : 0, "Will retry later"}; - } -} - -bool ServerInterface::server_reachable() -{ - httplib::Result res; - - if (_protocol == Protocol::Https) { - httplib::SSLClient cli(_settings.server_url); - res = cli.Get("/"); - - } else { - httplib::Client cli(_settings.server_url); - res = cli.Get("/"); - } - - bool success = res && res->status == 200; - - if (!success) { - LOG("Connection to " << _settings.server_url << " failed: " << (res ? 
std::to_string(res->status) : "No response")); - } - - return success; -} - -bool ServerInterface::init_database() -{ - int rc = sqlite3_open(_settings.db_path.c_str(), &_db); - - if (rc != SQLITE_OK) { - std::cerr << "Cannot open database: " << sqlite3_errmsg(_db) << std::endl; - sqlite3_close(_db); - _db = nullptr; - return false; - } - - // Create logs table - const char* create_logs_table = - "CREATE TABLE IF NOT EXISTS logs (" - " uuid TEXT PRIMARY KEY," // UUID of the log - " id INTEGER," // Original log ID - " date TEXT," // ISO8601 date from log - " size_bytes INTEGER," // Size in bytes - " downloaded INTEGER DEFAULT 0," // Has it been downloaded - " uploaded INTEGER DEFAULT 0" // Has it been uploaded - ");"; - - // Create blacklist table - const char* create_blacklist_table = - "CREATE TABLE IF NOT EXISTS blacklist (" - " uuid TEXT PRIMARY KEY," // UUID of the log - " reason TEXT," // Reason for blacklisting - " timestamp TEXT" // When the log was blacklisted - ");"; - - bool success = execute_query(create_logs_table) && execute_query(create_blacklist_table); - return success; -} - -void ServerInterface::close_database() -{ - if (_db) { - sqlite3_close(_db); - _db = nullptr; - } -} - -bool ServerInterface::add_to_blacklist(const std::string& uuid, const std::string& reason) -{ - // Get current timestamp - auto now = std::chrono::system_clock::now(); - auto now_c = std::chrono::system_clock::to_time_t(now); - std::stringstream ss; - ss << std::put_time(std::localtime(&now_c), "%Y-%m-%d %H:%M:%S"); - std::string timestamp = ss.str(); - - // Add to blacklist - std::string query = "INSERT OR REPLACE INTO blacklist (uuid, reason, timestamp) VALUES (?, ?, ?)"; - sqlite3_stmt* stmt; - - if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { - std::cerr << "SQL error preparing add_to_blacklist: " << sqlite3_errmsg(_db) << std::endl; - return false; - } - - sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); - 
sqlite3_bind_text(stmt, 2, reason.c_str(), -1, SQLITE_STATIC); - sqlite3_bind_text(stmt, 3, timestamp.c_str(), -1, SQLITE_STATIC); - - bool success = sqlite3_step(stmt) == SQLITE_DONE; - sqlite3_finalize(stmt); - - return success; -} - -bool ServerInterface::execute_query(const std::string& query) -{ - char* error_msg = nullptr; - int rc = sqlite3_exec(_db, query.c_str(), nullptr, nullptr, &error_msg); - - if (rc != SQLITE_OK) { - std::cerr << "SQL error: " << error_msg << std::endl; - sqlite3_free(error_msg); - return false; - } - - return true; -} - -ServerInterface::DatabaseEntry ServerInterface::row_to_db_entry(sqlite3_stmt* stmt) -{ - DatabaseEntry entry; - - const unsigned char* uuid_text = sqlite3_column_text(stmt, 0); - - if (uuid_text != nullptr) { - entry.uuid = reinterpret_cast(uuid_text); - - } else { - entry.uuid = ""; - } - - entry.id = sqlite3_column_int(stmt, 1); - - const unsigned char* date_text = sqlite3_column_text(stmt, 2); - - if (date_text != nullptr) { - entry.date = reinterpret_cast(date_text); - - } else { - entry.date = ""; - } - - entry.size_bytes = sqlite3_column_int(stmt, 3); - entry.downloaded = sqlite3_column_int(stmt, 4) != 0; - - return entry; -} diff --git a/src/ServerInterface.hpp b/src/ServerInterface.hpp deleted file mode 100644 index ea47559..0000000 --- a/src/ServerInterface.hpp +++ /dev/null @@ -1,81 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -class ServerInterface -{ -public: - struct Settings { - std::string server_url; - std::string user_email; - std::string logs_directory; - std::string db_path; // Path to this server's database - bool upload_enabled {}; - bool public_logs {}; - }; - - struct UploadResult { - bool success; - int status_code; // HTTP status code, or 0 if not applicable - std::string message; - }; - - struct DatabaseEntry { - std::string uuid; - uint32_t id; - std::string date; - uint32_t size_bytes; - bool downloaded; - }; - - ServerInterface(const Settings& settings); - 
~ServerInterface(); - - // Database initialization - bool init_database(); - void close_database(); - - // Log entry management - static std::string generate_uuid(const mavsdk::LogFiles::Entry& entry); - bool add_log_entry(const mavsdk::LogFiles::Entry& entry); - bool update_download_status(const std::string& uuid, bool downloaded); - uint32_t num_logs_to_download(); - - // Upload management - uint32_t num_logs_to_upload(); - DatabaseEntry get_next_log_to_upload(); - UploadResult upload_log(const std::string& filepath); - - // Query methods - bool is_blacklisted(const std::string& uuid); - DatabaseEntry get_next_log_to_download(); - - std::string filepath_from_entry(const mavsdk::LogFiles::Entry& entry) const ; - std::string filepath_from_uuid(const std::string& uuid) const; - - void start(); - void stop(); - -private: - enum class Protocol { - Http, - Https - }; - - void sanitize_url_and_determine_protocol(); - UploadResult upload(const std::string& filepath); - bool server_reachable(); - - // Database operations - bool execute_query(const std::string& query); - bool add_to_blacklist(const std::string& uuid, const std::string& reason); - DatabaseEntry row_to_db_entry(sqlite3_stmt* stmt); - - Settings _settings; - Protocol _protocol {Protocol::Https}; - bool _should_exit = false; - sqlite3* _db = nullptr; -}; diff --git a/src/UploadBackend.cpp b/src/UploadBackend.cpp new file mode 100644 index 0000000..ab53ee0 --- /dev/null +++ b/src/UploadBackend.cpp @@ -0,0 +1,272 @@ +#include "UploadBackend.hpp" +#include "Log.hpp" + +#include <chrono> +#include <ctime> +#include <iomanip> +#include <iostream> +#include <sstream> + +UploadBackend::UploadBackend(const std::string& db_path, bool upload_enabled) + : _db_path(db_path) + , _upload_enabled(upload_enabled) +{ + if (!init_database()) { + std::cerr << "Failed to initialize database: " << _db_path << std::endl; + } +} + +UploadBackend::~UploadBackend() +{ + close_database(); +} + +void UploadBackend::start() +{ + _should_exit = false; +} + +void UploadBackend::stop()
+{ + _should_exit = true; +} + +void UploadBackend::register_log(const std::string& uuid) +{ + if (uuid.empty()) { + return; + } + + // Check if already exists + sqlite3_stmt* stmt; + std::string check_query = "SELECT COUNT(*) FROM logs WHERE uuid = ?"; + + if (sqlite3_prepare_v2(_db, check_query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing register_log check: " << sqlite3_errmsg(_db) << std::endl; + return; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + + bool exists = false; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + exists = sqlite3_column_int(stmt, 0) > 0; + } + + sqlite3_finalize(stmt); + + if (exists) { + return; + } + + // Insert + std::string insert_query = "INSERT INTO logs (uuid, uploaded) VALUES (?, 0)"; + + if (sqlite3_prepare_v2(_db, insert_query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing register_log insert: " << sqlite3_errmsg(_db) << std::endl; + return; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + sqlite3_step(stmt); + sqlite3_finalize(stmt); +} + +uint32_t UploadBackend::num_logs_to_upload() +{ + if (!_upload_enabled || _should_exit) { + return 0; + } + + sqlite3_stmt* stmt; + std::string query = + "SELECT COUNT(*) FROM logs " + "WHERE uploaded = 0 " + "AND uuid NOT IN (SELECT uuid FROM blacklist)"; + + if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing num_logs_to_upload: " << sqlite3_errmsg(_db) << std::endl; + return 0; + } + + uint32_t log_count = 0; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + log_count = sqlite3_column_int(stmt, 0); + } + + sqlite3_finalize(stmt); + return log_count; +} + +std::string UploadBackend::get_next_log_to_upload() +{ + if (!_upload_enabled || _should_exit) { + return ""; + } + + sqlite3_stmt* stmt; + std::string query = + "SELECT uuid FROM logs " + "WHERE uploaded = 0 " + "AND uuid NOT IN (SELECT uuid FROM blacklist) " + 
"LIMIT 1"; + + if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing get_next_log_to_upload: " << sqlite3_errmsg(_db) << std::endl; + return ""; + } + + std::string uuid; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + const unsigned char* uuid_text = sqlite3_column_text(stmt, 0); + + if (uuid_text != nullptr) { + uuid = reinterpret_cast(uuid_text); + } + } + + sqlite3_finalize(stmt); + return uuid; +} + +UploadBackend::UploadResult UploadBackend::upload_log(const std::string& filepath, const std::string& uuid) +{ + if (!_upload_enabled || _should_exit) { + return {false, 0, "Upload disabled or shutting down"}; + } + + if (uuid.empty()) { + return {false, 0, "Empty UUID"}; + } + + // Check if already blacklisted + if (is_blacklisted(uuid)) { + return {false, 400, "Log is blacklisted"}; + } + + // Perform the upload (implemented by subclass) + UploadResult result = upload(filepath); + + // Update database with result + if (result.success) { + std::string query = "UPDATE logs SET uploaded = 1 WHERE uuid = ?"; + sqlite3_stmt* stmt; + + if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) == SQLITE_OK) { + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + sqlite3_step(stmt); + sqlite3_finalize(stmt); + } + + } else if (result.status_code == 400) { + // Permanent failure - add to blacklist + add_to_blacklist(uuid, "Permanent failure: " + result.message); + } + + return result; +} + +bool UploadBackend::is_blacklisted(const std::string& uuid) +{ + std::string query = "SELECT COUNT(*) FROM blacklist WHERE uuid = ?"; + sqlite3_stmt* stmt; + + if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing is_blacklisted: " << sqlite3_errmsg(_db) << std::endl; + return false; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + + bool blacklisted = false; + + if (sqlite3_step(stmt) == SQLITE_ROW) { + blacklisted = 
sqlite3_column_int(stmt, 0) > 0; + } + + sqlite3_finalize(stmt); + return blacklisted; +} + +bool UploadBackend::init_database() +{ + int rc = sqlite3_open(_db_path.c_str(), &_db); + + if (rc != SQLITE_OK) { + std::cerr << "Cannot open database: " << sqlite3_errmsg(_db) << std::endl; + sqlite3_close(_db); + _db = nullptr; + return false; + } + + // Upload tracking table — just uuid and uploaded status + const char* create_logs_table = + "CREATE TABLE IF NOT EXISTS logs (" + " uuid TEXT PRIMARY KEY," + " uploaded INTEGER DEFAULT 0" + ");"; + + // Blacklist table + const char* create_blacklist_table = + "CREATE TABLE IF NOT EXISTS blacklist (" + " uuid TEXT PRIMARY KEY," + " reason TEXT," + " timestamp TEXT" + ");"; + + bool success = execute_query(create_logs_table) && execute_query(create_blacklist_table); + return success; +} + +void UploadBackend::close_database() +{ + if (_db) { + sqlite3_close(_db); + _db = nullptr; + } +} + +bool UploadBackend::add_to_blacklist(const std::string& uuid, const std::string& reason) +{ + // Get current timestamp + auto now = std::chrono::system_clock::now(); + auto now_c = std::chrono::system_clock::to_time_t(now); + std::stringstream ss; + ss << std::put_time(std::localtime(&now_c), "%Y-%m-%d %H:%M:%S"); + std::string timestamp = ss.str(); + + // Add to blacklist + std::string query = "INSERT OR REPLACE INTO blacklist (uuid, reason, timestamp) VALUES (?, ?, ?)"; + sqlite3_stmt* stmt; + + if (sqlite3_prepare_v2(_db, query.c_str(), -1, &stmt, nullptr) != SQLITE_OK) { + std::cerr << "SQL error preparing add_to_blacklist: " << sqlite3_errmsg(_db) << std::endl; + return false; + } + + sqlite3_bind_text(stmt, 1, uuid.c_str(), -1, SQLITE_STATIC); + sqlite3_bind_text(stmt, 2, reason.c_str(), -1, SQLITE_STATIC); + sqlite3_bind_text(stmt, 3, timestamp.c_str(), -1, SQLITE_STATIC); + + bool success = sqlite3_step(stmt) == SQLITE_DONE; + sqlite3_finalize(stmt); + + return success; +} + +bool UploadBackend::execute_query(const std::string& 
query) +{ + char* error_msg = nullptr; + int rc = sqlite3_exec(_db, query.c_str(), nullptr, nullptr, &error_msg); + + if (rc != SQLITE_OK) { + std::cerr << "SQL error: " << error_msg << std::endl; + sqlite3_free(error_msg); + return false; + } + + return true; +} diff --git a/src/UploadBackend.hpp b/src/UploadBackend.hpp new file mode 100644 index 0000000..5507d2f --- /dev/null +++ b/src/UploadBackend.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include +#include + +class UploadBackend +{ +public: + struct UploadResult { + bool success; + int status_code; // HTTP status code, or 0 if not applicable + std::string message; + }; + + UploadBackend(const std::string& db_path, bool upload_enabled); + virtual ~UploadBackend(); + + // Database initialization + bool init_database(); + void close_database(); + + // Upload management + void register_log(const std::string& uuid); + uint32_t num_logs_to_upload(); + std::string get_next_log_to_upload(); + UploadResult upload_log(const std::string& filepath, const std::string& uuid); + + // Query methods + bool is_blacklisted(const std::string& uuid); + bool add_to_blacklist(const std::string& uuid, const std::string& reason); + + void start(); + void stop(); + +protected: + // Subclasses implement this to perform the actual upload + virtual UploadResult upload(const std::string& filepath) = 0; + + // Database operations + bool execute_query(const std::string& query); + + std::string _db_path; + bool _upload_enabled {}; + std::atomic _should_exit{false}; + sqlite3* _db = nullptr; +}; diff --git a/src/main.cpp b/src/main.cpp index 6000d2b..1d7a29e 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -15,10 +15,19 @@ int main() signal(SIGTERM, signal_handler); setbuf(stdout, NULL); // Disable stdout buffering + const char* home_env = getenv("HOME"); + + if (home_env == nullptr) { + std::cerr << "Error: HOME environment variable is not set\n"; + return -1; + } + + std::string home(home_env); + toml::table config; try { - 
config = toml::parse_file(std::string(getenv("HOME")) + "/.local/share/logloader/config.toml"); + config = toml::parse_file(home + "/.local/share/logloader/config.toml"); } catch (const toml::parse_error& err) { std::cerr << "Parsing failed:\n" << err << "\n"; @@ -29,15 +38,22 @@ int main() return -1; } + // Parse Roboto settings from [roboto] table + auto roboto_table = config["roboto"]; + // Setup the LogLoader LogLoader::Settings settings = { .email = config["email"].value_or(""), .local_server = config["local_server"].value_or("http://127.0.0.1:5006"), .remote_server = config["remote_server"].value_or("https://logs.px4.io"), .mavsdk_connection_url = config["connection_url"].value_or("0.0.0"), - .application_directory = std::string(getenv("HOME")) + "/.local/share/logloader/", + .application_directory = home + "/.local/share/logloader/", .upload_enabled = config["upload_enabled"].value_or(false), - .public_logs = config["public_logs"].value_or(false) + .public_logs = config["public_logs"].value_or(false), + .roboto_api_url = roboto_table["api_url"].value_or(""), + .roboto_api_token = roboto_table["api_token"].value_or(""), + .roboto_device_id = roboto_table["device_id"].value_or(""), + .roboto_upload_enabled = roboto_table["upload_enabled"].value_or(false), }; _log_loader = std::make_shared(settings); diff --git a/third_party/nlohmann b/third_party/nlohmann new file mode 160000 index 0000000..f8eee1b --- /dev/null +++ b/third_party/nlohmann @@ -0,0 +1 @@ +Subproject commit f8eee1bb7953c6a4bff384d45052d5acc3d69698