diff --git a/include/arrow_utils.hpp b/include/arrow_utils.hpp index a876fc2..f5d305f 100644 --- a/include/arrow_utils.hpp +++ b/include/arrow_utils.hpp @@ -2,11 +2,14 @@ #define ARROW_UTILS_HPP #include -#include -#include +#include +#include namespace tundradb { +arrow::Result> get_ids_from_table( + const std::shared_ptr& table); + // Initialize Arrow Compute module - should be called once at startup bool initialize_arrow_compute(); diff --git a/include/query.hpp b/include/query.hpp index 709c932..1a5b437 100644 --- a/include/query.hpp +++ b/include/query.hpp @@ -4,12 +4,18 @@ #include #include #include +#include +#include +#include +#include +#include #include #include #include #include #include +#include #include #include "node.hpp" @@ -1012,6 +1018,114 @@ class QueryResult { QueryExecutionStats stats_; }; +// Forward declarations +class NodeManager; +class SchemaRegistry; + +/** + * @brief Query execution state container + * + * Holds all state needed during query execution including tables, node IDs, + * schema mappings, and graph connections. Optimized for performance with + * LLVM containers and object pooling. 
+ */ +struct QueryState { + SchemaRef from; + std::unordered_map> tables; + llvm::StringMap> ids; + std::unordered_map aliases; + + // Precomputed fully-qualified field names per alias (SchemaRef::value()) + llvm::StringMap> fq_field_names; + + // Field index optimization: replace string-based field lookups with integer + // indices + llvm::StringMap> + schema_field_indices; // "User" -> [0, 1, 2] + llvm::SmallDenseMap + field_id_to_name; // 0 -> "user.name" + llvm::StringMap field_name_to_index; // "user.name" -> 0 + std::atomic next_field_id{0}; // Global field ID counter + + llvm::StringMap< + llvm::DenseMap>> + connections; // outgoing + llvm::DenseMap> incoming; + + std::shared_ptr node_manager; + std::shared_ptr schema_registry; + std::vector traversals; + + // Temporal context for time-travel queries (nullptr = current version) + std::unique_ptr temporal_context; + + // Connection object pooling to avoid repeated allocations + class ConnectionPool { + private: + std::vector pool_; + size_t next_index_ = 0; + + public: + explicit ConnectionPool(size_t initial_size = 1000) : pool_(initial_size) {} + + GraphConnection& get() { + if (next_index_ >= pool_.size()) { + pool_.resize(pool_.size() * 2); // Grow pool if needed + } + return pool_[next_index_++]; + } + + void reset() { next_index_ = 0; } + size_t size() const { return next_index_; } + }; + + mutable ConnectionPool connection_pool_; + + // Simple inline methods + [[nodiscard]] llvm::DenseSet& get_ids(const SchemaRef& schema_ref) { + return ids[schema_ref.value()]; + } + + [[nodiscard]] const llvm::DenseSet& get_ids( + const SchemaRef& schema_ref) const { + // For const access, use find() to avoid returning temporary from lookup() + auto it = ids.find(schema_ref.value()); + if (it != ids.end()) { + return it->second; + } + // Return reference to static empty set for non-existent keys + static const llvm::DenseSet empty_set; + return empty_set; + } + + [[nodiscard]] bool has_outgoing(const SchemaRef& 
schema_ref, + int64_t node_id) const { + return connections.contains(schema_ref.value()) && + connections.at(schema_ref.value()).contains(node_id) && + !connections.at(schema_ref.value()).at(node_id).empty(); + } + + // Complex methods - implemented in query.cpp + void reserve_capacity(const Query& query); + + arrow::Result register_schema(const SchemaRef& schema_ref); + + arrow::Result resolve_schema(const SchemaRef& schema_ref) const; + + arrow::Result compute_fully_qualified_names( + const SchemaRef& schema_ref); + + arrow::Result compute_fully_qualified_names( + const SchemaRef& schema_ref, const std::string& resolved_schema); + + void remove_node(int64_t node_id, const SchemaRef& schema_ref); + + arrow::Result update_table(const std::shared_ptr& table, + const SchemaRef& schema_ref); + + std::string ToString() const; +}; + } // namespace tundradb #endif // QUERY_HPP diff --git a/include/utils.hpp b/include/utils.hpp index e245c25..8a13592 100644 --- a/include/utils.hpp +++ b/include/utils.hpp @@ -85,33 +85,6 @@ static arrow::Result> filter_table_by_id( return filtered_table.table(); } -static arrow::Result> get_ids_from_table( - std::shared_ptr table) { - log_debug("Extracting IDs from table with {} rows", table->num_rows()); - - auto id_idx = table->schema()->GetFieldIndex("id"); - if (id_idx == -1) { - log_error("Table does not have an 'id' column"); - return arrow::Status::Invalid("table does not have an 'id' column"); - } - - auto id_column = table->column(id_idx); - llvm::DenseSet result_ids; - result_ids.reserve(table->num_rows()); - - for (int chunk_idx = 0; chunk_idx < id_column->num_chunks(); chunk_idx++) { - auto chunk = std::static_pointer_cast( - id_column->chunk(chunk_idx)); - log_debug("Processing chunk {} with {} rows", chunk_idx, chunk->length()); - for (int i = 0; i < chunk->length(); i++) { - result_ids.insert(chunk->Value(i)); - } - } - - log_debug("Extracted {} unique IDs from table", result_ids.size()); - return result_ids; -} - static 
arrow::Result> create_table( const std::shared_ptr& schema, const std::vector>& nodes, size_t chunk_size, diff --git a/src/arrow_utils.cpp b/src/arrow_utils.cpp index 46c7a77..0f01397 100644 --- a/src/arrow_utils.cpp +++ b/src/arrow_utils.cpp @@ -1,11 +1,47 @@ #include "../include/arrow_utils.hpp" +#include +#include +#include +#include +#include +#include +#include +#include + #include #include "../include/logger.hpp" namespace tundradb { +arrow::Result> get_ids_from_table( + const std::shared_ptr& table) { + log_debug("Extracting IDs from table with {} rows", table->num_rows()); + + const auto id_idx = table->schema()->GetFieldIndex("id"); + if (id_idx == -1) { + log_error("Table does not have an 'id' column"); + return arrow::Status::Invalid("table does not have an 'id' column"); + } + + const auto id_column = table->column(id_idx); + llvm::DenseSet result_ids; + result_ids.reserve(table->num_rows()); + + for (int chunk_idx = 0; chunk_idx < id_column->num_chunks(); chunk_idx++) { + const auto chunk = std::static_pointer_cast( + id_column->chunk(chunk_idx)); + log_debug("Processing chunk {} with {} rows", chunk_idx, chunk->length()); + for (int i = 0; i < chunk->length(); i++) { + result_ids.insert(chunk->Value(i)); + } + } + + log_debug("Extracted {} unique IDs from table", result_ids.size()); + return result_ids; +} + // Initialize Arrow Compute module - should be called once at startup bool initialize_arrow_compute() { static bool initialized = false; diff --git a/src/core.cpp b/src/core.cpp index 2c1d9a3..37d86ff 100644 --- a/src/core.cpp +++ b/src/core.cpp @@ -420,243 +420,6 @@ std::set get_roots( return roots; } -struct QueryState { - SchemaRef from; - std::unordered_map> tables; - llvm::StringMap> ids; - std::unordered_map aliases; - // Precomputed fully-qualified field names per alias (SchemaRef::value()) - llvm::StringMap> fq_field_names; - - // Field index optimization: replace string-based field lookups with integer - // indices - llvm::StringMap> - 
schema_field_indices; // "User" -> [0, 1, 2], "Company -> [3,4,5]" - llvm::SmallDenseMap - field_id_to_name; // 0 -> "user.name" - llvm::StringMap field_name_to_index; // "user.name" -> 0 - std::atomic next_field_id{0}; // Global field ID counter - - llvm::StringMap< - llvm::DenseMap>> - connections; // outgoing - llvm::DenseMap> incoming; - - std::shared_ptr node_manager; - std::shared_ptr schema_registry; - std::vector traversals; - - // Temporal context for time-travel queries (nullptr = current version) - std::unique_ptr temporal_context; - - // Connection object pooling to avoid repeated allocations - class ConnectionPool { - private: - std::vector pool_; - size_t next_index_ = 0; - - public: - explicit ConnectionPool(size_t initial_size = 1000) : pool_(initial_size) {} - - GraphConnection& get() { - if (next_index_ >= pool_.size()) { - pool_.resize(pool_.size() * 2); // Grow pool if needed - } - return pool_[next_index_++]; - } - - void reset() { next_index_ = 0; } // Reset for reuse - size_t size() const { return next_index_; } - }; - - mutable ConnectionPool connection_pool_; // Mutable for const methods - - // Pre-size hash maps to avoid expensive resizing during query execution - void reserve_capacity(const Query& query) { - // Estimate schema count from FROM + TRAVERSE clauses - size_t estimated_schemas = 1; // FROM clause - for (const auto& clause : query.clauses()) { - if (clause->type() == Clause::Type::TRAVERSE) { - estimated_schemas += 2; // source + target schemas - } - } - - // Pre-size standard containers (LLVM containers don't support reserve) - tables.reserve(estimated_schemas); - aliases.reserve(estimated_schemas); - - // Estimate nodes per schema (conservative estimate) - size_t estimated_nodes_per_schema = 1000; - incoming.reserve(estimated_nodes_per_schema); - - // Pre-size field mappings - field_id_to_name.reserve(estimated_schemas * 8); // ~8 fields per schema - } - - arrow::Result resolve_schema(const SchemaRef& schema_ref) { - // todo 
we need to separate functions: assign alias , resolve - if (aliases.contains(schema_ref.value()) && schema_ref.is_declaration()) { - IF_DEBUG_ENABLED { - log_debug("duplicated schema alias '" + schema_ref.value() + - "' already assigned to '" + aliases[schema_ref.value()] + - "'"); - } - return aliases[schema_ref.value()]; - } - if (schema_ref.is_declaration()) { - aliases[schema_ref.value()] = schema_ref.schema(); - return schema_ref.schema(); - } - return aliases[schema_ref.value()]; - } - - // Precompute fully-qualified field names for source and target aliases - arrow::Result compute_fully_qualified_names( - const SchemaRef& schema_ref) { - const auto it = aliases.find(schema_ref.value()); - if (it == aliases.end()) { - return arrow::Status::KeyError("keyset does not contain alias '{}'", - schema_ref.value()); - } - return compute_fully_qualified_names(schema_ref, it->second); - } - - // Precompute fully-qualified field names for source and target aliases - arrow::Result compute_fully_qualified_names( - const SchemaRef& schema_ref, const std::string& resolved_schema) { - const std::string& alias = schema_ref.value(); - if (fq_field_names.contains(alias)) { - return false; - } - auto schema_res = schema_registry->get(resolved_schema); - if (!schema_res.ok()) { - return schema_res.status(); - } - const auto& schema = schema_res.ValueOrDie(); - std::vector names; - std::vector indices; - names.reserve(schema->num_fields()); - indices.reserve(schema->num_fields()); - - for (const auto& f : schema->fields()) { - std::string fq_name = alias + "." 
+ f->name(); - int field_id = next_field_id.fetch_add(1); - names.emplace_back(fq_name); - indices.emplace_back(field_id); - field_id_to_name[field_id] = fq_name; - field_name_to_index[fq_name] = field_id; - } - - fq_field_names[alias] = std::move(names); - schema_field_indices[alias] = std::move(indices); - return true; - } - - const llvm::DenseSet& get_ids(const SchemaRef& schema_ref) { - return ids[schema_ref.value()]; - } - - // removes node_id and updates all connections and ids - void remove_node(int64_t node_id, const SchemaRef& schema_ref) { - ids[schema_ref.value()].erase(node_id); - } - - arrow::Result update_table(const std::shared_ptr& table, - const SchemaRef& schema_ref) { - this->tables[schema_ref.value()] = table; - auto ids_result = get_ids_from_table(table); - if (!ids_result.ok()) { - log_error("Failed to get IDs from table: {}", schema_ref.value()); - return ids_result.status(); - } - ids[schema_ref.value()] = ids_result.ValueOrDie(); - return true; - } - - bool has_outgoing(const SchemaRef& schema_ref, int64_t node_id) const { - return connections.contains(schema_ref.value()) && - connections.at(schema_ref.value()).contains(node_id) && - !connections.at(schema_ref.value()).at(node_id).empty(); - } - - std::string ToString() const { - std::stringstream ss; - ss << "QueryState {\n"; - ss << " From: " << from.toString() << "\n"; - - ss << " Tables (" << tables.size() << "):\n"; - for (const auto& [alias, table_ptr] : tables) { - if (table_ptr) { - ss << " - " << alias << ": " << table_ptr->num_rows() << " rows, " - << table_ptr->num_columns() << " columns\n"; - } else { - ss << " - " << alias << ": (nullptr)\n"; - } - } - - ss << " IDs (" << ids.size() << "):\n"; - for (const auto& [alias, id_set] : ids) { - ss << " - " << alias.str() << ": " << id_set.size() << " IDs\n"; - } - - ss << " Aliases (" << aliases.size() << "):\n"; - for (const auto& [alias, schema_name] : aliases) { - ss << " - " << alias << " -> " << schema_name << "\n"; - } - - ss 
<< " Connections (Outgoing) (" << connections.size() - << " source nodes):"; - for (const auto& [from, conns] : connections) { - for (const auto& [from_id, conn_vec] : conns) { - ss << "from " << from.str() << ":" << from_id << ":\n"; - for (const auto& conn : conn_vec) { - ss << " - " << conn.target.value() << ":" << conn.target_id - << "\n"; - } - } - } - - ss << " Connections (Incoming) (" << incoming.size() << " target nodes):"; - int target_nodes_printed = 0; - for (const auto& [target_id, conns_vec] : incoming) { - if (target_nodes_printed >= 3 && - incoming.size() > 5) { // Limit nodes printed - ss << " ... and " << (incoming.size() - target_nodes_printed) - << " more target nodes ...\n"; - break; - } - ss << " - Target ID " << target_id << " (" << conns_vec.size() - << " incoming):"; - int conns_printed_for_target = 0; - for (const auto& conn : conns_vec) { - if (conns_printed_for_target >= 3 && - conns_vec.size() > 5) { // Limit connections per node - ss << " ... and " - << (conns_vec.size() - conns_printed_for_target) - << " more connections ...\n"; - break; - } - ss << " <- " << conn.source.value() << ":" << conn.source_id - << " (via '" << conn.edge_type << "')\n"; - conns_printed_for_target++; - } - target_nodes_printed++; - } - - ss << " Traversals (" << traversals.size() << "):\n"; - for (size_t i = 0; i < traversals.size(); ++i) { - const auto& trav = traversals[i]; - ss << " - [" << i << "]: " << trav.source().value() << " -[" - << trav.edge_type() << "]-> " << trav.target().value() << " (Type: " - << (trav.traverse_type() == TraverseType::Inner ? 
"Inner" : "Other") - << ")\n"; - } - - ss << "}"; - return ss.str(); - } -}; - arrow::Result> build_denormalized_schema( const QueryState& query_state) { IF_DEBUG_ENABLED { log_debug("Building schema for denormalized table"); } @@ -1311,9 +1074,9 @@ populate_rows_bfs(int64_t node_id, const SchemaRef& start_schema, while (size-- > 0) { auto item = queue.front(); queue.pop(); - auto item_schema = item.schema_ref.is_declaration() - ? item.schema_ref.schema() - : query_state.aliases.at(item.schema_ref.value()); + ARROW_ASSIGN_OR_RAISE(const auto item_schema, + query_state.resolve_schema(item.schema_ref)); + auto node = query_state.node_manager->get_node(item_schema, item.node_id) .ValueOrDie(); const auto& it_fq = @@ -1888,7 +1651,7 @@ arrow::Status prepare_query(Query& query, QueryState& query_state) { // Phase 1: Process FROM clause to populate aliases { ARROW_ASSIGN_OR_RAISE(auto from_schema, - query_state.resolve_schema(query.from())); + query_state.register_schema(query.from())); // FROM clause already processed in main query() function } @@ -1899,9 +1662,9 @@ arrow::Status prepare_query(Query& query, QueryState& query_state) { // Resolve schemas and populate aliases ARROW_ASSIGN_OR_RAISE(auto source_schema, - query_state.resolve_schema(traverse->source())); + query_state.register_schema(traverse->source())); ARROW_ASSIGN_OR_RAISE(auto target_schema, - query_state.resolve_schema(traverse->target())); + query_state.register_schema(traverse->target())); if (!traverse->source().is_declaration()) { traverse->mutable_source().set_schema(source_schema); @@ -1995,7 +1758,7 @@ arrow::Result> Database::query( query_state.from = query.from(); query_state.from.set_tag(compute_tag(query_state.from)); ARROW_ASSIGN_OR_RAISE(auto source_schema, - query_state.resolve_schema(query.from())); + query_state.register_schema(query.from())); if (!this->schema_registry_->exists(source_schema)) { log_error("schema '{}' doesn't exist", source_schema); return arrow::Status::KeyError("schema 
doesn't exit: {}", source_schema);
@@ -2100,27 +1863,16 @@ arrow::Result> Database::query(
   // Tags and schemas are already set during preparation phase
   // Get resolved schemas using const resolve_schema (read-only)
-  auto source_schema =
-      traverse->source().is_declaration()
-          ? traverse->source().schema()
-          : query_state.aliases.at(traverse->source().value());
-  auto target_schema =
-      traverse->target().is_declaration()
-          ? traverse->target().schema()
-          : query_state.aliases.at(traverse->target().value());
-
+  ARROW_ASSIGN_OR_RAISE(const auto source_schema,
+                        query_state.resolve_schema(traverse->source()));
+  ARROW_ASSIGN_OR_RAISE(const auto target_schema,
+                        query_state.resolve_schema(traverse->target()));
   // Fully-qualified field names should also be precomputed during
   // preparation
-  if (auto res = query_state.compute_fully_qualified_names(
-          traverse->source(), source_schema);
-      !res.ok()) {
-    return res.status();
-  }
-  if (auto res = query_state.compute_fully_qualified_names(
-          traverse->target(), target_schema);
-      !res.ok()) {
-    return res.status();
-  }
+  ARROW_RETURN_NOT_OK(query_state.compute_fully_qualified_names(
+      traverse->source(), source_schema));
+  ARROW_RETURN_NOT_OK(query_state.compute_fully_qualified_names(
+      traverse->target(), target_schema));
   std::vector> where_clauses;
   if (query.inline_where()) {
@@ -2239,7 +1991,7 @@ arrow::Result> Database::query(
   }
   IF_DEBUG_ENABLED {
     log_debug("rebuild table for schema {}:{}", source.value(),
-              query_state.aliases[source.value()]);
+              query_state.resolve_schema(source).ValueOr("<unresolved>"));
   }
   auto table_result =
       filter_table_by_id(query_state.tables[source.value()],
diff --git a/src/query.cpp b/src/query.cpp
index 43d68d6..ba3ce3e 100644
--- a/src/query.cpp
+++ b/src/query.cpp
@@ -1,7 +1,196 @@
 #include "query.hpp"
+#include
+#include
+#include
+#include
+#include
+
+#include "arrow_utils.hpp"
+#include "logger.hpp"
+
 namespace tundradb {
+// QueryState method implementations
+
+void QueryState::reserve_capacity(const Query&
query) { + // Estimate schema count from FROM + TRAVERSE clauses + size_t estimated_schemas = 1; // FROM clause + for (const auto& clause : query.clauses()) { + if (clause->type() == Clause::Type::TRAVERSE) { + estimated_schemas += 2; // source + target schemas + } + } + + // Pre-size standard containers (LLVM containers don't support reserve) + tables.reserve(estimated_schemas); + aliases.reserve(estimated_schemas); + + // Estimate nodes per schema (conservative estimate) + size_t estimated_nodes_per_schema = 1000; + incoming.reserve(estimated_nodes_per_schema); + + // Pre-size field mappings + field_id_to_name.reserve(estimated_schemas * 8); // ~8 fields per schema +} + +arrow::Result QueryState::register_schema( + const SchemaRef& schema_ref) { + if (aliases.contains(schema_ref.value()) && schema_ref.is_declaration()) { + IF_DEBUG_ENABLED { + log_debug("duplicated schema alias '{}' already assigned to '{}'", + schema_ref.value(), aliases.at(schema_ref.value())); + } + return aliases[schema_ref.value()]; + } + if (schema_ref.is_declaration()) { + aliases[schema_ref.value()] = schema_ref.schema(); + return schema_ref.schema(); + } + return aliases[schema_ref.value()]; +} + +arrow::Result QueryState::resolve_schema( + const SchemaRef& schema_ref) const { + if (schema_ref.is_declaration()) { + return schema_ref.schema(); + } + + if (!aliases.contains(schema_ref.value())) { + return arrow::Status::KeyError("no alias for '{}'", schema_ref.value()); + } + return aliases.at(schema_ref.value()); +} + +arrow::Result QueryState::compute_fully_qualified_names( + const SchemaRef& schema_ref) { + const auto it = aliases.find(schema_ref.value()); + if (it == aliases.end()) { + return arrow::Status::KeyError("keyset does not contain alias '{}'", + schema_ref.value()); + } + return compute_fully_qualified_names(schema_ref, it->second); +} + +arrow::Result QueryState::compute_fully_qualified_names( + const SchemaRef& schema_ref, const std::string& resolved_schema) { + const 
std::string& alias = schema_ref.value(); + if (fq_field_names.contains(alias)) { + return false; + } + auto schema_res = schema_registry->get(resolved_schema); + if (!schema_res.ok()) { + return schema_res.status(); + } + const auto& schema = schema_res.ValueOrDie(); + std::vector names; + std::vector indices; + names.reserve(schema->num_fields()); + indices.reserve(schema->num_fields()); + + for (const auto& f : schema->fields()) { + std::string fq_name = alias + "." + f->name(); + int field_id = next_field_id.fetch_add(1); + names.emplace_back(fq_name); + indices.emplace_back(field_id); + field_id_to_name[field_id] = fq_name; + field_name_to_index[fq_name] = field_id; + } + + fq_field_names[alias] = std::move(names); + schema_field_indices[alias] = std::move(indices); + return true; +} + +void QueryState::remove_node(int64_t node_id, const SchemaRef& schema_ref) { + ids[schema_ref.value()].erase(node_id); +} + +arrow::Result QueryState::update_table( + const std::shared_ptr& table, const SchemaRef& schema_ref) { + this->tables[schema_ref.value()] = table; + auto ids_result = get_ids_from_table(table); + if (!ids_result.ok()) { + log_error("Failed to get IDs from table: {}", schema_ref.value()); + return ids_result.status(); + } + ids[schema_ref.value()] = ids_result.ValueOrDie(); + return true; +} + +std::string QueryState::ToString() const { + std::stringstream ss; + ss << "QueryState {\n"; + ss << " From: " << from.toString() << "\n"; + + ss << " Tables (" << tables.size() << "):\n"; + for (const auto& [alias, table_ptr] : tables) { + if (table_ptr) { + ss << " - " << alias << ": " << table_ptr->num_rows() << " rows, " + << table_ptr->num_columns() << " columns\n"; + } else { + ss << " - " << alias << ": (nullptr)\n"; + } + } + + ss << " IDs (" << ids.size() << "):\n"; + for (const auto& [alias, id_set] : ids) { + ss << " - " << alias.str() << ": " << id_set.size() << " IDs\n"; + } + + ss << " Aliases (" << aliases.size() << "):\n"; + for (const auto& [alias, 
schema_name] : aliases) { + ss << " - " << alias << " -> " << schema_name << "\n"; + } + + ss << " Connections (Outgoing) (" << connections.size() << " source nodes):"; + for (const auto& [from, conns] : connections) { + for (const auto& [from_id, conn_vec] : conns) { + ss << "from " << from.str() << ":" << from_id << ":\n"; + for (const auto& conn : conn_vec) { + ss << " - " << conn.target.value() << ":" << conn.target_id << "\n"; + } + } + } + + ss << " Connections (Incoming) (" << incoming.size() << " target nodes):"; + int target_nodes_printed = 0; + for (const auto& [target_id, conns_vec] : incoming) { + if (target_nodes_printed >= 3 && incoming.size() > 5) { + ss << " ... and " << (incoming.size() - target_nodes_printed) + << " more target nodes ...\n"; + break; + } + ss << " - Target ID " << target_id << " (" << conns_vec.size() + << " incoming):"; + int conns_printed_for_target = 0; + for (const auto& conn : conns_vec) { + if (conns_printed_for_target >= 3 && conns_vec.size() > 5) { + ss << " ... and " + << (conns_vec.size() - conns_printed_for_target) + << " more connections ...\n"; + break; + } + ss << " <- " << conn.source.value() << ":" << conn.source_id + << " (via '" << conn.edge_type << "')\n"; + conns_printed_for_target++; + } + target_nodes_printed++; + } + + ss << " Traversals (" << traversals.size() << "):\n"; + for (size_t i = 0; i < traversals.size(); ++i) { + const auto& trav = traversals[i]; + ss << " - [" << i << "]: " << trav.source().value() << " -[" + << trav.edge_type() << "]-> " << trav.target().value() << " (Type: " + << (trav.traverse_type() == TraverseType::Inner ? "Inner" : "Other") + << ")\n"; + } + + ss << "}"; + return ss.str(); +} + // FieldRef implementation FieldRef FieldRef::from_string(const std::string& field_str) { const size_t dot_pos = field_str.find('.');