diff --git a/sqlmesh/core/context.py b/sqlmesh/core/context.py
index 9660243753..a8715adc4c 100644
--- a/sqlmesh/core/context.py
+++ b/sqlmesh/core/context.py
@@ -588,7 +588,7 @@ def state_sync(self) -> StateSync:
             if self._state_sync.get_versions(validate=False).schema_version == 0:
                 self.console.log_status_update("Initializing new project state...")
-                self._state_sync.migrate(default_catalog=self.default_catalog)
+                self._state_sync.migrate()
             self._state_sync.get_versions()
             self._state_sync = CachingStateSync(self._state_sync)  # type: ignore
         return self._state_sync
@@ -2356,7 +2356,6 @@ def migrate(self) -> None:
         self._load_materializations()
         try:
             self._new_state_sync().migrate(
-                default_catalog=self.default_catalog,
                 promoted_snapshots_only=self.config.migration.promoted_snapshots_only,
             )
         except Exception as e:
diff --git a/sqlmesh/core/state_sync/base.py b/sqlmesh/core/state_sync/base.py
index a8f73b6937..4219472cb6 100644
--- a/sqlmesh/core/state_sync/base.py
+++ b/sqlmesh/core/state_sync/base.py
@@ -61,11 +61,14 @@ def _schema_version_validator(cls, v: t.Any) -> int:
     return 0 if v is None else int(v)
 
 
+MIN_SCHEMA_VERSION = 60
+MIN_SQLMESH_VERSION = "0.134.0"
 MIGRATIONS = [
     importlib.import_module(f"sqlmesh.migrations.{migration}")
     for migration in sorted(info.name for info in pkgutil.iter_modules(migrations.__path__))
 ]
-SCHEMA_VERSION: int = len(MIGRATIONS)
+# -1 to account for the baseline script
+SCHEMA_VERSION: int = MIN_SCHEMA_VERSION + len(MIGRATIONS) - 1
 
 
 class PromotionResult(PydanticModel):
@@ -469,7 +472,6 @@ def compact_intervals(self) -> None:
     @abc.abstractmethod
     def migrate(
         self,
-        default_catalog: t.Optional[str],
         skip_backup: bool = False,
         promoted_snapshots_only: bool = True,
     ) -> None:
diff --git a/sqlmesh/core/state_sync/db/facade.py b/sqlmesh/core/state_sync/db/facade.py
index 85bebcc5d6..93c4b87e9e 100644
--- a/sqlmesh/core/state_sync/db/facade.py
+++ b/sqlmesh/core/state_sync/db/facade.py
@@ -22,7 +22,6 @@
 from pathlib import Path
 from datetime import datetime
 
-from sqlglot import exp
 
 from sqlmesh.core.console import Console, get_console
 from sqlmesh.core.engine_adapter import EngineAdapter
@@ -90,7 +89,6 @@ def __init__(
         console: t.Optional[Console] = None,
         cache_dir: Path = Path(),
     ):
-        self.plan_dags_table = exp.table_("_plan_dags", db=schema)
         self.interval_state = IntervalState(engine_adapter, schema=schema)
         self.environment_state = EnvironmentState(engine_adapter, schema=schema)
         self.snapshot_state = SnapshotState(engine_adapter, schema=schema, cache_dir=cache_dir)
@@ -101,7 +99,6 @@ def __init__(
             snapshot_state=self.snapshot_state,
             environment_state=self.environment_state,
             interval_state=self.interval_state,
-            plan_dags_table=self.plan_dags_table,
             console=console,
         )
         # Make sure that if an empty string is provided that we treat it as None
@@ -308,7 +305,6 @@ def remove_state(self, including_backup: bool = False) -> None:
             self.environment_state.environments_table,
             self.environment_state.environment_statements_table,
             self.interval_state.intervals_table,
-            self.plan_dags_table,
             self.version_state.versions_table,
         ):
             self.engine_adapter.drop_table(table)
@@ -453,14 +449,12 @@ def close(self) -> None:
     @transactional()
     def migrate(
         self,
-        default_catalog: t.Optional[str],
         skip_backup: bool = False,
         promoted_snapshots_only: bool = True,
     ) -> None:
         """Migrate the state sync to the latest SQLMesh / SQLGlot version."""
         self.migrator.migrate(
             self,
-            default_catalog,
             skip_backup=skip_backup,
             promoted_snapshots_only=promoted_snapshots_only,
         )
diff --git a/sqlmesh/core/state_sync/db/migrator.py b/sqlmesh/core/state_sync/db/migrator.py
index 616bd8659f..b803a5cc40 100644
--- a/sqlmesh/core/state_sync/db/migrator.py
+++ b/sqlmesh/core/state_sync/db/migrator.py
@@ -27,6 +27,8 @@
 )
 from sqlmesh.core.state_sync.base import (
     MIGRATIONS,
+    MIN_SCHEMA_VERSION,
+    MIN_SQLMESH_VERSION,
 )
 from sqlmesh.core.state_sync.base import StateSync
 from sqlmesh.core.state_sync.db.environment import EnvironmentState
@@ -41,7 +43,7 @@
 from sqlmesh.utils import major_minor
 from sqlmesh.utils.dag import DAG
 from sqlmesh.utils.date import now_timestamp
-from sqlmesh.utils.errors import SQLMeshError
+from sqlmesh.utils.errors import SQLMeshError, StateMigrationError
 
 logger = logging.getLogger(__name__)
 
@@ -61,7 +63,6 @@ def __init__(
         snapshot_state: SnapshotState,
         environment_state: EnvironmentState,
         interval_state: IntervalState,
-        plan_dags_table: TableName,
         console: t.Optional[Console] = None,
     ):
         self.engine_adapter = engine_adapter
@@ -70,7 +71,6 @@ def __init__(
         self.snapshot_state = snapshot_state
         self.environment_state = environment_state
         self.interval_state = interval_state
-        self.plan_dags_table = plan_dags_table
 
         self._state_tables = [
             self.snapshot_state.snapshots_table,
@@ -79,7 +79,6 @@ def __init__(
         ]
         self._optional_state_tables = [
             self.interval_state.intervals_table,
-            self.plan_dags_table,
             self.snapshot_state.auto_restatements_table,
             self.environment_state.environment_statements_table,
         ]
@@ -87,7 +86,6 @@ def __init__(
     def migrate(
         self,
         state_sync: StateSync,
-        default_catalog: t.Optional[str],
         skip_backup: bool = False,
         promoted_snapshots_only: bool = True,
     ) -> None:
@@ -96,15 +94,13 @@ def migrate(
         migration_start_ts = time.perf_counter()
 
         try:
-            migrate_rows = self._apply_migrations(state_sync, default_catalog, skip_backup)
+            migrate_rows = self._apply_migrations(state_sync, skip_backup)
             if not migrate_rows and major_minor(SQLMESH_VERSION) == versions.minor_sqlmesh_version:
                 return
 
             if migrate_rows:
                 self._migrate_rows(promoted_snapshots_only)
-                # Cleanup plan DAGs since we currently don't migrate snapshot records that are in there.
-                self.engine_adapter.delete_from(self.plan_dags_table, "TRUE")
 
             self.version_state.update_versions()
 
             analytics.collector.on_migration_end(
@@ -126,6 +122,8 @@ def migrate(
             )
 
             self.console.log_migration_status(success=False)
+            if isinstance(e, StateMigrationError):
+                raise
             raise SQLMeshError("SQLMesh migration failed.") from e
 
         self.console.log_migration_status()
@@ -156,11 +154,20 @@ def rollback(self) -> None:
     def _apply_migrations(
         self,
         state_sync: StateSync,
-        default_catalog: t.Optional[str],
        skip_backup: bool,
     ) -> bool:
         versions = self.version_state.get_versions()
-        migrations = MIGRATIONS[versions.schema_version :]
+        first_script_index = 0
+        if versions.schema_version and versions.schema_version < MIN_SCHEMA_VERSION:
+            raise StateMigrationError(
+                "The current state belongs to an old version of SQLMesh that is no longer supported. "
+                f"Please upgrade to {MIN_SQLMESH_VERSION} first before upgrading to {SQLMESH_VERSION}."
+            )
+        elif versions.schema_version > 0:
+            # -1 to skip the baseline migration script
+            first_script_index = versions.schema_version - (MIN_SCHEMA_VERSION - 1)
+
+        migrations = MIGRATIONS[first_script_index:]
         should_backup = any(
             [
                 migrations,
@@ -177,10 +184,10 @@ def _apply_migrations(
 
         for migration in migrations:
             logger.info(f"Applying migration {migration}")
-            migration.migrate_schemas(state_sync, default_catalog=default_catalog)
+            migration.migrate_schemas(state_sync)
             if state_table_exist:
                 # No need to run DML for the initial migration since all tables are empty
-                migration.migrate_rows(state_sync, default_catalog=default_catalog)
+                migration.migrate_rows(state_sync)
 
         snapshot_count_after = self.snapshot_state.count()
diff --git a/sqlmesh/migrations/v0056_restore_table_indexes.py b/sqlmesh/migrations/v0000_baseline.py
similarity index 52%
rename from sqlmesh/migrations/v0056_restore_table_indexes.py
rename to sqlmesh/migrations/v0000_baseline.py
index b460c1ebf7..4891900a76 100644
--- a/sqlmesh/migrations/v0056_restore_table_indexes.py
+++ b/sqlmesh/migrations/v0000_baseline.py
@@ -1,31 +1,27 @@
-"""Readds indexes and primary keys in case tables were restored from a backup."""
+"""The baseline migration script that sets up the initial state tables."""
 
 from sqlglot import exp
 
-from sqlmesh.utils import random_id
-from sqlmesh.utils.migration import index_text_type
-from sqlmesh.utils.migration import blob_text_type
+from sqlmesh.utils.migration import blob_text_type, index_text_type
 
 
 def migrate_schemas(state_sync, **kwargs):  # type: ignore
     schema = state_sync.schema
     engine_adapter = state_sync.engine_adapter
-    if not engine_adapter.SUPPORTS_INDEXES:
-        return
 
     intervals_table = "_intervals"
     snapshots_table = "_snapshots"
     environments_table = "_environments"
+    versions_table = "_versions"
 
     if state_sync.schema:
+        engine_adapter.create_schema(schema)
         intervals_table = f"{schema}.{intervals_table}"
         snapshots_table = f"{schema}.{snapshots_table}"
         environments_table = f"{schema}.{environments_table}"
-
-    table_suffix = random_id(short=True)
+        versions_table = f"{schema}.{versions_table}"
 
     index_type = index_text_type(engine_adapter.dialect)
     blob_type = blob_text_type(engine_adapter.dialect)
 
-    new_snapshots_table = f"{snapshots_table}__{table_suffix}"
     snapshots_columns_to_types = {
         "name": exp.DataType.build(index_type),
         "identifier": exp.DataType.build(index_type),
@@ -38,7 +34,6 @@ def migrate_schemas(state_sync, **kwargs):  # type: ignore
         "unrestorable": exp.DataType.build("boolean"),
     }
 
-    new_environments_table = f"{environments_table}__{table_suffix}"
     environments_columns_to_types = {
         "name": exp.DataType.build(index_type),
         "snapshots": exp.DataType.build(blob_type),
@@ -53,9 +48,9 @@ def migrate_schemas(state_sync, **kwargs):  # type: ignore
         "catalog_name_override": exp.DataType.build("text"),
         "previous_finalized_snapshots": exp.DataType.build(blob_type),
         "normalize_name": exp.DataType.build("boolean"),
+        "requirements": exp.DataType.build(blob_type),
     }
 
-    new_intervals_table = f"{intervals_table}__{table_suffix}"
     intervals_columns_to_types = {
         "id": exp.DataType.build(index_type),
         "created_ts": exp.DataType.build("bigint"),
@@ -69,53 +64,34 @@ def migrate_schemas(state_sync, **kwargs):  # type: ignore
         "is_compacted": exp.DataType.build("boolean"),
     }
 
-    # Recreate the snapshots table and its indexes.
-    engine_adapter.create_table(
-        new_snapshots_table, snapshots_columns_to_types, primary_key=("name", "identifier")
-    )
-    engine_adapter.create_index(
-        new_snapshots_table, "_snapshots_name_version_idx", ("name", "version")
-    )
-    engine_adapter.insert_append(
-        new_snapshots_table,
-        exp.select("*").from_(snapshots_table),
-        target_columns_to_types=snapshots_columns_to_types,
-    )
+    versions_columns_to_types = {
+        "schema_version": exp.DataType.build("int"),
+        "sqlglot_version": exp.DataType.build(index_type),
+        "sqlmesh_version": exp.DataType.build(index_type),
+    }
 
-    # Recreate the environments table and its indexes.
-    engine_adapter.create_table(
-        new_environments_table, environments_columns_to_types, primary_key=("name",)
-    )
-    engine_adapter.insert_append(
-        new_environments_table,
-        exp.select("*").from_(environments_table),
-        target_columns_to_types=environments_columns_to_types,
+    # Create the versions table.
+    engine_adapter.create_state_table(versions_table, versions_columns_to_types)
+
+    # Create the snapshots table and its indexes.
+    engine_adapter.create_state_table(
+        snapshots_table, snapshots_columns_to_types, primary_key=("name", "identifier")
     )
+    engine_adapter.create_index(snapshots_table, "_snapshots_name_version_idx", ("name", "version"))
 
-    # Recreate the intervals table and its indexes.
-    engine_adapter.create_table(
-        new_intervals_table, intervals_columns_to_types, primary_key=("id",)
+    # Create the environments table and its indexes.
+    engine_adapter.create_state_table(
+        environments_table, environments_columns_to_types, primary_key=("name",)
     )
-    engine_adapter.create_index(
-        new_intervals_table, "_intervals_name_identifier_idx", ("name", "identifier")
+
+    # Create the intervals table and its indexes.
+    engine_adapter.create_state_table(
+        intervals_table, intervals_columns_to_types, primary_key=("id",)
     )
     engine_adapter.create_index(
-        new_intervals_table, "_intervals_name_version_idx", ("name", "version")
+        intervals_table, "_intervals_name_identifier_idx", ("name", "identifier")
     )
-    engine_adapter.insert_append(
-        new_intervals_table,
-        exp.select("*").from_(intervals_table),
-        target_columns_to_types=intervals_columns_to_types,
-    )
-
-    # Drop old tables.
-    for table in (snapshots_table, environments_table, intervals_table):
-        engine_adapter.drop_table(table)
-
-    # Replace old tables with new ones.
-    engine_adapter.rename_table(new_snapshots_table, snapshots_table)
-    engine_adapter.rename_table(new_environments_table, environments_table)
-    engine_adapter.rename_table(new_intervals_table, intervals_table)
+    engine_adapter.create_index(intervals_table, "_intervals_name_version_idx", ("name", "version"))
 
 
 def migrate_rows(state_sync, **kwargs):  # type: ignore
diff --git a/sqlmesh/migrations/v0001_init.py b/sqlmesh/migrations/v0001_init.py
deleted file mode 100644
index 42d623d1d0..0000000000
--- a/sqlmesh/migrations/v0001_init.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""All migrations should be named _XXXX.py, they will be executed sequentially.
-
-If a migration alters the payload of any pydantic models, you should not actually use them because
-the running model may not be able to load them. Make sure that these migration files are standalone.
-""" - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - environments_table = "_environments" - versions_table = "_versions" - - if schema: - engine_adapter.create_schema(schema) - snapshots_table = f"{schema}.{snapshots_table}" - environments_table = f"{schema}.{environments_table}" - versions_table = f"{schema}.{versions_table}" - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.create_state_table( - snapshots_table, - { - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - }, - primary_key=("name", "identifier"), - ) - - engine_adapter.create_index(snapshots_table, "name_version_idx", ("name", "version")) - - engine_adapter.create_state_table( - environments_table, - { - "name": exp.DataType.build(index_type), - "snapshots": exp.DataType.build("text"), - "start_at": exp.DataType.build("text"), - "end_at": exp.DataType.build("text"), - "plan_id": exp.DataType.build("text"), - "previous_plan_id": exp.DataType.build("text"), - "expiration_ts": exp.DataType.build("bigint"), - }, - primary_key=("name",), - ) - - engine_adapter.create_state_table( - versions_table, - { - "schema_version": exp.DataType.build("int"), - "sqlglot_version": exp.DataType.build("text"), - }, - ) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0002_remove_identify.py b/sqlmesh/migrations/v0002_remove_identify.py deleted file mode 100644 index d8f9a1c0cd..0000000000 --- a/sqlmesh/migrations/v0002_remove_identify.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Remove identify=True kwarg for rendering sql""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0003_move_batch_size.py b/sqlmesh/migrations/v0003_move_batch_size.py deleted file mode 100644 index e8efff6162..0000000000 --- a/sqlmesh/migrations/v0003_move_batch_size.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Move batch_size from the model and into the kind.""" - -import json - -from sqlglot import exp - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - snapshots_table = "_snapshots" - if state_sync.schema: - snapshots_table = f"{state_sync.schema}.{snapshots_table}" - - for row in state_sync.engine_adapter.fetchall( - exp.select("*").from_(snapshots_table), quote_identifiers=True - ): - name, identifier, _, snapshot = row - snapshot = json.loads(snapshot) - model = snapshot["model"] - if "batch_size" in model: - batch_size = model.pop("batch_size") - kind = model.get("kind") - - if kind: - if kind["name"] in ("INCREMENTAL_BY_TIME_RANGE", "INCREMENTAL_BY_UNIQUE_KEY"): - kind["batch_size"] = batch_size - - # this is not efficient, i'm doing this because i'm lazy and no one has snapshots at the time of writing this migration - # do not copy this code in future migrations - - state_sync.engine_adapter.update_table( - snapshots_table, - {"snapshot": json.dumps(snapshot)}, - where=f"name = '{name}' and identifier = '{identifier}'", - ) diff --git a/sqlmesh/migrations/v0004_environmnent_add_finalized_at.py b/sqlmesh/migrations/v0004_environmnent_add_finalized_at.py deleted file mode 100644 
index bddbef5971..0000000000
--- a/sqlmesh/migrations/v0004_environmnent_add_finalized_at.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Add support for environment finalization."""
-
-from sqlglot import exp
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    environments_table = "_environments"
-    if state_sync.schema:
-        environments_table = f"{state_sync.schema}.{environments_table}"
-
-    alter_table_exp = exp.Alter(
-        this=exp.to_table(environments_table),
-        kind="TABLE",
-        actions=[
-            exp.ColumnDef(
-                this=exp.to_column("finalized_ts"),
-                kind=exp.DataType.build("bigint"),
-            )
-        ],
-    )
-
-    engine_adapter.execute(alter_table_exp)
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    pass
diff --git a/sqlmesh/migrations/v0005_create_seed_table.py b/sqlmesh/migrations/v0005_create_seed_table.py
deleted file mode 100644
index 803a47f724..0000000000
--- a/sqlmesh/migrations/v0005_create_seed_table.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Create a dedicated table to store the content of seeds."""
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    seeds_table = "_seeds"
-    if state_sync.schema:
-        seeds_table = f"{state_sync.schema}.{seeds_table}"
-
-    index_type = index_text_type(engine_adapter.dialect)
-
-    engine_adapter.create_state_table(
-        seeds_table,
-        {
-            "name": exp.DataType.build(index_type),
-            "identifier": exp.DataType.build(index_type),
-            "content": exp.DataType.build("text"),
-        },
-        primary_key=("name", "identifier"),
-    )
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    pass
diff --git a/sqlmesh/migrations/v0006_change_seed_hash.py b/sqlmesh/migrations/v0006_change_seed_hash.py
deleted file mode 100644
index c9f771a912..0000000000
--- a/sqlmesh/migrations/v0006_change_seed_hash.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Seed hashes moved from to_string to to_json for performance."""
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    pass
diff --git a/sqlmesh/migrations/v0007_env_table_info_to_kind.py b/sqlmesh/migrations/v0007_env_table_info_to_kind.py
deleted file mode 100644
index 52d483b3cb..0000000000
--- a/sqlmesh/migrations/v0007_env_table_info_to_kind.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""Change environments because snapshot table info now stores model kind name."""
-
-import json
-import zlib
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def _hash(data):  # type: ignore
-    return str(zlib.crc32(";".join("" if d is None else d for d in data).encode("utf-8")))
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    environments_table = "_environments"
-    snapshots_table = "_snapshots"
-    if schema:
-        environments_table = f"{schema}.{environments_table}"
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    snapshots_to_kind = {}
-
-    for name, identifier, snapshot in engine_adapter.fetchall(
-        exp.select("name", "identifier", "snapshot").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        snapshot = json.loads(snapshot)
-        snapshots_to_kind[(name, identifier)] = snapshot["model"]["kind"]["name"]
-
-    environments = engine_adapter.fetchall(
-        exp.select("*").from_(environments_table), quote_identifiers=True
-    )
-
-    new_environments = []
-
-    for (
-        name,
-        snapshots,
-        start_at,
-        end_at,
-        plan_id,
-        previous_plan_id,
-        expiration_ts,
-        finalized_ts,
-    ) in environments:
-        new_snapshots = []
-
-        for snapshot in json.loads(snapshots):
-            snapshot.pop("is_materialized", None)
-            snapshot.pop("is_embedded_kind", None)
-
-            fingerprint = snapshot["fingerprint"]
-            identifier = _hash(
-                [
-                    fingerprint["data_hash"],
-                    fingerprint["metadata_hash"],
-                    fingerprint["parent_data_hash"],
-                    fingerprint["parent_metadata_hash"],
-                ]
-            )
-
-            snapshot["kind_name"] = snapshots_to_kind.get((snapshot["name"], identifier), "VIEW")
-            new_snapshots.append(snapshot)
-
-        new_environments.append(
-            {
-                "name": name,
-                "snapshots": json.dumps(new_snapshots),
-                "start_at": start_at,
-                "end_at": end_at,
-                "plan_id": plan_id,
-                "previous_plan_id": previous_plan_id,
-                "expiration_ts": expiration_ts,
-                "finalized_ts": finalized_ts,
-            }
-        )
-
-    if new_environments:
-        engine_adapter.delete_from(environments_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            environments_table,
-            pd.DataFrame(new_environments),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "snapshots": exp.DataType.build("text"),
-                "start_at": exp.DataType.build("text"),
-                "end_at": exp.DataType.build("text"),
-                "plan_id": exp.DataType.build("text"),
-                "previous_plan_id": exp.DataType.build("text"),
-                "expiration_ts": exp.DataType.build("bigint"),
-                "finalized_ts": exp.DataType.build("bigint"),
-            },
-        )
diff --git a/sqlmesh/migrations/v0008_create_intervals_table.py b/sqlmesh/migrations/v0008_create_intervals_table.py
deleted file mode 100644
index 7ba8888608..0000000000
--- a/sqlmesh/migrations/v0008_create_intervals_table.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""Create a dedicated table to store snapshot intervals."""
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    intervals_table = "_intervals"
-    if state_sync.schema:
-        intervals_table = f"{state_sync.schema}.{intervals_table}"
-
-    index_type = index_text_type(engine_adapter.dialect)
-
-    engine_adapter.create_state_table(
-        intervals_table,
-        {
-            "id": exp.DataType.build(index_type),
-            "created_ts": exp.DataType.build("bigint"),
-            "name": exp.DataType.build(index_type),
-            "identifier": exp.DataType.build(index_type),
-            "version": exp.DataType.build(index_type),
-            "start_ts": exp.DataType.build("bigint"),
-            "end_ts": exp.DataType.build("bigint"),
-            "is_dev": exp.DataType.build("boolean"),
-            "is_removed": exp.DataType.build("boolean"),
-            "is_compacted": exp.DataType.build("boolean"),
-        },
-        primary_key=("id",),
-    )
-
-    engine_adapter.create_index(
-        intervals_table, "name_version_idx", ("name", "version", "created_ts")
-    )
-    engine_adapter.create_index(
-        intervals_table, "name_identifier_idx", ("name", "identifier", "created_ts")
-    )
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    pass
diff --git a/sqlmesh/migrations/v0009_remove_pre_post_hooks.py b/sqlmesh/migrations/v0009_remove_pre_post_hooks.py
deleted file mode 100644
index 534f366d69..0000000000
--- a/sqlmesh/migrations/v0009_remove_pre_post_hooks.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Remove pre- / post- hooks from existing snapshots."""
-
-import json
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshopt in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        snapshot = json.loads(snapshopt)
-        pre_hooks = snapshot["model"].pop("pre", [])
-        post_hooks = snapshot["model"].pop("post", [])
-
-        expressions = snapshot["model"].pop("expressions", None)
-        if expressions and snapshot["model"]["source_type"] == "sql":
-            snapshot["model"]["pre_statements"] = expressions
-
-        if pre_hooks or post_hooks:
-            print(
-                "WARNING: Hooks are no longer supported by SQLMesh, use pre and post SQL statements instead. "
-                f"Removing 'pre' and 'post' attributes from snapshot name='{name}', identifier='{identifier}'"
-            )
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(snapshot),
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-            },
-        )
diff --git a/sqlmesh/migrations/v0010_seed_hash_batch_size.py b/sqlmesh/migrations/v0010_seed_hash_batch_size.py
deleted file mode 100644
index 20186e0068..0000000000
--- a/sqlmesh/migrations/v0010_seed_hash_batch_size.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Seed metadata hashes now correctly include the batch_size."""
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    pass
diff --git a/sqlmesh/migrations/v0011_add_model_kind_name.py b/sqlmesh/migrations/v0011_add_model_kind_name.py
deleted file mode 100644
index 3d76d61597..0000000000
--- a/sqlmesh/migrations/v0011_add_model_kind_name.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Add the kind_name column to the snapshots table."""
-
-import json
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    index_type = index_text_type(engine_adapter.dialect)
-
-    alter_table_exp = exp.Alter(
-        this=exp.to_table(snapshots_table),
-        kind="TABLE",
-        actions=[
-            exp.ColumnDef(
-                this=exp.to_column("kind_name"),
-                kind=exp.DataType.build(index_type),
-            )
-        ],
-    )
-    engine_adapter.execute(alter_table_exp)
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    index_type = index_text_type(engine_adapter.dialect)
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": snapshot,
-                "kind_name": parsed_snapshot["model"]["kind"]["name"],
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
diff --git a/sqlmesh/migrations/v0012_update_jinja_expressions.py b/sqlmesh/migrations/v0012_update_jinja_expressions.py
deleted file mode 100644
index 99897fa59d..0000000000
--- a/sqlmesh/migrations/v0012_update_jinja_expressions.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""Fix expressions that contain jinja."""
-
-import json
-import typing as t
-
-from sqlglot import exp
-
-from sqlmesh.utils.jinja import has_jinja
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-        audits = parsed_snapshot.get("audits", [])
-        model = parsed_snapshot["model"]
-
-        if "query" in model and has_jinja(model["query"]):
-            model["query"] = _wrap_query(model["query"])
-
-        _wrap_statements(model, "pre_statements")
-        _wrap_statements(model, "post_statements")
-
-        for audit in audits:
-            if has_jinja(audit["query"]):
-                audit["query"] = _wrap_query(audit["query"])
-            _wrap_statements(audit, "expressions")
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(parsed_snapshot),
-                "kind_name": kind_name,
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
-
-
-def _wrap_statements(obj: t.Dict, key: str) -> None:
-    updated_statements = []
-    for statement in obj.get(key, []):
-        if has_jinja(statement):
-            statement = _wrap_statement(statement)
-        updated_statements.append(statement)
-
-    if updated_statements:
-        obj[key] = updated_statements
-
-
-def _wrap_query(sql: str) -> str:
-    return f"JINJA_QUERY_BEGIN;\n{sql}\nJINJA_END;"
-
-
-def _wrap_statement(sql: str) -> str:
-    return f"JINJA_STATEMENT_BEGIN;\n{sql}\nJINJA_END;"
diff --git a/sqlmesh/migrations/v0013_serde_using_model_dialects.py b/sqlmesh/migrations/v0013_serde_using_model_dialects.py
deleted file mode 100644
index 5d865930e7..0000000000
--- a/sqlmesh/migrations/v0013_serde_using_model_dialects.py
+++ /dev/null
@@ -1,91 +0,0 @@
-"""Serialize SQL using the dialect of each model."""
-
-import json
-import typing as t
-
-from sqlglot import exp, parse_one
-
-from sqlmesh.utils.jinja import has_jinja
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-        model = parsed_snapshot["model"]
-        dialect = model["dialect"]
-
-        _update_expression(model, "query", dialect)
-        _update_expression_list(model, "pre_statements", dialect)
-        _update_expression_list(model, "post_statements", dialect)
-
-        for audit in parsed_snapshot.get("audits", []):
-            dialect = audit["dialect"]
-            _update_expression(audit, "query", dialect)
-            _update_expression_list(audit, "expressions", dialect)
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(parsed_snapshot),
-                "kind_name": kind_name,
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
-
-
-# Note: previously we used to do serde using the SQLGlot dialect, so we need to parse the
-# stored queries using that dialect and then write them back using the correct dialect.
-
-
-def _update_expression(obj: t.Dict, key: str, dialect: str) -> None:
-    if key in obj and not has_jinja(obj[key]):
-        obj[key] = parse_one(obj[key]).sql(dialect=dialect)
-
-
-def _update_expression_list(obj: t.Dict, key: str, dialect: str) -> None:
-    if key in obj:
-        obj[key] = [
-            (
-                parse_one(expression).sql(dialect=dialect)
-                if not has_jinja(expression)
-                else expression
-            )
-            for expression in obj[key]
-            if expression
-        ]
diff --git a/sqlmesh/migrations/v0014_fix_dev_intervals.py b/sqlmesh/migrations/v0014_fix_dev_intervals.py
deleted file mode 100644
index d5f4d86f9d..0000000000
--- a/sqlmesh/migrations/v0014_fix_dev_intervals.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Fix snapshot intervals that have been erroneously marked as dev."""
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    schema = state_sync.schema
-    intervals_table = "_intervals"
-    if schema:
-        intervals_table = f"{schema}.{intervals_table}"
-
-    state_sync.engine_adapter.update_table(
-        intervals_table,
-        {"is_dev": False},
-        where="1=1",
-    )
diff --git a/sqlmesh/migrations/v0015_environment_add_promoted_snapshot_ids.py b/sqlmesh/migrations/v0015_environment_add_promoted_snapshot_ids.py
deleted file mode 100644
index b1e42e1eb7..0000000000
--- a/sqlmesh/migrations/v0015_environment_add_promoted_snapshot_ids.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""Include a set of snapshot IDs filtered for promotion."""
-
-from sqlglot import exp
-from sqlmesh.utils.migration import blob_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    environments_table = "_environments"
-    if state_sync.schema:
-        environments_table = f"{state_sync.schema}.{environments_table}"
-
-    blob_type = blob_text_type(engine_adapter.dialect)
-
-    alter_table_exp = exp.Alter(
-        this=exp.to_table(environments_table),
-        kind="TABLE",
-        actions=[
-            exp.ColumnDef(
-                this=exp.to_column("promoted_snapshot_ids"),
-                kind=exp.DataType.build(blob_type),
-            )
-        ],
-    )
-
-    engine_adapter.execute(alter_table_exp)
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    pass
diff --git a/sqlmesh/migrations/v0016_fix_windows_path.py b/sqlmesh/migrations/v0016_fix_windows_path.py
deleted file mode 100644
index 3570cc368e..0000000000
--- a/sqlmesh/migrations/v0016_fix_windows_path.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Fix paths that have a Windows forward slash in them."""
-
-import json
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-        model = parsed_snapshot["model"]
-        python_env = model.get("python_env")
-        if python_env:
-            for py_definition in python_env.values():
-                path = py_definition.get("path")
-                if path:
-                    py_definition["path"] = path.replace("\\", "/")
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(parsed_snapshot),
kind_name, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0017_fix_windows_seed_path.py b/sqlmesh/migrations/v0017_fix_windows_seed_path.py deleted file mode 100644 index 57bdd3609d..0000000000 --- a/sqlmesh/migrations/v0017_fix_windows_seed_path.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Fix seed paths that have a Windows forward slash in them.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - model_kind = parsed_snapshot["model"]["kind"] - if "path" in model_kind: - model_kind["path"] = model_kind["path"].replace("\\", "/") - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0018_rename_snapshot_model_to_node.py b/sqlmesh/migrations/v0018_rename_snapshot_model_to_node.py deleted file mode 100644 index e17eeded61..0000000000 --- a/sqlmesh/migrations/v0018_rename_snapshot_model_to_node.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Replace snapshot model field with node.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - parsed_snapshot["node"] = parsed_snapshot.pop("model") - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - 
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
diff --git a/sqlmesh/migrations/v0019_add_env_suffix_target.py b/sqlmesh/migrations/v0019_add_env_suffix_target.py
deleted file mode 100644
index 88227c8fdd..0000000000
--- a/sqlmesh/migrations/v0019_add_env_suffix_target.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Add support for environment suffix target."""
-
-from sqlglot import exp
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    environments_table = "_environments"
-    if state_sync.schema:
-        environments_table = f"{state_sync.schema}.{environments_table}"
-
-    alter_table_exp = exp.Alter(
-        this=exp.to_table(environments_table),
-        kind="TABLE",
-        actions=[
-            exp.ColumnDef(
-                this=exp.to_column("suffix_target"),
-                kind=exp.DataType.build("text"),
-            )
-        ],
-    )
-    engine_adapter.execute(alter_table_exp)
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    engine_adapter = state_sync.engine_adapter
-    environments_table = "_environments"
-    if state_sync.schema:
-        environments_table = f"{state_sync.schema}.{environments_table}"
-
-    state_sync.engine_adapter.update_table(
-        environments_table,
-        {"suffix_target": "schema"},
-        where="1=1",
-    )
diff --git a/sqlmesh/migrations/v0020_remove_redundant_attributes_from_dbt_models.py b/sqlmesh/migrations/v0020_remove_redundant_attributes_from_dbt_models.py
deleted file mode 100644
index 788974ccee..0000000000
--- a/sqlmesh/migrations/v0020_remove_redundant_attributes_from_dbt_models.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""Remove redundant attributes from dbt models."""
-
-import json
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-        jinja_macros_global_objs = parsed_snapshot["node"]["jinja_macros"]["global_objs"]
-        if "config" in jinja_macros_global_objs and isinstance(
-            jinja_macros_global_objs["config"], dict
-        ):
-            for key in CONFIG_ATTRIBUTE_KEYS_TO_REMOVE:
-                jinja_macros_global_objs["config"].pop(key, None)
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(parsed_snapshot),
-                "kind_name": kind_name,
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
"snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) - - -CONFIG_ATTRIBUTE_KEYS_TO_REMOVE = [ - "config", - "config_call_dict", - "depends_on", - "dependencies", - "metrics", - "original_file_path", - "packages", - "patch_path", - "path", - "post-hook", - "pre-hook", - "raw_code", - "refs", - "resource_type", - "sources", - "sql", - "tests", - "unrendered_config", -] diff --git a/sqlmesh/migrations/v0021_fix_table_properties.py b/sqlmesh/migrations/v0021_fix_table_properties.py deleted file mode 100644 index c878cedb8b..0000000000 --- a/sqlmesh/migrations/v0021_fix_table_properties.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Fix table properties that have extra quoting due to a bug.""" - -import json - -from sqlglot import exp - -from sqlmesh.core import dialect as d -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - found_table_properties = False - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - table_properties = parsed_snapshot["node"].get("table_properties") - if table_properties: - found_table_properties = True - dialect = parsed_snapshot["node"].get("dialect") - parsed_snapshot["node"]["table_properties"] = exp.Tuple( - expressions=[ - exp.Literal.string(k).eq(d.parse_one(v)) for k, v in table_properties.items() - ] - ).sql(dialect=dialect) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if found_table_properties: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0022_move_project_to_model.py b/sqlmesh/migrations/v0022_move_project_to_model.py deleted file mode 100644 index 5a4eaa77f0..0000000000 --- a/sqlmesh/migrations/v0022_move_project_to_model.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Move project attr from snapshot to model.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - - parsed_snapshot["node"]["project"] = 
parsed_snapshot.pop("project", "") - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - if new_snapshots: - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0023_fix_added_models_with_forward_only_parents.py b/sqlmesh/migrations/v0023_fix_added_models_with_forward_only_parents.py deleted file mode 100644 index 2fa490b0ce..0000000000 --- a/sqlmesh/migrations/v0023_fix_added_models_with_forward_only_parents.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Fix snapshots of added models with forward only parents.""" - -import json -import typing as t - -from sqlglot import exp - -from sqlmesh.utils.dag import DAG - - -def migrate_schemas(state_sync: t.Any, **kwargs) -> None: # type: ignore - pass - - -def migrate_rows(state_sync: t.Any, **kwargs) -> None: # type: ignore - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - environments_table = "_environments" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - environments_table = f"{schema}.{environments_table}" - - dag: DAG[t.Tuple[str, str]] = DAG() - snapshot_mapping: t.Dict[t.Tuple[str, str], t.Dict[str, t.Any]] = {} - - for identifier, snapshot in engine_adapter.fetchall( - exp.select("identifier", "snapshot").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - - snapshot_id = (parsed_snapshot["name"], identifier) - snapshot_mapping[snapshot_id] = parsed_snapshot - - parent_ids = [ - (parent["name"], parent["identifier"]) for parent in parsed_snapshot["parents"] - ] - dag.add(snapshot_id, parent_ids) - - snapshots_to_delete = set() - - for snapshot_id in dag: - if snapshot_id not in snapshot_mapping: - continue - parsed_snapshot = snapshot_mapping[snapshot_id] - is_breaking = parsed_snapshot.get("change_category") == 1 - has_previous_versions = bool(parsed_snapshot.get("previous_versions", [])) - - has_paused_forward_only_parent = False - if is_breaking and not has_previous_versions: - for upstream_id in dag.upstream(snapshot_id): - if upstream_id not in snapshot_mapping: - continue - upstream_snapshot = snapshot_mapping[upstream_id] - upstream_change_category = upstream_snapshot.get("change_category") - is_forward_only_upstream = upstream_change_category == 3 - if is_forward_only_upstream and not upstream_snapshot.get("unpaused_ts"): - has_paused_forward_only_parent = True - break - - if has_paused_forward_only_parent: - snapshots_to_delete.add(snapshot_id) - - if snapshots_to_delete: - where = t.cast(exp.Tuple, exp.convert((exp.column("name"), exp.column("identifier")))).isin( - *snapshots_to_delete - ) - engine_adapter.delete_from(snapshots_table, where) diff --git a/sqlmesh/migrations/v0024_replace_model_kind_name_enum_with_value.py b/sqlmesh/migrations/v0024_replace_model_kind_name_enum_with_value.py deleted file mode 100644 index 81a9f79dde..0000000000 --- a/sqlmesh/migrations/v0024_replace_model_kind_name_enum_with_value.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Replace snapshot 
-"""Replace snapshot model_kind_name enum with value."""
-
-import json
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        corrected_kind_name = None
-        parsed_snapshot = json.loads(snapshot)
-        if "kind" in parsed_snapshot["node"]:
-            corrected_kind_name = parsed_snapshot["node"]["kind"].get("name")
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": snapshot,
-                "kind_name": corrected_kind_name,
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
diff --git a/sqlmesh/migrations/v0025_fix_intervals_and_missing_change_category.py b/sqlmesh/migrations/v0025_fix_intervals_and_missing_change_category.py
deleted file mode 100644
index 08c03c6a87..0000000000
--- a/sqlmesh/migrations/v0025_fix_intervals_and_missing_change_category.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""Normalize intervals and fix missing change category."""
-
-import json
-import zlib
-
-from sqlglot import exp
-
-from sqlmesh.utils import random_id
-from sqlmesh.utils.date import now_timestamp
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    intervals_table = "_intervals"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-        intervals_table = f"{schema}.{intervals_table}"
-
-    migration_required = False
-    new_snapshots = []
-    new_intervals = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-
-        if not parsed_snapshot.get("change_category"):
-            fingerprint = parsed_snapshot.get("fingerprint")
-            version = _hash(
-                [
-                    fingerprint["data_hash"],
-                    fingerprint["parent_data_hash"],
-                ]
-            )
-            parsed_snapshot["change_category"] = (
-                4 if version == parsed_snapshot.get("version") else 5
-            )
-            migration_required = True
-
-        def _add_interval(start_ts: int, end_ts: int, is_dev: bool) -> None:
-            new_intervals.append(
-                {
-                    "id": random_id(),
-                    "created_ts": now_timestamp(),
-                    "name": name,
-                    "identifier": identifier,
-                    "version": version,
-                    "start_ts": start_ts,
-                    "end_ts": end_ts,
-                    "is_dev": is_dev,
-                    "is_removed": False,
-                    "is_compacted": True,
-                }
-            )
-
-        for interval in parsed_snapshot.pop("intervals", []):
-            _add_interval(interval[0], interval[1], False)
-            migration_required = True
-
-        for interval in parsed_snapshot.pop("dev_intervals", []):
-            _add_interval(interval[0], interval[1], True)
-            migration_required = True
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(parsed_snapshot),
-                "kind_name": kind_name,
-            }
-        )
-
-    if migration_required:
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
-
-        if new_intervals:
-            engine_adapter.insert_append(
-                intervals_table,
-                pd.DataFrame(new_intervals),
-                target_columns_to_types={
-                    "id": exp.DataType.build(index_type),
-                    "created_ts": exp.DataType.build("bigint"),
-                    "name": exp.DataType.build(index_type),
-                    "identifier": exp.DataType.build(index_type),
-                    "version": exp.DataType.build(index_type),
-                    "start_ts": exp.DataType.build("bigint"),
-                    "end_ts": exp.DataType.build("bigint"),
-                    "is_dev": exp.DataType.build("boolean"),
-                    "is_removed": exp.DataType.build("boolean"),
-                    "is_compacted": exp.DataType.build("boolean"),
-                },
-            )
-
-
-def _hash(data):  # type: ignore
-    return str(zlib.crc32(";".join("" if d is None else d for d in data).encode("utf-8")))
diff --git a/sqlmesh/migrations/v0026_remove_dialect_from_seed.py b/sqlmesh/migrations/v0026_remove_dialect_from_seed.py
deleted file mode 100644
index 10d77b430b..0000000000
--- a/sqlmesh/migrations/v0026_remove_dialect_from_seed.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""Remove dialect from seeds."""
-
-import json
-
-from sqlglot import exp
-
-from sqlmesh.utils.migration import index_text_type
-
-
-def migrate_schemas(state_sync, **kwargs):  # type: ignore
-    pass
-
-
-def migrate_rows(state_sync, **kwargs):  # type: ignore
-    import pandas as pd
-
-    engine_adapter = state_sync.engine_adapter
-    schema = state_sync.schema
-    snapshots_table = "_snapshots"
-    if schema:
-        snapshots_table = f"{schema}.{snapshots_table}"
-
-    new_snapshots = []
-
-    for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall(
-        exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table),
-        quote_identifiers=True,
-    ):
-        parsed_snapshot = json.loads(snapshot)
-        node = parsed_snapshot["node"]
-        if "seed" in node:
-            node["seed"].pop("dialect", None)
-
-        new_snapshots.append(
-            {
-                "name": name,
-                "identifier": identifier,
-                "version": version,
-                "snapshot": json.dumps(parsed_snapshot),
-                "kind_name": kind_name,
-            }
-        )
-
-    if new_snapshots:
-        engine_adapter.delete_from(snapshots_table, "TRUE")
-
-        index_type = index_text_type(engine_adapter.dialect)
-
-        engine_adapter.insert_append(
-            snapshots_table,
-            pd.DataFrame(new_snapshots),
-            target_columns_to_types={
-                "name": exp.DataType.build(index_type),
-                "identifier": exp.DataType.build(index_type),
-                "version": exp.DataType.build(index_type),
-                "snapshot": exp.DataType.build("text"),
-                "kind_name": exp.DataType.build(index_type),
-            },
-        )
diff --git a/sqlmesh/migrations/v0027_minute_interval_to_five.py b/sqlmesh/migrations/v0027_minute_interval_to_five.py
deleted file mode 100644
index 8878536b6f..0000000000
--- a/sqlmesh/migrations/v0027_minute_interval_to_five.py
/dev/null @@ -1,61 +0,0 @@ -"""Change any interval unit of minute to five_minute.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - - node = parsed_snapshot["node"] - - if node.get("interval_unit") == "minute": - node["interval_unit"] = "five_minute" - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0028_add_plan_dags_table.py b/sqlmesh/migrations/v0028_add_plan_dags_table.py deleted file mode 100644 index b03fa45bba..0000000000 --- a/sqlmesh/migrations/v0028_add_plan_dags_table.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Creates the '_plan_dags' table if Airflow is used.""" - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - plan_dags_table = "_plan_dags" - - if schema: - engine_adapter.create_schema(schema) - plan_dags_table = f"{schema}.{plan_dags_table}" - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.create_state_table( - plan_dags_table, - { - "request_id": exp.DataType.build(index_type), - "dag_id": exp.DataType.build(index_type), - "dag_spec": exp.DataType.build("text"), - }, - primary_key=("request_id",), - ) - - engine_adapter.create_index(plan_dags_table, "dag_id_idx", ("dag_id",)) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0029_generate_schema_types_using_dialect.py b/sqlmesh/migrations/v0029_generate_schema_types_using_dialect.py deleted file mode 100644 index a8b2800fe0..0000000000 --- a/sqlmesh/migrations/v0029_generate_schema_types_using_dialect.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Generate mapping schema data types using the corresponding model's dialect.""" - -import json - -from sqlglot import exp, parse_one - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - for name, identifier, version, snapshot, kind_name in 
engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - node = parsed_snapshot["node"] - - mapping_schema = node.get("mapping_schema") - if mapping_schema: - node["mapping_schema"] = _convert_schema_types(mapping_schema, node["dialect"]) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) - - -def _convert_schema_types(schema, dialect): # type: ignore - if not schema: - return schema - - for k, v in schema.items(): - if isinstance(v, dict): - _convert_schema_types(v, dialect) - else: - schema[k] = parse_one(v).sql(dialect=dialect) - - return schema diff --git a/sqlmesh/migrations/v0030_update_unrestorable_snapshots.py b/sqlmesh/migrations/v0030_update_unrestorable_snapshots.py deleted file mode 100644 index 5f2d7f1dbf..0000000000 --- a/sqlmesh/migrations/v0030_update_unrestorable_snapshots.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Update unrestorable snapshots.""" - -import json -import typing as t -from collections import defaultdict - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync: t.Any, **kwargs: t.Any) -> None: # type: ignore - pass - - -def migrate_rows(state_sync: t.Any, **kwargs: t.Any) -> None: # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - snapshots_by_version = defaultdict(list) - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - snapshots_by_version[(name, version)].append((identifier, kind_name, parsed_snapshot)) - - for (name, version), snapshots in snapshots_by_version.items(): - has_forward_only = any(s["change_category"] == 3 for _, _, s in snapshots) - for identifier, kind_name, snapshot in snapshots: - if ( - has_forward_only - and snapshot["change_category"] != 3 - and not snapshot.get("unpaused_ts") - ): - snapshot["unrestorable"] = True - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(snapshot), - "kind_name": kind_name, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git 
a/sqlmesh/migrations/v0031_remove_dbt_target_fields.py b/sqlmesh/migrations/v0031_remove_dbt_target_fields.py deleted file mode 100644 index e99aaa7fa4..0000000000 --- a/sqlmesh/migrations/v0031_remove_dbt_target_fields.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Remove dbt target fields from snapshots outside of limited list of approved fields""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - found_dbt_target = False - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - node = parsed_snapshot["node"] - dbt_target = node.get("jinja_macros", {}).get("global_objs", {}).get("target", {}) - # Double check that `target_name` exists as a field since we know that all dbt targets have `target_name` - # We do this in case someone has a target macro defined that is not related to dbt - if dbt_target and dbt_target.get("target_name"): - found_dbt_target = True - node["jinja_macros"]["global_objs"]["target"] = { - "type": dbt_target.get("type", "None"), - "name": dbt_target.get("name", "None"), - "schema": dbt_target.get("schema", "None"), - "database": dbt_target.get("database", "None"), - "target_name": dbt_target["target_name"], - } - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if found_dbt_target: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build("text"), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0032_add_sqlmesh_version.py b/sqlmesh/migrations/v0032_add_sqlmesh_version.py deleted file mode 100644 index 032709f889..0000000000 --- a/sqlmesh/migrations/v0032_add_sqlmesh_version.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Add new 'sqlmesh_version' column to the version state table.""" - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - versions_table = "_versions" - if state_sync.schema: - versions_table = f"{state_sync.schema}.{versions_table}" - index_type = index_text_type(engine_adapter.dialect) - alter_table_exp = exp.Alter( - this=exp.to_table(versions_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("sqlmesh_version"), - kind=exp.DataType.build(index_type), - ) - ], - ) - - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0033_mysql_fix_blob_text_type.py b/sqlmesh/migrations/v0033_mysql_fix_blob_text_type.py deleted file mode 100644 index 5b3d0f2347..0000000000 
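Schema-change scripts like v0032 above build their DDL as sqlglot expressions rather than raw SQL strings, which lets one migration render correctly on every supported state engine. A minimal sketch of that pattern, mirroring the shape of the deleted script (the duckdb dialect is illustrative):

from sqlglot import exp

# Build ALTER TABLE ... ADD COLUMN as an AST, then render it per dialect.
alter_table = exp.Alter(
    this=exp.to_table("sqlmesh._versions"),
    kind="TABLE",
    actions=[
        exp.ColumnDef(
            this=exp.to_column("sqlmesh_version"),
            kind=exp.DataType.build("text"),
        )
    ],
)
print(alter_table.sql(dialect="duckdb"))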
--- a/sqlmesh/migrations/v0033_mysql_fix_blob_text_type.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Use LONGTEXT type for blob fields in MySQL.""" - -from sqlglot import exp - -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - if engine_adapter.dialect != "mysql": - return - - schema = state_sync.schema - environments_table = "_environments" - snapshots_table = "_snapshots" - seeds_table = "_seeds" - plan_dags_table = "_plan_dags" - - if schema: - environments_table = f"{schema}.{environments_table}" - snapshots_table = f"{schema}.{snapshots_table}" - seeds_table = f"{state_sync.schema}.{seeds_table}" - plan_dags_table = f"{schema}.{plan_dags_table}" - - targets = [ - (environments_table, "snapshots"), - (snapshots_table, "snapshot"), - (seeds_table, "content"), - (plan_dags_table, "dag_spec"), - ] - - for table_name, column_name in targets: - blob_type = blob_text_type(engine_adapter.dialect) - alter_table_exp = exp.Alter( - this=exp.to_table(table_name), - kind="TABLE", - actions=[ - exp.AlterColumn( - this=exp.to_column(column_name), - dtype=exp.DataType.build(blob_type), - ) - ], - ) - - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0034_add_default_catalog.py b/sqlmesh/migrations/v0034_add_default_catalog.py deleted file mode 100644 index 15a040364f..0000000000 --- a/sqlmesh/migrations/v0034_add_default_catalog.py +++ /dev/null @@ -1,371 +0,0 @@ -"""Add default catalog to snapshots and update names to match new normalization rules.""" - -from __future__ import annotations - -import json -import typing as t - -from sqlglot import exp -from sqlglot.dialects.dialect import DialectType -from sqlglot.helper import dict_depth, seq_get -from sqlglot.optimizer.normalize_identifiers import normalize_identifiers - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def set_default_catalog( - table: exp.Table, - default_catalog: t.Optional[str], -) -> exp.Table: - if default_catalog and not table.catalog and table.db: - table.set("catalog", exp.parse_identifier(default_catalog)) - - return table - - -def normalize_model_name( - table: str | exp.Table, - default_catalog: t.Optional[str], - dialect: DialectType = None, -) -> str: - table = exp.to_table(table, dialect=dialect) - - table = set_default_catalog(table, default_catalog) - return exp.table_name(normalize_identifiers(table, dialect=dialect), identify=True) - - -def normalize_mapping_schema(mapping_schema: t.Dict, dialect: str) -> t.Dict: - # Example input: {'"catalog"': {'schema': {'table': {'column': 'INT'}}}} - # Example output: {'"catalog"': {'"schema"': {'"table"': {'column': 'INT'}}}} - normalized_mapping_schema = {} - for key, value in mapping_schema.items(): - if isinstance(value, dict): - normalized_mapping_schema[normalize_model_name(key, None, dialect)] = ( - normalize_mapping_schema(value, dialect) - ) - else: - normalized_mapping_schema[key] = value - return normalized_mapping_schema - - -def update_dbt_relations( - source: t.Optional[t.Dict], keys: t.List[str], default_catalog: t.Optional[str] -) -> None: - if not default_catalog or not source: - return - for key in keys: - relations = source.get(key) - if relations: - relations = [relations] if "database" in relations else relations.values() - for relation in relations: - if not relation["database"]: - relation["database"] 
= default_catalog - - -def migrate_schemas(state_sync, default_catalog: t.Optional[str], **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, default_catalog: t.Optional[str], **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - environments_table = "_environments" - intervals_table = "_intervals" - seeds_table = "_seeds" - - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - environments_table = f"{schema}.{environments_table}" - intervals_table = f"{schema}.{intervals_table}" - seeds_table = f"{schema}.{seeds_table}" - - new_snapshots = [] - snapshot_to_dialect = {} - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - # This is here in the case where the user originally had catalog in this model name, and therefore - # we would have before created the table with the catalog in the name. New logic removes the catalog, - # and therefore we need to make sure the table name is the same as the original table name, so we include - # this override - parsed_snapshot["base_table_name_override"] = parsed_snapshot["name"] - node = parsed_snapshot["node"] - dialect = node.get("dialect") - normalized_name = ( - normalize_model_name(name, default_catalog=default_catalog, dialect=dialect) - if node["source_type"] != "audit" - else name - ) - parsed_snapshot["name"] = normalized_name - # At the time of migration all nodes had default catalog, so we don't have to check type - node["default_catalog"] = default_catalog - snapshot_to_dialect[name] = dialect - mapping_schema = node.get("mapping_schema", {}) - if mapping_schema: - normalized_default_catalog = ( - normalize_model_name(default_catalog, default_catalog=None, dialect=dialect) - if default_catalog - else None - ) - mapping_schema_depth = dict_depth(mapping_schema) - if mapping_schema_depth == 3 and normalized_default_catalog: - mapping_schema = {normalized_default_catalog: mapping_schema} - node["mapping_schema"] = normalize_mapping_schema(mapping_schema, dialect) - depends_on = node.get("depends_on", []) - if depends_on: - node["depends_on"] = [ - normalize_model_name(dep, default_catalog, dialect) for dep in depends_on - ] - if parsed_snapshot["parents"]: - parsed_snapshot["parents"] = [ - { - "name": normalize_model_name(parent["name"], default_catalog, dialect), - "identifier": parent["identifier"], - } - for parent in parsed_snapshot["parents"] - ] - if parsed_snapshot["indirect_versions"]: - parsed_snapshot["indirect_versions"] = { - normalize_model_name(name, default_catalog, dialect): snapshot_data_versions - for name, snapshot_data_versions in parsed_snapshot["indirect_versions"].items() - } - # dbt specific migration - jinja_macros = node.get("jinja_macros") - if ( - default_catalog - and jinja_macros - and jinja_macros.get("create_builtins_module") == "sqlmesh.dbt" - ): - update_dbt_relations( - jinja_macros.get("global_objs"), ["refs", "sources", "this"], default_catalog - ) - - new_snapshots.append( - { - "name": normalized_name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if new_snapshots: - 
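# A condensed, runnable restatement of the v0034 helpers above (type hints
# dropped): a missing catalog is injected before identifiers are normalized
# and quoted for the model's dialect.
from sqlglot import exp
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

def normalize_model_name(table, default_catalog, dialect=None):
    table = exp.to_table(table, dialect=dialect)
    if default_catalog and not table.catalog and table.db:
        table.set("catalog", exp.parse_identifier(default_catalog))
    return exp.table_name(normalize_identifiers(table, dialect=dialect), identify=True)

print(normalize_model_name("db.tbl", "main"))      # '"main"."db"."tbl"'
print(normalize_model_name("cat.db.tbl", "main"))  # an existing catalog is kept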
engine_adapter.delete_from(snapshots_table, "TRUE") - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - }, - ) - - new_environments = [] - default_dialect = seq_get(list(snapshot_to_dialect.values()), 0) - for ( - name, - snapshots, - start_at, - end_at, - plan_id, - previous_plan_id, - expiration_ts, - finalized_ts, - promoted_snapshot_ids, - suffix_target, - ) in engine_adapter.fetchall( - exp.select( - "name", - "snapshots", - "start_at", - "end_at", - "plan_id", - "previous_plan_id", - "expiration_ts", - "finalized_ts", - "promoted_snapshot_ids", - "suffix_target", - ).from_(environments_table), - quote_identifiers=True, - ): - new_snapshots = [] - for snapshot in json.loads(snapshots): - snapshot_name = snapshot["name"] - snapshot["base_table_name_override"] = snapshot_name - dialect = snapshot_to_dialect.get(snapshot_name, default_dialect) - node_type = snapshot.get("node_type") - normalized_name = ( - normalize_model_name(snapshot_name, default_catalog, dialect) - if node_type is None or node_type == "model" - else snapshot_name - ) - snapshot["name"] = normalized_name - if snapshot["parents"]: - snapshot["parents"] = [ - { - "name": normalize_model_name(parent["name"], default_catalog, dialect), - "identifier": parent["identifier"], - } - for parent in snapshot["parents"] - ] - new_snapshots.append(snapshot) - - new_environments.append( - { - "name": name, - "snapshots": json.dumps(new_snapshots), - "start_at": start_at, - "end_at": end_at, - "plan_id": plan_id, - "previous_plan_id": previous_plan_id, - "expiration_ts": expiration_ts, - "finalized_ts": finalized_ts, - "promoted_snapshot_ids": promoted_snapshot_ids, - "suffix_target": suffix_target, - } - ) - - if new_environments: - engine_adapter.delete_from(environments_table, "TRUE") - - engine_adapter.insert_append( - environments_table, - pd.DataFrame(new_environments), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "snapshots": exp.DataType.build(blob_type), - "start_at": exp.DataType.build("text"), - "end_at": exp.DataType.build("text"), - "plan_id": exp.DataType.build("text"), - "previous_plan_id": exp.DataType.build("text"), - "expiration_ts": exp.DataType.build("bigint"), - "finalized_ts": exp.DataType.build("bigint"), - "promoted_snapshot_ids": exp.DataType.build(blob_type), - "suffix_target": exp.DataType.build("text"), - }, - ) - - # We update environments to not be finalized in order to force their views to be updated, - # making sure the views now have the fully qualified names. - # We only do this if a default catalog was applied; otherwise the current views are fine. - # We do this after creating the new environments to avoid having to find a way to - # express a null timestamp value in pandas that works across all engines. - if default_catalog: - engine_adapter.execute( - exp.update(environments_table, {"finalized_ts": None}, where="1=1"), - quote_identifiers=True, - ) - - new_intervals = [] - for ( - id, - created_ts, - name, - identifier, - version, - start_ts, - end_ts, - is_dev, - is_removed, - is_compacted,
- ).from_(intervals_table), - quote_identifiers=True, - ): - dialect = snapshot_to_dialect.get(name, default_dialect) - normalized_name = normalize_model_name(name, default_catalog, dialect) - new_intervals.append( - { - "id": id, - "created_ts": created_ts, - "name": normalized_name, - "identifier": identifier, - "version": version, - "start_ts": start_ts, - "end_ts": end_ts, - "is_dev": is_dev, - "is_removed": is_removed, - "is_compacted": is_compacted, - } - ) - - if new_intervals: - engine_adapter.delete_from(intervals_table, "TRUE") - - engine_adapter.insert_append( - intervals_table, - pd.DataFrame(new_intervals), - target_columns_to_types={ - "id": exp.DataType.build(index_type), - "created_ts": exp.DataType.build("bigint"), - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "start_ts": exp.DataType.build("bigint"), - "end_ts": exp.DataType.build("bigint"), - "is_dev": exp.DataType.build("boolean"), - "is_removed": exp.DataType.build("boolean"), - "is_compacted": exp.DataType.build("boolean"), - }, - ) - - new_seeds = [] - for ( - name, - identifier, - content, - ) in engine_adapter.fetchall( - exp.select( - "name", - "identifier", - "content", - ).from_(seeds_table), - quote_identifiers=True, - ): - dialect = snapshot_to_dialect.get(name, default_dialect) - normalized_name = normalize_model_name(name, default_catalog, dialect) - new_seeds.append( - { - "name": normalized_name, - "identifier": identifier, - "content": content, - } - ) - - if new_seeds: - engine_adapter.delete_from(seeds_table, "TRUE") - - engine_adapter.insert_append( - seeds_table, - pd.DataFrame(new_seeds), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "content": exp.DataType.build("text"), - }, - ) diff --git a/sqlmesh/migrations/v0035_add_catalog_name_override.py b/sqlmesh/migrations/v0035_add_catalog_name_override.py deleted file mode 100644 index 3e2a42bd60..0000000000 --- a/sqlmesh/migrations/v0035_add_catalog_name_override.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Add support for environment catalog name override.""" - -from sqlglot import exp - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - environments_table = "_environments" - if state_sync.schema: - environments_table = f"{state_sync.schema}.{environments_table}" - - alter_table_exp = exp.Alter( - this=exp.to_table(environments_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("catalog_name_override"), - kind=exp.DataType.build("text"), - ) - ], - ) - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0036_delete_plan_dags_bug_fix.py b/sqlmesh/migrations/v0036_delete_plan_dags_bug_fix.py deleted file mode 100644 index 9cd10ccbe0..0000000000 --- a/sqlmesh/migrations/v0036_delete_plan_dags_bug_fix.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Add missing delete from migration #34.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - plan_dags_table = "_plan_dags" - if state_sync.schema: - plan_dags_table = f"{schema}.{plan_dags_table}" - - # At the time of migration plan_dags table is only needed for in-flight DAGs and therefore we can safely - # just delete it instead of migrating it - 
# If reusing this code verify that this is still the case - engine_adapter.delete_from(plan_dags_table, "TRUE") diff --git a/sqlmesh/migrations/v0037_remove_dbt_is_incremental_macro.py b/sqlmesh/migrations/v0037_remove_dbt_is_incremental_macro.py deleted file mode 100644 index 083f8301b4..0000000000 --- a/sqlmesh/migrations/v0037_remove_dbt_is_incremental_macro.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Remove dbt is_incremental macro""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - blob_type = blob_text_type(engine_adapter.dialect) - new_snapshots = [] - found_dbt_package = False - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - node = parsed_snapshot["node"] - dbt_package = node.get("jinja_macros", {}).get("packages", {}).get("dbt", {}) - - if dbt_package: - found_dbt_package = True - dbt_package.pop("is_incremental", None) - dbt_package.pop("should_full_refresh", None) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - } - ) - - if found_dbt_package: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - }, - ) diff --git a/sqlmesh/migrations/v0038_add_expiration_ts_to_snapshot.py b/sqlmesh/migrations/v0038_add_expiration_ts_to_snapshot.py deleted file mode 100644 index 5ddb3a4ee7..0000000000 --- a/sqlmesh/migrations/v0038_add_expiration_ts_to_snapshot.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Add the expiration_ts column to the snapshots table.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.date import to_datetime, to_timestamp -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - alter_table_exp = exp.Alter( - this=exp.to_table(snapshots_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("expiration_ts"), - kind=exp.DataType.build("bigint"), - ) - ], - ) - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - index_type = index_text_type(engine_adapter.dialect) - blob_type = 
blob_text_type(engine_adapter.dialect) - - new_snapshots = [] - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - - updated_ts = parsed_snapshot["updated_ts"] - ttl = parsed_snapshot["ttl"] - expiration_ts = to_timestamp(ttl, relative_base=to_datetime(updated_ts)) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": snapshot, - "kind_name": kind_name, - "expiration_ts": expiration_ts, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "expiration_ts": exp.DataType.build("bigint"), - }, - ) diff --git a/sqlmesh/migrations/v0039_include_environment_in_plan_dag_spec.py b/sqlmesh/migrations/v0039_include_environment_in_plan_dag_spec.py deleted file mode 100644 index fb1c0b1ec7..0000000000 --- a/sqlmesh/migrations/v0039_include_environment_in_plan_dag_spec.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Include environment in plan dag spec.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - plan_dags_table = "_plan_dags" - if state_sync.schema: - plan_dags_table = f"{schema}.{plan_dags_table}" - - new_specs = [] - - for request_id, dag_id, dag_spec in engine_adapter.fetchall( - exp.select("request_id", "dag_id", "dag_spec").from_(plan_dags_table), - quote_identifiers=True, - ): - parsed_dag_spec = json.loads(dag_spec) - - environment_naming_info = parsed_dag_spec.pop("environment_naming_info") - promoted_snapshots = parsed_dag_spec.pop("promoted_snapshots", []) - start = parsed_dag_spec.pop("start") - parsed_dag_spec.pop("end", None) - plan_id = parsed_dag_spec.pop("plan_id") - previous_plan_id = parsed_dag_spec.pop("previous_plan_id", None) - expiration_ts = parsed_dag_spec.pop("environment_expiration_ts", None) - - parsed_dag_spec["environment"] = { - **environment_naming_info, - "snapshots": promoted_snapshots, - "start_at": start, - "end_at": start, - "plan_id": plan_id, - "previous_plan_id": previous_plan_id, - "expiration_ts": expiration_ts, - } - - new_specs.append( - { - "request_id": request_id, - "dag_id": dag_id, - "dag_spec": json.dumps(parsed_dag_spec), - } - ) - - if new_specs: - engine_adapter.delete_from(plan_dags_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - plan_dags_table, - pd.DataFrame(new_specs), - target_columns_to_types={ - "request_id": exp.DataType.build(index_type), - "dag_id": exp.DataType.build(index_type), - "dag_spec": exp.DataType.build(blob_type), - }, - ) diff --git a/sqlmesh/migrations/v0040_add_previous_finalized_snapshots.py b/sqlmesh/migrations/v0040_add_previous_finalized_snapshots.py deleted file mode 100644 
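The v0038 backfill above derives the new expiration_ts column from two fields already stored in each snapshot payload. A small sketch using SQLMesh's date helpers, where "in 1 week" stands in for a snapshot's ttl expression:

from sqlmesh.utils.date import to_datetime, to_timestamp

# Anchor the relative ttl expression at the snapshot's last update time.
updated_ts = to_timestamp("2024-01-01")
expiration_ts = to_timestamp("in 1 week", relative_base=to_datetime(updated_ts))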
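The v0039 rewrite above folds the flat plan-DAG fields into a nested environment object, deliberately collapsing end_at to start. A runnable sketch of just that reshaping, with illustrative field values:

spec = {
    "environment_naming_info": {"name": "dev"},
    "promoted_snapshots": [],
    "start": "2023-01-01",
    "end": "2023-01-02",
    "plan_id": "plan_1",
    "new_snapshots": [],
}

environment_naming_info = spec.pop("environment_naming_info")
promoted_snapshots = spec.pop("promoted_snapshots", [])
start = spec.pop("start")
spec.pop("end", None)  # dropped: the nested object carries start_at/end_at instead
spec["environment"] = {
    **environment_naming_info,
    "snapshots": promoted_snapshots,
    "start_at": start,
    "end_at": start,
    "plan_id": spec.pop("plan_id"),
    "previous_plan_id": spec.pop("previous_plan_id", None),
    "expiration_ts": spec.pop("environment_expiration_ts", None),
}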
index f15bd69eed..0000000000 --- a/sqlmesh/migrations/v0040_add_previous_finalized_snapshots.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add support for environment previous finalized snapshots.""" - -from sqlglot import exp - -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - environments_table = "_environments" - if state_sync.schema: - environments_table = f"{state_sync.schema}.{environments_table}" - - blob_type = blob_text_type(engine_adapter.dialect) - - alter_table_exp = exp.Alter( - this=exp.to_table(environments_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("previous_finalized_snapshots"), - kind=exp.DataType.build(blob_type), - ) - ], - ) - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0041_remove_hash_raw_query_attribute.py b/sqlmesh/migrations/v0041_remove_hash_raw_query_attribute.py deleted file mode 100644 index a99e96b686..0000000000 --- a/sqlmesh/migrations/v0041_remove_hash_raw_query_attribute.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Remove hash_raw_query from existing snapshots.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name, expiration_ts in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name", "expiration_ts").from_( - snapshots_table - ), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - parsed_snapshot["node"].pop("hash_raw_query", None) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "expiration_ts": expiration_ts, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "expiration_ts": exp.DataType.build("bigint"), - }, - ) diff --git a/sqlmesh/migrations/v0042_trim_indirect_versions.py b/sqlmesh/migrations/v0042_trim_indirect_versions.py deleted file mode 100644 index 5a8f6285b4..0000000000 --- a/sqlmesh/migrations/v0042_trim_indirect_versions.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Trim irrelevant attributes from indirect versions.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = 
state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name, expiration_ts in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name", "expiration_ts").from_( - snapshots_table - ), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - for indirect_versions in parsed_snapshot["indirect_versions"].values(): - for indirect_version in indirect_versions: - # Only keep version and change_category. - version = indirect_version.get("version") - change_category = indirect_version.get("change_category") - indirect_version.clear() - indirect_version["version"] = version - indirect_version["change_category"] = change_category - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "expiration_ts": expiration_ts, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "expiration_ts": exp.DataType.build("bigint"), - }, - ) diff --git a/sqlmesh/migrations/v0043_fix_remove_obsolete_attributes_in_plan_dags.py b/sqlmesh/migrations/v0043_fix_remove_obsolete_attributes_in_plan_dags.py deleted file mode 100644 index 767f4b236b..0000000000 --- a/sqlmesh/migrations/v0043_fix_remove_obsolete_attributes_in_plan_dags.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Trim irrelevant attributes from the plan DAGs state.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - plan_dags_table = "_plan_dags" - if schema: - plan_dags_table = f"{schema}.{plan_dags_table}" - - new_dag_specs = [] - - for request_id, dag_id, dag_spec in engine_adapter.fetchall( - exp.select("request_id", "dag_id", "dag_spec").from_(plan_dags_table), - quote_identifiers=True, - ): - parsed_dag_spec = json.loads(dag_spec) - for snapshot in parsed_dag_spec.get("new_snapshots", []): - snapshot["node"].pop("hash_raw_query", None) - - for indirect_versions in snapshot.get("indirect_versions", {}).values(): - for indirect_version in indirect_versions: - # Only keep version and change_category. 
- version = indirect_version.get("version") - change_category = indirect_version.get("change_category") - indirect_version.clear() - indirect_version["version"] = version - indirect_version["change_category"] = change_category - - new_dag_specs.append( - { - "request_id": request_id, - "dag_id": dag_id, - "dag_spec": json.dumps(parsed_dag_spec), - } - ) - - if new_dag_specs: - engine_adapter.delete_from(plan_dags_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - plan_dags_table, - pd.DataFrame(new_dag_specs), - target_columns_to_types={ - "request_id": exp.DataType.build(index_type), - "dag_id": exp.DataType.build(index_type), - "dag_spec": exp.DataType.build(blob_type), - }, - ) diff --git a/sqlmesh/migrations/v0044_quote_identifiers_in_model_attributes.py b/sqlmesh/migrations/v0044_quote_identifiers_in_model_attributes.py deleted file mode 100644 index de5344d4ce..0000000000 --- a/sqlmesh/migrations/v0044_quote_identifiers_in_model_attributes.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Quoted identifiers in model SQL attributes.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0045_move_gateway_variable.py b/sqlmesh/migrations/v0045_move_gateway_variable.py deleted file mode 100644 index 754f958fac..0000000000 --- a/sqlmesh/migrations/v0045_move_gateway_variable.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Move the gateway variable.""" - -import ast -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - migration_needed = False - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name, expiration_ts in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name", "expiration_ts").from_( - snapshots_table - ), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - python_env = parsed_snapshot["node"].get("python_env") - if python_env: - gateway = python_env.pop("gateway", None) - if gateway is not None: - migration_needed = True - sqlmesh_vars = {"gateway": ast.literal_eval(gateway["payload"])} - python_env["__sqlmesh__vars__"] = { - "payload": repr(sqlmesh_vars), - "kind": "value", - } - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "expiration_ts": expiration_ts, - } - ) - - if migration_needed and new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "expiration_ts": exp.DataType.build("bigint"), - }, - ) diff 
--git a/sqlmesh/migrations/v0046_add_batch_concurrency.py b/sqlmesh/migrations/v0046_add_batch_concurrency.py deleted file mode 100644 index f23d27e80a..0000000000 --- a/sqlmesh/migrations/v0046_add_batch_concurrency.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Add the batch_concurrency attribute to the incremental model kinds. - -This results in a change to the metadata hash. -""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0047_change_scd_string_to_column.py b/sqlmesh/migrations/v0047_change_scd_string_to_column.py deleted file mode 100644 index 9233a54ca9..0000000000 --- a/sqlmesh/migrations/v0047_change_scd_string_to_column.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Changes the SCD Type 2 columns from strings to columns.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0048_drop_indirect_versions.py b/sqlmesh/migrations/v0048_drop_indirect_versions.py deleted file mode 100644 index 31874268dd..0000000000 --- a/sqlmesh/migrations/v0048_drop_indirect_versions.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Drop the indirect_versions attribute in snapshots.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name, expiration_ts in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name", "expiration_ts").from_( - snapshots_table - ), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - parsed_snapshot.pop("indirect_versions", None) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "expiration_ts": expiration_ts, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "expiration_ts": exp.DataType.build("bigint"), - }, - ) diff --git a/sqlmesh/migrations/v0049_replace_identifier_with_version_in_seeds_table.py b/sqlmesh/migrations/v0049_replace_identifier_with_version_in_seeds_table.py deleted file mode 100644 index b01bee41e1..0000000000 --- a/sqlmesh/migrations/v0049_replace_identifier_with_version_in_seeds_table.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Use version instead of identifier in the seeds table.""" - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - engine_adapter = 
state_sync.engine_adapter - - snapshots_table = "_snapshots" - seeds_table = "_seeds" - new_seeds_table = f"{seeds_table}_v49" - - if state_sync.schema: - snapshots_table = f"{state_sync.schema}.{snapshots_table}" - seeds_table = f"{state_sync.schema}.{seeds_table}" - new_seeds_table = f"{state_sync.schema}.{new_seeds_table}" - - index_type = index_text_type(engine_adapter.dialect) - - engine_adapter.drop_table(new_seeds_table) - engine_adapter.create_state_table( - new_seeds_table, - { - "name": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "content": exp.DataType.build("text"), - }, - primary_key=("name", "version"), - ) - - name_col = exp.column("name", table="seeds") - version_col = exp.column("version", table="snapshots") - query = ( - exp.select( - name_col, - version_col, - exp.func("MAX", exp.column("content", table="seeds")).as_("content"), - ) - .from_(exp.to_table(seeds_table).as_("seeds")) - .join( - exp.to_table(snapshots_table).as_("snapshots"), - on=exp.and_( - exp.column("name", table="seeds").eq(exp.column("name", table="snapshots")), - exp.column("identifier", table="seeds").eq( - exp.column("identifier", table="snapshots") - ), - ), - ) - .where(exp.column("version", table="snapshots").is_(exp.null()).not_()) - .group_by(name_col, version_col) - ) - - engine_adapter.insert_append(new_seeds_table, query) - engine_adapter.drop_table(seeds_table) - engine_adapter.rename_table(new_seeds_table, seeds_table) diff --git a/sqlmesh/migrations/v0050_drop_seeds_table.py b/sqlmesh/migrations/v0050_drop_seeds_table.py deleted file mode 100644 index 0236284061..0000000000 --- a/sqlmesh/migrations/v0050_drop_seeds_table.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Drop the seeds table.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - - seeds_table = "_seeds" - if state_sync.schema: - seeds_table = f"{state_sync.schema}.{seeds_table}" - - engine_adapter.drop_table(seeds_table) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0051_rename_column_descriptions.py b/sqlmesh/migrations/v0051_rename_column_descriptions.py deleted file mode 100644 index f76a4a05a6..0000000000 --- a/sqlmesh/migrations/v0051_rename_column_descriptions.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Rename the node attribute `column_descriptions_` to `column_descriptions` in snapshots.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - found_col_descriptions = False - - for name, identifier, version, snapshot, kind_name, expiration_ts in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name", "expiration_ts").from_( - snapshots_table - ), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - - if "column_descriptions_" in parsed_snapshot["node"]: - found_col_descriptions = True - parsed_snapshot["node"]["column_descriptions"] = parsed_snapshot["node"].pop( - "column_descriptions_" - ) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - 
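# Roughly the SQL that the v0049 copy above renders to (assuming the default
# "sqlmesh" state schema); MAX(content) collapses the duplicate rows that
# several identifiers sharing one version would otherwise produce:
#
#   SELECT seeds.name, snapshots.version, MAX(seeds.content) AS content
#   FROM sqlmesh._seeds AS seeds
#   JOIN sqlmesh._snapshots AS snapshots
#     ON seeds.name = snapshots.name AND seeds.identifier = snapshots.identifier
#   WHERE NOT snapshots.version IS NULL
#   GROUP BY seeds.name, snapshots.version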
"snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "expiration_ts": expiration_ts, - } - ) - - if found_col_descriptions: - engine_adapter.delete_from(snapshots_table, "TRUE") - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "expiration_ts": exp.DataType.build("bigint"), - }, - ) diff --git a/sqlmesh/migrations/v0052_add_normalize_name_in_environment_naming_info.py b/sqlmesh/migrations/v0052_add_normalize_name_in_environment_naming_info.py deleted file mode 100644 index 27980033fa..0000000000 --- a/sqlmesh/migrations/v0052_add_normalize_name_in_environment_naming_info.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Add flag that controls whether environment names will be normalized.""" - -from sqlglot import exp - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - environments_table = "_environments" - if state_sync.schema: - environments_table = f"{state_sync.schema}.{environments_table}" - - alter_table_exp = exp.Alter( - this=exp.to_table(environments_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("normalize_name"), - kind=exp.DataType.build("boolean"), - ) - ], - ) - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - environments_table = "_environments" - if state_sync.schema: - environments_table = f"{state_sync.schema}.{environments_table}" - - state_sync.engine_adapter.update_table( - environments_table, - {"normalize_name": False}, - where=exp.true(), - ) diff --git a/sqlmesh/migrations/v0053_custom_model_kind_extra_attributes.py b/sqlmesh/migrations/v0053_custom_model_kind_extra_attributes.py deleted file mode 100644 index d1c83658e8..0000000000 --- a/sqlmesh/migrations/v0053_custom_model_kind_extra_attributes.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Add batch_size, batch_concurrency, and batch_interval to the CUSTOM model kind.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0054_fix_trailing_comments.py b/sqlmesh/migrations/v0054_fix_trailing_comments.py deleted file mode 100644 index 8e7de52067..0000000000 --- a/sqlmesh/migrations/v0054_fix_trailing_comments.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Fix support for trailing comments in SQL model definitions.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0055_add_updated_ts_unpaused_ts_ttl_ms_unrestorable_to_snapshot.py b/sqlmesh/migrations/v0055_add_updated_ts_unpaused_ts_ttl_ms_unrestorable_to_snapshot.py deleted file mode 100644 index 96f39772cd..0000000000 --- a/sqlmesh/migrations/v0055_add_updated_ts_unpaused_ts_ttl_ms_unrestorable_to_snapshot.py +++ /dev/null @@ -1,140 +0,0 @@ -"""Add updated_ts, unpaused_ts, ttl_ms, and unrestorable columns to the snapshots table.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.date import to_datetime, to_timestamp -from sqlmesh.utils.migration import 
index_text_type -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - add_column_exps = [ - exp.Alter( - this=exp.to_table(snapshots_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column(column_name), - kind=exp.DataType.build("bigint"), - ) - ], - ) - for column_name in ["updated_ts", "unpaused_ts", "ttl_ms"] - ] + [ - exp.Alter( - this=exp.to_table(snapshots_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("unrestorable"), - kind=exp.DataType.build("boolean"), - ) - ], - ) - ] - engine_adapter.execute(add_column_exps) - - if engine_adapter.dialect == "databricks": - # Databricks will throw an error like: - # > databricks.sql.exc.ServerOperationError: [DELTA_UNSUPPORTED_DROP_COLUMN] DROP COLUMN is not supported for your Delta table. - # when we try to drop `expiration_ts` below unless we set delta.columnMapping.mode to 'name' - alter_table_exp = exp.Alter( - this=exp.to_table(snapshots_table), - kind="TABLE", - actions=[ - exp.AlterSet( - expressions=[ - exp.Properties( - expressions=[ - exp.Property( - this=exp.Literal.string("delta.columnMapping.mode"), - value=exp.Literal.string("name"), - ) - ] - ) - ] - ) - ], - ) - engine_adapter.execute(alter_table_exp) - - drop_column_exp = exp.Alter( - this=exp.to_table(snapshots_table), - kind="TABLE", - actions=[exp.Drop(this=exp.to_column("expiration_ts"), kind="COLUMN")], - ) - engine_adapter.execute(drop_column_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - index_type = index_text_type(engine_adapter.dialect) - blob_type = blob_text_type(engine_adapter.dialect) - - new_snapshots = [] - - for name, identifier, version, snapshot, kind_name in engine_adapter.fetchall( - exp.select("name", "identifier", "version", "snapshot", "kind_name").from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - updated_ts = parsed_snapshot.pop("updated_ts") - unpaused_ts = parsed_snapshot.pop("unpaused_ts", None) - ttl_ms = max( - to_timestamp( - parsed_snapshot["ttl"], - relative_base=to_datetime(updated_ts), - check_categorical_relative_expression=False, - ) - - updated_ts, - 0, - ) - unrestorable = parsed_snapshot.pop("unrestorable", False) - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "updated_ts": updated_ts, - "unpaused_ts": unpaused_ts, - "ttl_ms": ttl_ms, - "unrestorable": unrestorable, - } - ) - - if new_snapshots: - engine_adapter.delete_from(snapshots_table, "TRUE") - - engine_adapter.insert_append( - snapshots_table, - pd.DataFrame(new_snapshots), - target_columns_to_types={ - "name": exp.DataType.build(index_type), - "identifier": exp.DataType.build(index_type), - "version": exp.DataType.build(index_type), - "snapshot": exp.DataType.build(blob_type), - "kind_name": exp.DataType.build(index_type), - "updated_ts": exp.DataType.build("bigint"), - "unpaused_ts": exp.DataType.build("bigint"), - "ttl_ms": exp.DataType.build("bigint"), - "unrestorable": exp.DataType.build("boolean"), - }, - ) diff --git 
a/sqlmesh/migrations/v0057_add_table_format.py b/sqlmesh/migrations/v0057_add_table_format.py deleted file mode 100644 index b59911ef3a..0000000000 --- a/sqlmesh/migrations/v0057_add_table_format.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Add table_format to the model top-level properties""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0058_add_requirements.py b/sqlmesh/migrations/v0058_add_requirements.py deleted file mode 100644 index 73de67d4e5..0000000000 --- a/sqlmesh/migrations/v0058_add_requirements.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Add requirements to environments table""" - -from sqlglot import exp - -from sqlmesh.utils.migration import blob_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - engine_adapter = state_sync.engine_adapter - environments_table = "_environments" - if state_sync.schema: - environments_table = f"{state_sync.schema}.{environments_table}" - - blob_type = blob_text_type(engine_adapter.dialect) - alter_table_exp = exp.Alter( - this=exp.to_table(environments_table), - kind="TABLE", - actions=[ - exp.ColumnDef( - this=exp.to_column("requirements"), - kind=exp.DataType.build(blob_type), - ) - ], - ) - - engine_adapter.execute(alter_table_exp) - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0059_add_physical_version.py b/sqlmesh/migrations/v0059_add_physical_version.py deleted file mode 100644 index a8dfa24b7a..0000000000 --- a/sqlmesh/migrations/v0059_add_physical_version.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Add the physical_version model attribute.""" - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - pass diff --git a/sqlmesh/migrations/v0060_move_audits_to_model.py b/sqlmesh/migrations/v0060_move_audits_to_model.py deleted file mode 100644 index b4d351cf5c..0000000000 --- a/sqlmesh/migrations/v0060_move_audits_to_model.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Move audits from snapshots to models.""" - -import json - -from sqlglot import exp - -from sqlmesh.utils.migration import index_text_type - - -def migrate_schemas(state_sync, **kwargs): # type: ignore - pass - - -def migrate_rows(state_sync, **kwargs): # type: ignore - import pandas as pd - - engine_adapter = state_sync.engine_adapter - schema = state_sync.schema - snapshots_table = "_snapshots" - index_type = index_text_type(engine_adapter.dialect) - if schema: - snapshots_table = f"{schema}.{snapshots_table}" - - new_snapshots = [] - - for ( - name, - identifier, - version, - snapshot, - kind_name, - updated_ts, - unpaused_ts, - ttl_ms, - unrestorable, - ) in engine_adapter.fetchall( - exp.select( - "name", - "identifier", - "version", - "snapshot", - "kind_name", - "updated_ts", - "unpaused_ts", - "ttl_ms", - "unrestorable", - ).from_(snapshots_table), - quote_identifiers=True, - ): - parsed_snapshot = json.loads(snapshot) - - audit_definitions = parsed_snapshot.pop("audits", []) - node = parsed_snapshot["node"] - node.pop("inline_audits", None) - - if audit_definitions: - node["audit_definitions"] = {audit["name"]: audit for audit in audit_definitions} - - new_snapshots.append( - { - "name": name, - "identifier": identifier, - "version": version, - "snapshot": json.dumps(parsed_snapshot), - "kind_name": kind_name, - "updated_ts": updated_ts, - "unpaused_ts": unpaused_ts, - "ttl_ms": ttl_ms, - "unrestorable": unrestorable, - } - ) 
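# A condensed, runnable restatement of the v0060 transform above: audit
# definitions move from the snapshot level onto the node, keyed by audit name
# (illustrative payload; names are assumed unique within a snapshot).
parsed_snapshot = {
    "audits": [{"name": "assert_positive", "query": "SELECT ..."}],
    "node": {"inline_audits": {}},
}
audit_definitions = parsed_snapshot.pop("audits", [])
node = parsed_snapshot["node"]
node.pop("inline_audits", None)
if audit_definitions:
    node["audit_definitions"] = {audit["name"]: audit for audit in audit_definitions}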
diff --git a/sqlmesh/migrations/v0061_mysql_fix_blob_text_type.py b/sqlmesh/migrations/v0061_mysql_fix_blob_text_type.py
index 9e66db9f66..34b765b3ad 100644
--- a/sqlmesh/migrations/v0061_mysql_fix_blob_text_type.py
+++ b/sqlmesh/migrations/v0061_mysql_fix_blob_text_type.py
@@ -17,12 +17,10 @@ def migrate_schemas(state_sync, **kwargs):  # type: ignore
     schema = state_sync.schema
     environments_table = "_environments"
     snapshots_table = "_snapshots"
-    plan_dags_table = "_plan_dags"
 
     if schema:
         environments_table = f"{schema}.{environments_table}"
         snapshots_table = f"{schema}.{snapshots_table}"
-        plan_dags_table = f"{schema}.{plan_dags_table}"
 
     targets = [
         (environments_table, "snapshots"),
@@ -30,7 +28,6 @@ def migrate_schemas(state_sync, **kwargs):  # type: ignore
         (environments_table, "previous_finalized_snapshots"),
         (environments_table, "requirements"),
         (snapshots_table, "snapshot"),
-        (plan_dags_table, "dag_spec"),
     ]
 
     for table_name, column_name in targets:
diff --git a/sqlmesh/migrations/v0096_remove_plan_dags_table.py b/sqlmesh/migrations/v0096_remove_plan_dags_table.py
new file mode 100644
index 0000000000..e342d6b1a8
--- /dev/null
+++ b/sqlmesh/migrations/v0096_remove_plan_dags_table.py
@@ -0,0 +1,15 @@
+"""Remove the obsolete _plan_dags table."""
+
+
+def migrate_schemas(state_sync, **kwargs):  # type: ignore
+    engine_adapter = state_sync.engine_adapter
+    schema = state_sync.schema
+    plan_dags_table = "_plan_dags"
+    if schema:
+        plan_dags_table = f"{schema}.{plan_dags_table}"
+
+    engine_adapter.drop_table(plan_dags_table)
+
+
+def migrate_rows(state_sync, **kwargs):  # type: ignore
+    pass
diff --git a/sqlmesh/utils/errors.py b/sqlmesh/utils/errors.py
index bbd1db3802..d90965c25c 100644
--- a/sqlmesh/utils/errors.py
+++ b/sqlmesh/utils/errors.py
@@ -86,6 +86,10 @@ class AuditConfigError(ConfigError):
     pass
 
 
+class StateMigrationError(SQLMeshError):
+    pass
+
+
 class AuditError(SQLMeshError):
     def __init__(
         self,
diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py
index ef5b80e151..e97e03b29e 100644
--- a/tests/cli/test_cli.py
+++ b/tests/cli/test_cli.py
@@ -872,7 +872,6 @@ def test_info_on_new_project_does_not_create_state_sync(runner, tmp_path):
     assert not context.engine_adapter.table_exists("sqlmesh._snapshots")
     assert not context.engine_adapter.table_exists("sqlmesh._environments")
     assert not context.engine_adapter.table_exists("sqlmesh._intervals")
-    assert not context.engine_adapter.table_exists("sqlmesh._plan_dags")
     assert not context.engine_adapter.table_exists("sqlmesh._versions")
 
 
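Because the v0096 script added above is pure DDL, it can be exercised in isolation against a scratch DuckDB database. A sketch under stated assumptions: `create_engine_adapter` is the adapter factory the test suite uses elsewhere, the stub class is mine, and the `_plan_dags` columns below are invented purely so there is something to drop:

```python
import duckdb

from sqlmesh.core.engine_adapter import create_engine_adapter
from sqlmesh.migrations import v0096_remove_plan_dags_table as v0096


class _StateSyncStub:
    # Minimal stand-in exposing the two attributes the script reads; a real
    # run goes through StateMigrator as part of state_sync.migrate().
    def __init__(self, engine_adapter, schema):
        self.engine_adapter = engine_adapter
        self.schema = schema


adapter = create_engine_adapter(lambda: duckdb.connect(), "duckdb")
adapter.execute("CREATE SCHEMA sqlmesh")
adapter.execute("CREATE TABLE sqlmesh._plan_dags (request_id TEXT, dag_spec TEXT)")  # illustrative columns

v0096.migrate_schemas(_StateSyncStub(adapter, "sqlmesh"))
assert not adapter.table_exists("sqlmesh._plan_dags")
```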
diff --git a/tests/core/state_sync/test_export_import.py b/tests/core/state_sync/test_export_import.py
index c303a63e59..769fa2c2fa 100644
--- a/tests/core/state_sync/test_export_import.py
+++ b/tests/core/state_sync/test_export_import.py
@@ -44,7 +44,7 @@ def test_export_empty_state(tmp_path: Path, state_sync: StateSync) -> None:
     with pytest.raises(SQLMeshError, match=r"Please run a migration"):
         export_state(state_sync, output_file)
 
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
 
     export_state(state_sync, output_file)
 
@@ -326,7 +326,7 @@ def test_import_invalid_file(tmp_path: Path, state_sync: StateSync) -> None:
 
 
 def test_import_from_older_version_export_fails(tmp_path: Path, state_sync: StateSync) -> None:
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     current_version = state_sync.get_versions()
 
     major, minor = current_version.minor_sqlmesh_version
@@ -354,7 +354,7 @@ def test_import_from_older_version_export_fails(tmp_path: Path, state_sync: Stat
 
 
 def test_import_from_newer_version_export_fails(tmp_path: Path, state_sync: StateSync) -> None:
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     current_version = state_sync.get_versions()
 
     major, minor = current_version.minor_sqlmesh_version
@@ -472,7 +472,7 @@ def test_roundtrip(tmp_path: Path, example_project_config: Config, state_sync: S
     state_sync.engine_adapter.drop_schema("sqlmesh", cascade=True)
 
     # state was destroyed, plan should have changes
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     plan = context.plan()
     assert plan.has_changes
 
@@ -509,7 +509,7 @@ def test_roundtrip(tmp_path: Path, example_project_config: Config, state_sync: S
     with pytest.raises(SQLMeshError, match=r"Please run a migration"):
         state_sync.get_versions(validate=True)
 
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     import_state(state_sync, state_file)
 
     # should be no changes in dev
@@ -610,7 +610,7 @@ def test_roundtrip_includes_environment_statements(tmp_path: Path) -> None:
     with pytest.raises(SQLMeshError, match=r"Please run a migration"):
         state_sync.get_versions(validate=True)
 
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     import_state(state_sync, state_file)
 
     assert not context.plan().has_changes
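The only surface change for callers in the tests above is that `migrate()` no longer takes a `default_catalog` argument. Condensed from those tests, the export/import round trip now reads as follows (a sketch, not a full test: `state_sync` is the usual fixture, the output path is hypothetical, and the helpers' import path is assumed to match what the test module uses):

```python
from pathlib import Path

from sqlmesh.core.state_sync.export_import import export_state, import_state  # import path assumed

state_file = Path("state_export.json")  # hypothetical output location

# State must be on the current schema version before export; note the
# default_catalog argument is gone.
state_sync.migrate()
export_state(state_sync, state_file)

# ...and a freshly migrated state database can load the file back in.
state_sync.migrate()
import_state(state_sync, state_file)
```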
diff --git a/tests/core/state_sync/test_state_sync.py b/tests/core/state_sync/test_state_sync.py
index 327ec82210..51a646ce5d 100644
--- a/tests/core/state_sync/test_state_sync.py
+++ b/tests/core/state_sync/test_state_sync.py
@@ -47,7 +47,7 @@
     Versions,
 )
 from sqlmesh.utils.date import now_timestamp, to_datetime, to_timestamp
-from sqlmesh.utils.errors import SQLMeshError
+from sqlmesh.utils.errors import SQLMeshError, StateMigrationError
 
 pytestmark = pytest.mark.slow
 
@@ -59,7 +59,7 @@ def state_sync(duck_conn, tmp_path):
         schema=c.SQLMESH,
         cache_dir=tmp_path / c.CACHE,
     )
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     return state_sync
 
 
@@ -2031,7 +2031,7 @@ def test_version_schema(state_sync: EngineAdapterStateSync, tmp_path) -> None:
     ):
         state_sync.get_versions()
 
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
 
     # migration version is behind, always raise
     state_sync.version_state.update_versions(schema_version=SCHEMA_VERSION + 1)
@@ -2137,7 +2137,7 @@ def test_migrate(state_sync: EngineAdapterStateSync, mocker: MockerFixture, tmp_
     backup_state_mock = mocker.patch(
         "sqlmesh.core.state_sync.db.migrator.StateMigrator._backup_state"
     )
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     migrate_rows_mock.assert_not_called()
     backup_state_mock.assert_not_called()
 
@@ -2148,7 +2148,7 @@ def test_migrate(state_sync: EngineAdapterStateSync, mocker: MockerFixture, tmp_
         cache_dir=tmp_path / c.CACHE,
     )
 
-    state_sync.migrate(default_catalog=None)
+    state_sync.migrate()
     migrate_rows_mock.assert_called_once()
     backup_state_mock.assert_called_once()
     assert state_sync.get_versions() == Versions(
@@ -2205,7 +2205,7 @@ def test_first_migration_failure(duck_conn, mocker: MockerFixture, tmp_path) ->
         SQLMeshError,
         match="SQLMesh migration failed.",
     ):
-        state_sync.migrate(default_catalog=None)
+        state_sync.migrate()
     assert not state_sync.engine_adapter.table_exists(state_sync.snapshot_state.snapshots_table)
     assert not state_sync.engine_adapter.table_exists(
         state_sync.environment_state.environments_table
@@ -2215,7 +2215,15 @@ def test_first_migration_failure(duck_conn, mocker: MockerFixture, tmp_path) ->
 
 
 def test_migrate_rows(state_sync: EngineAdapterStateSync, mocker: MockerFixture) -> None:
-    delete_versions(state_sync)
+    state_sync.engine_adapter.replace_query(
+        "sqlmesh._versions",
+        pd.read_json("tests/fixtures/migrations/versions.json"),
+        target_columns_to_types={
+            "schema_version": exp.DataType.build("int"),
+            "sqlglot_version": exp.DataType.build("text"),
+            "sqlmesh_version": exp.DataType.build("text"),
+        },
+    )
 
     state_sync.engine_adapter.replace_query(
         "sqlmesh._snapshots",
@@ -2225,6 +2233,11 @@ def test_migrate_rows(state_sync: EngineAdapterStateSync, mocker: MockerFixture)
             "identifier": exp.DataType.build("text"),
             "version": exp.DataType.build("text"),
             "snapshot": exp.DataType.build("text"),
+            "kind_name": exp.DataType.build("text"),
+            "updated_ts": exp.DataType.build("bigint"),
+            "unpaused_ts": exp.DataType.build("bigint"),
+            "ttl_ms": exp.DataType.build("bigint"),
+            "unrestorable": exp.DataType.build("boolean"),
         },
     )
 
@@ -2239,21 +2252,43 @@ def test_migrate_rows(state_sync: EngineAdapterStateSync, mocker: MockerFixture)
             "plan_id": exp.DataType.build("text"),
             "previous_plan_id": exp.DataType.build("text"),
             "expiration_ts": exp.DataType.build("bigint"),
+            "finalized_ts": exp.DataType.build("bigint"),
+            "promoted_snapshot_ids": exp.DataType.build("text"),
+            "suffix_target": exp.DataType.build("text"),
+            "catalog_name_override": exp.DataType.build("text"),
+            "previous_finalized_snapshots": exp.DataType.build("text"),
+            "normalize_name": exp.DataType.build("boolean"),
+            "requirements": exp.DataType.build("text"),
         },
     )
 
-    state_sync.engine_adapter.drop_table("sqlmesh._seeds")
-    state_sync.engine_adapter.drop_table("sqlmesh._intervals")
+    state_sync.engine_adapter.replace_query(
+        "sqlmesh._intervals",
+        pd.read_json("tests/fixtures/migrations/intervals.json"),
+        target_columns_to_types={
+            "id": exp.DataType.build("text"),
+            "created_ts": exp.DataType.build("bigint"),
+            "name": exp.DataType.build("text"),
+            "identifier": exp.DataType.build("text"),
+            "version": exp.DataType.build("text"),
+            "start_ts": exp.DataType.build("bigint"),
+            "end_ts": exp.DataType.build("bigint"),
+            "is_dev": exp.DataType.build("boolean"),
+            "is_removed": exp.DataType.build("boolean"),
+            "is_compacted": exp.DataType.build("boolean"),
+        },
+    )
 
     old_snapshots = state_sync.engine_adapter.fetchdf("select * from sqlmesh._snapshots")
     old_environments = state_sync.engine_adapter.fetchdf("select * from sqlmesh._environments")
 
-    state_sync.migrate(default_catalog=None, skip_backup=True)
+    state_sync.migrate(skip_backup=True)
 
     new_snapshots = state_sync.engine_adapter.fetchdf("select * from sqlmesh._snapshots")
     new_environments = state_sync.engine_adapter.fetchdf("select * from sqlmesh._environments")
 
-    assert len(old_snapshots) * 2 == len(new_snapshots)
+    assert len(old_snapshots) == 24
+    assert len(new_snapshots) == 36
     assert len(old_environments) == len(new_environments)
 
     start = "2023-01-01"
@@ -2332,7 +2367,7 @@ def test_restore_snapshots_table(state_sync: EngineAdapterStateSync) -> None:
     old_snapshots_count =
state_sync.engine_adapter.fetchone( "select count(*) from sqlmesh._snapshots" ) - assert old_snapshots_count == (12,) + assert old_snapshots_count == (24,) state_sync.migrator._backup_state() state_sync.engine_adapter.delete_from("sqlmesh._snapshots", "TRUE") @@ -3656,3 +3691,25 @@ def test_get_snapshots_by_names_include_expired( snapshot_names=['"a"'], current_ts=(now_ts - (10 * 1000)) ) } == {normal_a.snapshot_id, expired_a.snapshot_id} + + +def test_state_version_is_too_old( + state_sync: EngineAdapterStateSync, mocker: MockerFixture +) -> None: + state_sync.engine_adapter.replace_query( + "sqlmesh._versions", + pd.DataFrame( + [{"schema_version": 59, "sqlmesh_version": "0.133.0", "sqlglot_version": "25.31.4"}] + ), + target_columns_to_types={ + "schema_version": exp.DataType.build("int"), + "sqlglot_version": exp.DataType.build("text"), + "sqlmesh_version": exp.DataType.build("text"), + }, + ) + + with pytest.raises( + StateMigrationError, + match="The current state belongs to an old version of SQLMesh that is no longer supported. Please upgrade to 0.134.0 first before upgrading to.*", + ): + state_sync.migrate(skip_backup=True) diff --git a/tests/core/test_integration.py b/tests/core/test_integration.py index c00733238a..d7d9cb5ce3 100644 --- a/tests/core/test_integration.py +++ b/tests/core/test_integration.py @@ -6157,7 +6157,6 @@ def get_default_catalog_and_non_tables( { "physical.sqlmesh._environments", "physical.sqlmesh._intervals", - "physical.sqlmesh._plan_dags", "physical.sqlmesh._snapshots", "physical.sqlmesh._versions", } @@ -6177,7 +6176,6 @@ def get_default_catalog_and_non_tables( { "physical.sqlmesh._environments", "physical.sqlmesh._intervals", - "physical.sqlmesh._plan_dags", "physical.sqlmesh._snapshots", "physical.sqlmesh._versions", } @@ -6197,7 +6195,6 @@ def get_default_catalog_and_non_tables( { "physical.sqlmesh._environments", "physical.sqlmesh._intervals", - "physical.sqlmesh._plan_dags", "physical.sqlmesh._snapshots", "physical.sqlmesh._versions", } @@ -6218,7 +6215,6 @@ def get_default_catalog_and_non_tables( { "physical.sqlmesh._environments", "physical.sqlmesh._intervals", - "physical.sqlmesh._plan_dags", "physical.sqlmesh._snapshots", "physical.sqlmesh._versions", } @@ -6610,7 +6606,6 @@ def track_duckdb_execute(self, expression, **kwargs): "_intervals", "_auto_restatements", "_environment_statements", - "_plan_dags", ] # to ignore the state queries @@ -7097,7 +7092,6 @@ def test_destroy(copy_to_temp_path): "_auto_restatements", "_environment_statements", "_intervals", - "_plan_dags", "_versions", } for table_name in state_tables: diff --git a/tests/fixtures/migrations/environments.json b/tests/fixtures/migrations/environments.json index e841e38463..cbe4945863 100644 --- a/tests/fixtures/migrations/environments.json +++ b/tests/fixtures/migrations/environments.json @@ -1 +1 @@ -{"name":{"0":"staging","1":"dev"},"snapshots":{"0":"[{\"name\": \"sushi.waiter_as_customer_by_day\", \"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"1267397572\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.waiters\", \"identifier\": \"3386889721\"}, {\"name\": \"sushi.waiter_names\", \"identifier\": \"3233103305\"}, {\"name\": \"sushi.customers\", \"identifier\": \"3148897116\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, 
{\"name\": \"sushi.waiter_revenue_by_day\", \"fingerprint\": {\"data_hash\": \"2443934302\", \"metadata_hash\": \"2904050331\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"2695875565\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.order_items\", \"identifier\": \"1806777563\"}, {\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.top_waiters\", \"fingerprint\": {\"data_hash\": \"2891807529\", \"metadata_hash\": \"3392493998\", \"parent_data_hash\": \"1940707936\", \"parent_metadata_hash\": \"1276363398\"}, \"version\": \"3010914162\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.waiter_revenue_by_day\", \"identifier\": \"1609279380\"}], \"previous_versions\": [], \"is_materialized\": false, \"is_embedded_kind\": false}, {\"name\": \"sushi.waiters\", \"fingerprint\": {\"data_hash\": \"3501061139\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2059227798\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": false, \"is_embedded_kind\": true}, {\"name\": \"sushi.customers\", \"fingerprint\": {\"data_hash\": \"3553985282\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2359719298\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.waiter_names\", \"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"2505706914\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.customer_revenue_by_day\", \"fingerprint\": {\"data_hash\": \"2657552867\", \"metadata_hash\": \"129771006\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"1291364031\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.order_items\", \"identifier\": \"1806777563\"}, {\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.items\", \"fingerprint\": {\"data_hash\": \"1960378930\", \"metadata_hash\": \"2900807542\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"312608270\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.order_items\", \"fingerprint\": {\"data_hash\": \"653664599\", \"metadata_hash\": \"1960934702\", \"parent_data_hash\": \"3170724558\", \"parent_metadata_hash\": \"867324801\"}, \"version\": \"1015284155\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": 
false}, {\"name\": \"sushi.orders\", \"fingerprint\": {\"data_hash\": \"1628439771\", \"metadata_hash\": \"2745052130\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"925846788\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}]","1":"[{\"name\": \"sushi.waiter_as_customer_by_day\", \"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2824767713\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"3668757715\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.waiters\", \"identifier\": \"3386889721\"}, {\"name\": \"sushi.waiter_names\", \"identifier\": \"1604207722\"}, {\"name\": \"sushi.customers\", \"identifier\": \"3148897116\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"1267397572\"}], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.waiter_revenue_by_day\", \"fingerprint\": {\"data_hash\": \"2443934302\", \"metadata_hash\": \"2904050331\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"2695875565\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.order_items\", \"identifier\": \"1806777563\"}, {\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.top_waiters\", \"fingerprint\": {\"data_hash\": \"2891807529\", \"metadata_hash\": \"3392493998\", \"parent_data_hash\": \"1940707936\", \"parent_metadata_hash\": \"1276363398\"}, \"version\": \"3010914162\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.waiter_revenue_by_day\", \"identifier\": \"1609279380\"}], \"previous_versions\": [], \"is_materialized\": false, \"is_embedded_kind\": false}, {\"name\": \"sushi.waiters\", \"fingerprint\": {\"data_hash\": \"3501061139\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2059227798\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": false, \"is_embedded_kind\": true}, {\"name\": \"sushi.customers\", \"fingerprint\": {\"data_hash\": \"3553985282\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2359719298\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.waiter_names\", \"fingerprint\": {\"data_hash\": \"4133862560\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"1204702829\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"2505706914\"}], \"change_category\": 1, \"is_materialized\": true, \"is_embedded_kind\": 
false}, {\"name\": \"sushi.customer_revenue_by_day\", \"fingerprint\": {\"data_hash\": \"2657552867\", \"metadata_hash\": \"129771006\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"1291364031\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.order_items\", \"identifier\": \"1806777563\"}, {\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.items\", \"fingerprint\": {\"data_hash\": \"1960378930\", \"metadata_hash\": \"2900807542\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"312608270\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.order_items\", \"fingerprint\": {\"data_hash\": \"653664599\", \"metadata_hash\": \"1960934702\", \"parent_data_hash\": \"3170724558\", \"parent_metadata_hash\": \"867324801\"}, \"version\": \"1015284155\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}, {\"name\": \"sushi.orders\", \"fingerprint\": {\"data_hash\": \"1628439771\", \"metadata_hash\": \"2745052130\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"925846788\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"previous_versions\": [], \"is_materialized\": true, \"is_embedded_kind\": false}]"},"start_at":{"0":"2023-01-01","1":"2023-01-01"},"end_at":{"0":"2023-01-07","1":"2023-01-07"},"plan_id":{"0":"2b16ff4b77dc44789b628b4a8a4ed38a","1":"d5dcc7aafce742aab763331525196613"},"previous_plan_id":{"0":null,"1":"79f4bab2177b495ab877b674bc511f2b"},"expiration_ts":{"0":1681419197966,"1":1681419273635}} \ No newline at end of file +{"name":{"0":"staging","1":"dev"},"snapshots":{"0":"[{\"name\": \"\\\"sushi\\\".\\\"waiter_as_customer_by_day\\\"\", \"temp_version\": \"1267397572\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"849558693\", \"metadata_hash\": \"2088684978\", \"parent_data_hash\": \"2705906012\", \"parent_metadata_hash\": \"665080906\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"1267397572\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"1267397572\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"identifier\": \"1609854746\"}, {\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"identifier\": \"4123940212\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}, {\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"identifier\": \"1461038955\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\", \"temp_version\": \"2695875565\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"2224089837\", \"metadata_hash\": \"2504236462\", \"parent_data_hash\": \"2738168331\", \"parent_metadata_hash\": \"1795276494\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"2443934302\", \"metadata_hash\": \"2904050331\", 
\"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"2695875565\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2695875565\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"3721860967\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}, {\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"identifier\": \"1422946820\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"top_waiters\\\"\", \"temp_version\": \"3010914162\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"4131026946\", \"metadata_hash\": \"154190563\", \"parent_data_hash\": \"929243525\", \"parent_metadata_hash\": \"2366450878\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"2891807529\", \"metadata_hash\": \"3392493998\", \"parent_data_hash\": \"1940707936\", \"parent_metadata_hash\": \"1276363398\"}, \"version\": \"3010914162\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"3010914162\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\", \"identifier\": \"2175947464\"}], \"kind_name\": \"VIEW\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"temp_version\": \"2059227798\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"2037801255\", \"metadata_hash\": \"3063653103\", \"parent_data_hash\": \"458609840\", \"parent_metadata_hash\": \"2007040660\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"3501061139\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2059227798\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2059227798\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}], \"kind_name\": \"EMBEDDED\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"temp_version\": \"2359719298\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"2431070412\", \"metadata_hash\": \"3063653103\", \"parent_data_hash\": \"458609840\", \"parent_metadata_hash\": \"2007040660\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"3553985282\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2359719298\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2359719298\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}], \"kind_name\": \"FULL\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"temp_version\": \"2505706914\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"3604872020\", \"metadata_hash\": \"3468846895\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"2505706914\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2505706914\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"kind_name\": \"SEED\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"customer_revenue_by_day\\\"\", 
\"temp_version\": \"1291364031\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"131732542\", \"metadata_hash\": \"1368842087\", \"parent_data_hash\": \"2738168331\", \"parent_metadata_hash\": \"1795276494\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"2657552867\", \"metadata_hash\": \"129771006\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"1291364031\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"1291364031\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"3721860967\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}, {\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"identifier\": \"1422946820\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"temp_version\": \"312608270\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"1862622614\", \"metadata_hash\": \"3651173237\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1960378930\", \"metadata_hash\": \"2900807542\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"312608270\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"312608270\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"temp_version\": \"1015284155\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"4010068827\", \"metadata_hash\": \"799196655\", \"parent_data_hash\": \"2342431947\", \"parent_metadata_hash\": \"1746080605\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"653664599\", \"metadata_hash\": \"1960934702\", \"parent_data_hash\": \"3170724558\", \"parent_metadata_hash\": \"867324801\"}, \"version\": \"1015284155\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"1015284155\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"3721860967\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"temp_version\": \"925846788\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"1588786367\", \"metadata_hash\": \"1674367104\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1628439771\", \"metadata_hash\": \"2745052130\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"925846788\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"925846788\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}]","1":"[{\"name\": \"\\\"sushi\\\".\\\"waiter_as_customer_by_day\\\"\", \"temp_version\": \"3668757715\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"1936268024\", \"metadata_hash\": \"2088684978\", \"parent_data_hash\": \"3055854652\", \"parent_metadata_hash\": \"665080906\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, 
\"version\": \"1267397572\"}, {\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2824767713\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"3668757715\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"3668757715\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"identifier\": \"2725136291\"}, {\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"identifier\": \"4123940212\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}, {\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"identifier\": \"1461038955\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\", \"temp_version\": \"2695875565\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"2224089837\", \"metadata_hash\": \"2504236462\", \"parent_data_hash\": \"2738168331\", \"parent_metadata_hash\": \"1795276494\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"2443934302\", \"metadata_hash\": \"2904050331\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"2695875565\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2695875565\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"3721860967\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}, {\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"identifier\": \"1422946820\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"top_waiters\\\"\", \"temp_version\": \"3010914162\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"4131026946\", \"metadata_hash\": \"154190563\", \"parent_data_hash\": \"929243525\", \"parent_metadata_hash\": \"2366450878\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"2891807529\", \"metadata_hash\": \"3392493998\", \"parent_data_hash\": \"1940707936\", \"parent_metadata_hash\": \"1276363398\"}, \"version\": \"3010914162\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"3010914162\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\", \"identifier\": \"2175947464\"}], \"kind_name\": \"VIEW\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"temp_version\": \"2059227798\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"2037801255\", \"metadata_hash\": \"3063653103\", \"parent_data_hash\": \"458609840\", \"parent_metadata_hash\": \"2007040660\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"3501061139\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2059227798\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2059227798\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}], \"kind_name\": \"EMBEDDED\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"temp_version\": \"2359719298\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"2431070412\", \"metadata_hash\": \"3063653103\", \"parent_data_hash\": \"458609840\", \"parent_metadata_hash\": \"2007040660\"}, \"previous_versions\": [{\"fingerprint\": 
{\"data_hash\": \"3553985282\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"version\": \"2359719298\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"2359719298\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}], \"kind_name\": \"FULL\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"temp_version\": \"1204702829\", \"change_category\": 1, \"fingerprint\": {\"data_hash\": \"1437406487\", \"metadata_hash\": \"3468846895\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"2505706914\"}, {\"fingerprint\": {\"data_hash\": \"4133862560\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"1204702829\", \"change_category\": 1, \"physical_schema\": \"sqlmesh\"}], \"version\": \"1204702829\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"kind_name\": \"SEED\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"customer_revenue_by_day\\\"\", \"temp_version\": \"1291364031\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"131732542\", \"metadata_hash\": \"1368842087\", \"parent_data_hash\": \"2738168331\", \"parent_metadata_hash\": \"1795276494\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"2657552867\", \"metadata_hash\": \"129771006\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"version\": \"1291364031\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"1291364031\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"3721860967\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}, {\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"identifier\": \"1422946820\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"temp_version\": \"312608270\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"1862622614\", \"metadata_hash\": \"3651173237\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1960378930\", \"metadata_hash\": \"2900807542\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"312608270\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"312608270\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"temp_version\": \"1015284155\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"4010068827\", \"metadata_hash\": \"799196655\", \"parent_data_hash\": \"2342431947\", \"parent_metadata_hash\": \"1746080605\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"653664599\", \"metadata_hash\": \"1960934702\", \"parent_data_hash\": \"3170724558\", \"parent_metadata_hash\": \"867324801\"}, \"version\": \"1015284155\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"1015284155\", \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", 
\"identifier\": \"3721860967\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"1250207606\"}], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"temp_version\": \"925846788\", \"change_category\": 4, \"fingerprint\": {\"data_hash\": \"1588786367\", \"metadata_hash\": \"1674367104\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1628439771\", \"metadata_hash\": \"2745052130\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"925846788\", \"change_category\": 4, \"physical_schema\": \"sqlmesh\"}], \"version\": \"925846788\", \"physical_schema\": \"sqlmesh\", \"parents\": [], \"kind_name\": \"INCREMENTAL_BY_TIME_RANGE\", \"node_type\": \"model\"}]"},"start_at":{"0":"2023-01-01 00:00:00","1":"2023-01-01 00:00:00"},"end_at":{"0":"2023-01-07 00:00:00","1":"2023-01-07 00:00:00"},"plan_id":{"0":"2b16ff4b77dc44789b628b4a8a4ed38a","1":"d5dcc7aafce742aab763331525196613"},"previous_plan_id":{"0":null,"1":"79f4bab2177b495ab877b674bc511f2b"},"expiration_ts":{"0":1681419197966,"1":1681419273635},"finalized_ts":{"0":null,"1":null},"promoted_snapshot_ids":{"0":null,"1":null},"suffix_target":{"0":"schema","1":"schema"},"catalog_name_override":{"0":null,"1":null},"previous_finalized_snapshots":{"0":null,"1":null},"normalize_name":{"0":false,"1":false},"requirements":{"0":"{}","1":"{}"}} \ No newline at end of file diff --git a/tests/fixtures/migrations/intervals.json b/tests/fixtures/migrations/intervals.json new file mode 100644 index 0000000000..276fdd60de --- /dev/null +++ b/tests/fixtures/migrations/intervals.json @@ -0,0 +1 @@ +{"id":{"0":"1a1121bc700040d8af4f78ad96e025f1","1":"b901107d2ede4f50be32090eb2559d1a","2":"b366e44fd5e541008cb987a503b5ed7a","3":"ccbcd24427ac432da53fa158313ad800","4":"4fd6bdae011c4978aac8eb5a47521753","5":"d8549fb5f3674b29b4aa2b9988a42052","6":"3f8120d2a2c74f3baca25172537a7788","7":"f417d94c20e44dc5b1a0c29478672ac4","8":"6fd67cfbfcc743c8a87c32a95431c079","9":"b5a8f45c901e4c97aa634eb3ee5f521e","10":"46c7fdaccfd84ba68d766021d7d76511"},"created_ts":{"0":1757115220259,"1":1757115220259,"2":1757115220259,"3":1757115220259,"4":1757115220259,"5":1757115220259,"6":1757115220259,"7":1757115220259,"8":1757115220259,"9":1757115220260,"10":1757115220260},"name":{"0":"\"sushi\".\"waiter_as_customer_by_day\"","1":"\"sushi\".\"waiter_revenue_by_day\"","2":"\"sushi\".\"top_waiters\"","3":"\"sushi\".\"customers\"","4":"\"sushi\".\"waiter_names\"","5":"\"sushi\".\"customer_revenue_by_day\"","6":"\"sushi\".\"items\"","7":"\"sushi\".\"order_items\"","8":"\"sushi\".\"orders\"","9":"\"sushi\".\"waiter_as_customer_by_day\"","10":"\"sushi\".\"waiter_names\""},"identifier":{"0":"1281222509","1":"1609279380","2":"599861134","3":"3148897116","4":"3233103305","5":"1308408370","6":"2957171338","7":"1806777563","8":"3564161223","9":"1084858582","10":"1604207722"},"version":{"0":"1267397572","1":"2695875565","2":"3010914162","3":"2359719298","4":"2505706914","5":"1291364031","6":"312608270","7":"1015284155","8":"925846788","9":"3668757715","10":"1204702829"},"start_ts":{"0":1672531200000,"1":1672531200000,"2":1672531200000,"3":1672531200000,"4":1672531200000,"5":1672531200000,"6":1672531200000,"7":1672531200000,"8":1672531200000,"9":1672531200000,"10":1672531200000},"end_ts":{"0":1673136000000,"1":1673136000000,"2":1673136000000,"3":1673136000000,"4":1673136000000,"5":1673136000000,"6":1673136000000,"7":1673136000000,"8"
:1673136000000,"9":1673136000000,"10":1673136000000},"is_dev":{"0":false,"1":false,"2":false,"3":false,"4":false,"5":false,"6":false,"7":false,"8":false,"9":false,"10":false},"is_removed":{"0":false,"1":false,"2":false,"3":false,"4":false,"5":false,"6":false,"7":false,"8":false,"9":false,"10":false},"is_compacted":{"0":true,"1":true,"2":true,"3":true,"4":true,"5":true,"6":true,"7":true,"8":true,"9":true,"10":true}} \ No newline at end of file diff --git a/tests/fixtures/migrations/snapshots.json b/tests/fixtures/migrations/snapshots.json index 45cebe613b..638009abf1 100644 --- a/tests/fixtures/migrations/snapshots.json +++ b/tests/fixtures/migrations/snapshots.json @@ -1 +1 @@ -{"name":{"0":"sushi.waiter_as_customer_by_day","1":"sushi.waiter_revenue_by_day","2":"sushi.top_waiters","3":"sushi.waiters","4":"sushi.customers","5":"sushi.waiter_names","6":"sushi.customer_revenue_by_day","7":"sushi.items","8":"sushi.order_items","9":"sushi.orders","10":"sushi.waiter_as_customer_by_day","11":"sushi.waiter_names"},"identifier":{"0":"1281222509","1":"1609279380","2":"599861134","3":"3386889721","4":"3148897116","5":"3233103305","6":"1308408370","7":"2957171338","8":"1806777563","9":"3564161223","10":"1084858582","11":"1604207722"},"version":{"0":"1267397572","1":"2695875565","2":"3010914162","3":"2059227798","4":"2359719298","5":"2505706914","6":"1291364031","7":"312608270","8":"1015284155","9":"925846788","10":"3668757715","11":"1204702829"},"snapshot":{"0":"{\"name\": \"sushi.waiter_as_customer_by_day\", \"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.waiter_as_customer_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [[\"not_null\", {\"columns\": \"ARRAY(waiter_id)\"}]], \"expressions\": [], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT w.ds AS ds, w.waiter_id AS waiter_id, wn.name AS waiter_name FROM sushi.waiters AS w JOIN sushi.customers AS c ON w.waiter_id = c.customer_id JOIN sushi.waiter_names AS wn ON w.waiter_id = wn.id\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.waiters\", \"identifier\": \"3386889721\"}, {\"name\": \"sushi.waiter_names\", \"identifier\": \"3233103305\"}, {\"name\": \"sushi.customers\", \"identifier\": \"3148897116\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376348, \"updated_ts\": 1680814376348, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"1267397572\"}","1":"{\"name\": \"sushi.waiter_revenue_by_day\", \"fingerprint\": {\"data_hash\": \"2443934302\", \"metadata_hash\": \"2904050331\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.waiter_revenue_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"description\": \"Table of revenue generated by waiters by day.\", \"batch_size\": 10, 
\"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [[\"number_of_rows\", {\"threshold\": \"0\"}]], \"expressions\": [], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT CAST(o.waiter_id AS INT) AS waiter_id \/* Waiter id *\/, CAST(SUM(oi.quantity * i.price) AS DOUBLE) AS revenue \/* Revenue from orders taken by this waiter *\/, CAST(o.ds AS TEXT) AS ds \/* Date *\/ FROM sushi.orders AS o LEFT JOIN sushi.order_items AS oi ON o.id = oi.order_id AND o.ds = oi.ds LEFT JOIN sushi.items AS i ON oi.item_id = i.id AND oi.ds = i.ds WHERE o.ds BETWEEN @start_ds AND @end_ds GROUP BY o.waiter_id, o.ds\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.order_items\", \"identifier\": \"1806777563\"}, {\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376361, \"updated_ts\": 1680814376361, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"2695875565\"}","2":"{\"name\": \"sushi.top_waiters\", \"fingerprint\": {\"data_hash\": \"2891807529\", \"metadata_hash\": \"3392493998\", \"parent_data_hash\": \"1940707936\", \"parent_metadata_hash\": \"1276363398\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.top_waiters\", \"kind\": {\"name\": \"VIEW\"}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"description\": \"View of top waiters.\", \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [[\"unique_values\", {\"columns\": \"ARRAY(waiter_id)\"}]], \"expressions\": [], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT CAST(waiter_id AS INT) AS waiter_id, CAST(revenue AS DOUBLE) AS revenue FROM sushi.waiter_revenue_by_day WHERE ds = (SELECT MAX(ds) FROM sushi.waiter_revenue_by_day) ORDER BY revenue DESC LIMIT 10\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.waiter_revenue_by_day\", \"identifier\": \"1609279380\"}], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376384, \"updated_ts\": 1680814376384, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"3010914162\"}","3":"{\"name\": \"sushi.waiters\", \"fingerprint\": {\"data_hash\": \"3501061139\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.waiters\", \"kind\": {\"name\": \"EMBEDDED\"}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [], \"expressions\": [], \"python_env\": {\"incremental_by_ds\": {\"payload\": \"def incremental_by_ds(evaluator, column):\\n expression = evaluator.transform(exp.Between(this=column, low=MacroVar(\\n this='start_ds'), high=MacroVar(this='end_ds')))\\n if not isinstance(expression, exp.Expression):\\n raise MacroEvalError(\\n f'Return type is {type(expression)}, expected exp.Expression')\\n return expression\", \"kind\": \"definition\", \"name\": \"incremental_by_ds\", \"path\": \"macros\/macros.py\"}, \"exp\": {\"payload\": \"import sqlglot.expressions as exp\", \"kind\": \"import\"}, \"MacroVar\": {\"payload\": \"from sqlmesh.core.dialect import MacroVar\", 
\"kind\": \"import\"}, \"MacroEvalError\": {\"payload\": \"from sqlmesh.utils.errors import MacroEvalError\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT DISTINCT CAST(waiter_id AS INT) AS waiter_id, CAST(ds AS TEXT) AS ds FROM sushi.orders AS o WHERE @incremental_by_ds(ds)\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [], \"intervals\": [], \"dev_intervals\": [], \"created_ts\": 1680814376387, \"updated_ts\": 1680814376387, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"2059227798\"}","4":"{\"name\": \"sushi.customers\", \"fingerprint\": {\"data_hash\": \"3553985282\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.customers\", \"kind\": {\"name\": \"FULL\"}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"pre\": [[\"noop\", {\"x\": \"1\"}]], \"post\": [[\"noop\", {}], [\"noop\", {\"y\": \"ARRAY('a', 2)\"}]], \"audits\": [], \"expressions\": [], \"python_env\": {\"noop\": {\"payload\": \"def noop(context, start, end, latest, **kwargs):\\n pass\", \"kind\": \"definition\", \"name\": \"noop\", \"path\": \"hooks\/hooks.py\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT DISTINCT CAST(customer_id AS INT) AS customer_id FROM sushi.orders AS o\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376388, \"updated_ts\": 1680814376388, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"2359719298\"}","5":"{\"name\": \"sushi.waiter_names\", \"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.waiter_names\", \"kind\": {\"name\": \"SEED\", \"path\": \"..\/seeds\/waiter_names.csv\", \"batch_size\": 5}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [], \"expressions\": [], \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"seed\": {\"content\": \"id,name\\n0,Toby\\n1,Tyson\\n2,Ryan\\n3,George\\n4,Chris\\n5,Max\\n6,Vincent\\n7,Iaroslav\\n8,Emma\\n9,Maia\\n\"}, \"source_type\": \"seed\"}, \"parents\": [], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376389, \"updated_ts\": 1680814376389, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"2505706914\"}","6":"{\"name\": \"sushi.customer_revenue_by_day\", \"fingerprint\": {\"data_hash\": \"2657552867\", \"metadata_hash\": \"129771006\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.customer_revenue_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"hive\", \"cron\": \"@daily\", \"owner\": \"jen\", \"description\": \"Table of revenue from customers by day.\", 
\"batch_size\": 10, \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [], \"expressions\": [], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"WITH order_total AS (SELECT oi.order_id AS order_id, SUM(oi.quantity * i.price) AS total, oi.ds AS ds FROM sushi.order_items AS oi LEFT JOIN sushi.items AS i ON oi.item_id = i.id AND oi.ds = i.ds WHERE oi.ds BETWEEN '{{ start_ds }}' AND '{{ end_ds }}' GROUP BY oi.order_id, oi.ds) SELECT CAST(o.customer_id AS INT) AS customer_id \/* Customer id *\/, CAST(SUM(ot.total) AS DOUBLE) AS revenue \/* Revenue from orders made by this customer *\/, CAST(o.ds AS TEXT) AS ds \/* Date *\/ FROM sushi.orders AS o LEFT JOIN order_total AS ot ON o.id = ot.order_id AND o.ds = ot.ds WHERE o.ds BETWEEN '{{ start_ds }}' AND '{{ end_ds }}' GROUP BY o.customer_id, o.ds\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.order_items\", \"identifier\": \"1806777563\"}, {\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376391, \"updated_ts\": 1680814376391, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"1291364031\"}","7":"{\"name\": \"sushi.items\", \"fingerprint\": {\"data_hash\": \"1960378930\", \"metadata_hash\": \"2900807542\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.items\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"\", \"cron\": \"@daily\", \"start\": \"Jan 1 2022\", \"batch_size\": 30, \"partitioned_by\": [], \"pre\": [], \"post\": [], \"depends_on\": [], \"columns\": {\"id\": \"INT\", \"name\": \"TEXT\", \"price\": \"DOUBLE\", \"ds\": \"TEXT\"}, \"audits\": [[\"accepted_values\", {\"column\": \"name\", \"values\": \"ARRAY('Ahi', 'Aji', 'Amaebi', 'Anago', 'Aoyagi', 'Bincho', 'Katsuo', 'Ebi', 'Escolar', 'Hamachi', 'Hamachi Toro', 'Hirame', 'Hokigai', 'Hotate', 'Ika', 'Ikura', 'Iwashi', 'Kani', 'Kanpachi', 'Maguro', 'Saba', 'Sake', 'Sake Toro', 'Tai', 'Tako', 'Tamago', 'Tobiko', 'Toro', 'Tsubugai', 'Umi Masu', 'Unagi', 'Uni')\"}], [\"not_null\", {\"columns\": \"ARRAY(name, price)\"}], [\"assert_items_price_exceeds_threshold\", {\"price\": \"0\"}]], \"expressions\": [], \"python_env\": {\"execute\": {\"payload\": \"def execute(context, start, end, latest, **kwargs):\\n dfs = []\\n for dt in iter_dates(start, end):\\n num_items = random.randint(10, len(ITEMS))\\n dfs.append(pd.DataFrame({'name': random.sample(ITEMS, num_items),\\n 'price': np.random.uniform(3.0, 10.0, size=num_items).round(2),\\n 'ds': to_ds(dt)}).reset_index().rename(columns={'index': 'id'}))\\n return pd.concat(dfs)\", \"kind\": \"definition\", \"name\": \"execute\", \"path\": \"models\/items.py\"}, \"iter_dates\": {\"payload\": \"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\", \"kind\": \"definition\", \"name\": \"iter_dates\", \"path\": \"helper.py\"}, \"timedelta\": {\"payload\": \"from datetime import timedelta\", \"kind\": \"import\"}, \"set_seed\": {\"payload\": \"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\", \"kind\": \"definition\", \"name\": \"set_seed\", \"path\": 
\"helper.py\"}, \"random\": {\"payload\": \"import random\", \"kind\": \"import\"}, \"np\": {\"payload\": \"import numpy as np\", \"kind\": \"import\"}, \"ITEMS\": {\"payload\": \"['Ahi', 'Aji', 'Amaebi', 'Anago', 'Aoyagi', 'Bincho', 'Katsuo', 'Ebi', 'Escolar', 'Hamachi', 'Hamachi Toro', 'Hirame', 'Hokigai', 'Hotate', 'Ika', 'Ikura', 'Iwashi', 'Kani', 'Kanpachi', 'Maguro', 'Saba', 'Sake', 'Sake Toro', 'Tai', 'Tako', 'Tamago', 'Tobiko', 'Toro', 'Tsubugai', 'Umi Masu', 'Unagi', 'Uni']\", \"kind\": \"value\"}, \"pd\": {\"payload\": \"import pandas as pd\", \"kind\": \"import\"}, \"to_ds\": {\"payload\": \"from sqlmesh.utils.date import to_ds\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"entrypoint\": \"execute\", \"source_type\": \"python\"}, \"parents\": [], \"audits\": [{\"name\": \"assert_items_price_exceeds_threshold\", \"dialect\": \"\", \"skip\": false, \"blocking\": true, \"query\": \"SELECT * FROM @this_model WHERE price <= @price\", \"expressions\": []}], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376399, \"updated_ts\": 1680814376399, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"312608270\"}","8":"{\"name\": \"sushi.order_items\", \"fingerprint\": {\"data_hash\": \"653664599\", \"metadata_hash\": \"1960934702\", \"parent_data_hash\": \"3170724558\", \"parent_metadata_hash\": \"867324801\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.order_items\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"\", \"cron\": \"@daily\", \"batch_size\": 30, \"partitioned_by\": [], \"pre\": [], \"post\": [], \"depends_on\": [\"sushi.items\", \"sushi.orders\"], \"columns\": {\"id\": \"INT\", \"order_id\": \"INT\", \"item_id\": \"INT\", \"quantity\": \"INT\", \"ds\": \"TEXT\"}, \"audits\": [[\"not_null\", {\"columns\": \"ARRAY(id, order_id, item_id, quantity)\"}], [\"assert_order_items_quantity_exceeds_threshold\", {\"quantity\": \"0\"}]], \"expressions\": [], \"python_env\": {\"execute\": {\"payload\": \"def execute(context, start, end, latest, **kwargs):\\n orders_table = context.table('sushi.orders')\\n items_table = context.table(ITEMS)\\n for dt in iter_dates(start, end):\\n orders = context.fetchdf(\\n f\\\"\\\"\\\"\\n SELECT *\\n FROM {orders_table}\\n WHERE ds = '{to_ds(dt)}'\\n \\\"\\\"\\\"\\n )\\n items = context.fetchdf(\\n f\\\"\\\"\\\"\\n SELECT *\\n FROM {items_table}\\n WHERE ds = '{to_ds(dt)}'\\n \\\"\\\"\\\"\\n )\\n for order_id in orders['id']:\\n n = random.randint(1, 5)\\n yield pd.DataFrame({'order_id': order_id, 'item_id': items.\\n sample(n=n)['id'], 'quantity': np.random.randint(1, 10, n),\\n 'ds': to_ds(dt)}).reset_index().rename(columns={'index': 'id'})\", \"kind\": \"definition\", \"name\": \"execute\", \"path\": \"models\/order_items.py\"}, \"ITEMS\": {\"payload\": \"'sushi.items'\", \"kind\": \"value\"}, \"iter_dates\": {\"payload\": \"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\", \"kind\": \"definition\", \"name\": \"iter_dates\", \"path\": \"helper.py\"}, \"timedelta\": {\"payload\": \"from datetime import timedelta\", \"kind\": \"import\"}, \"set_seed\": {\"payload\": \"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\", \"kind\": \"definition\", \"name\": \"set_seed\", \"path\": 
\"helper.py\"}, \"random\": {\"payload\": \"import random\", \"kind\": \"import\"}, \"np\": {\"payload\": \"import numpy as np\", \"kind\": \"import\"}, \"to_ds\": {\"payload\": \"from sqlmesh.utils.date import to_ds\", \"kind\": \"import\"}, \"pd\": {\"payload\": \"import pandas as pd\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"entrypoint\": \"execute\", \"source_type\": \"python\"}, \"parents\": [{\"name\": \"sushi.items\", \"identifier\": \"2957171338\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [{\"name\": \"assert_order_items_quantity_exceeds_threshold\", \"dialect\": \"\", \"skip\": false, \"blocking\": true, \"query\": \"SELECT * FROM @this_model WHERE quantity <= @quantity\", \"expressions\": []}], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376401, \"updated_ts\": 1680814376401, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"1015284155\"}","9":"{\"name\": \"sushi.orders\", \"fingerprint\": {\"data_hash\": \"1628439771\", \"metadata_hash\": \"2745052130\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.orders\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"\", \"cron\": \"@daily\", \"description\": \"Table of sushi orders.\", \"start\": \"2022-01-01\", \"batch_size\": 30, \"partitioned_by\": [], \"pre\": [], \"post\": [], \"depends_on\": [], \"columns\": {\"id\": \"INT\", \"customer_id\": \"INT\", \"waiter_id\": \"INT\", \"start_ts\": \"INT\", \"end_ts\": \"INT\", \"ds\": \"TEXT\"}, \"audits\": [], \"expressions\": [], \"python_env\": {\"execute\": {\"payload\": \"def execute(context, start, end, latest, **kwargs):\\n dfs = []\\n for dt in iter_dates(start, end):\\n num_orders = random.randint(10, 30)\\n start_ts = [int((dt + timedelta(seconds=random.randint(0, 80000))).\\n timestamp()) for _ in range(num_orders)]\\n end_ts = [int(s + random.randint(0, 60 * 60)) for s in start_ts]\\n dfs.append(pd.DataFrame({'customer_id': random.choices(CUSTOMERS, k\\n =num_orders), 'waiter_id': random.choices(WAITERS, k=num_orders\\n ), 'start_ts': start_ts, 'end_ts': end_ts, 'ds': to_ds(dt)}).\\n reset_index().rename(columns={'index': 'id'}))\\n return pd.concat(dfs)\", \"kind\": \"definition\", \"name\": \"execute\", \"path\": \"models\/orders.py\"}, \"iter_dates\": {\"payload\": \"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\", \"kind\": \"definition\", \"name\": \"iter_dates\", \"path\": \"helper.py\"}, \"timedelta\": {\"payload\": \"from datetime import timedelta\", \"kind\": \"import\"}, \"set_seed\": {\"payload\": \"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\", \"kind\": \"definition\", \"name\": \"set_seed\", \"path\": \"helper.py\"}, \"random\": {\"payload\": \"import random\", \"kind\": \"import\"}, \"np\": {\"payload\": \"import numpy as np\", \"kind\": \"import\"}, \"pd\": {\"payload\": \"import pandas as pd # noqa: TID253\", \"kind\": \"import\"}, \"CUSTOMERS\": {\"payload\": \"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 
58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]\", \"kind\": \"value\"}, \"WAITERS\": {\"payload\": \"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\", \"kind\": \"value\"}, \"to_ds\": {\"payload\": \"from sqlmesh.utils.date import to_ds\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"entrypoint\": \"execute\", \"source_type\": \"python\"}, \"parents\": [], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814376402, \"updated_ts\": 1680814376402, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"indirect_versions\": {}, \"version\": \"925846788\"}","10":"{\"name\": \"sushi.waiter_as_customer_by_day\", \"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2824767713\", \"parent_metadata_hash\": \"1349779748\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.waiter_as_customer_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [[\"not_null\", {\"columns\": \"ARRAY(waiter_id)\"}]], \"expressions\": [], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"table_properties\": {\"key\": \"'value'\"}, \"query\": \"SELECT w.ds AS ds, w.waiter_id AS waiter_id, wn.name AS waiter_name FROM sushi.waiters AS w JOIN sushi.customers AS c ON w.waiter_id = c.customer_id JOIN sushi.waiter_names AS wn ON w.waiter_id = wn.id\", \"source_type\": \"sql\"}, \"parents\": [{\"name\": \"sushi.waiters\", \"identifier\": \"3386889721\"}, {\"name\": \"sushi.waiter_names\", \"identifier\": \"1604207722\"}, {\"name\": \"sushi.customers\", \"identifier\": \"3148897116\"}, {\"name\": \"sushi.orders\", \"identifier\": \"3564161223\"}], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814464891, \"updated_ts\": 1680814464891, \"ttl\": \"in 1 week\", \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"1267397572\"}], \"indirect_versions\": {}, \"version\": \"3668757715\"}","11":"{\"name\": \"sushi.waiter_names\", \"fingerprint\": {\"data_hash\": \"4133862560\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"model\": {\"name\": \"sushi.waiter_names\", \"kind\": {\"name\": \"SEED\", \"path\": \"..\/seeds\/waiter_names.csv\", \"batch_size\": 5}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"pre\": [], \"post\": [], \"audits\": [], \"expressions\": [], \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"seed\": {\"content\": \"id,name\\n0,Toby\\n1,Tyson\\n2,Ryan\\n3,George\\n4,Chris\\n5,Max\\n6,Vincent\\n7,Iaroslav\\n8,Emma\\n9,Maia\\n10,Jim\\n\"}, \"source_type\": \"seed\"}, \"parents\": [], \"audits\": [], \"intervals\": [[1672531200000, 1673136000000]], \"dev_intervals\": [], \"created_ts\": 1680814464932, \"updated_ts\": 1680814464932, \"ttl\": \"in 1 week\", \"previous_versions\": [{\"fingerprint\": {\"data_hash\": 
\"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"2505706914\"}], \"indirect_versions\": {\"sushi.waiter_as_customer_by_day\": [{\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"1267397572\"}, {\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2824767713\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"3668757715\"}]}, \"version\": \"1204702829\", \"change_category\": 1}"}} \ No newline at end of file +{"name":{"0":"\"sushi\".\"waiter_as_customer_by_day\"","1":"\"sushi\".\"waiter_revenue_by_day\"","2":"\"sushi\".\"top_waiters\"","3":"\"sushi\".\"waiters\"","4":"\"sushi\".\"customers\"","5":"\"sushi\".\"waiter_names\"","6":"\"sushi\".\"customer_revenue_by_day\"","7":"\"sushi\".\"items\"","8":"\"sushi\".\"order_items\"","9":"\"sushi\".\"orders\"","10":"\"sushi\".\"waiter_as_customer_by_day\"","11":"\"sushi\".\"waiter_names\"","12":"\"sushi\".\"waiter_as_customer_by_day\"","13":"\"sushi\".\"waiter_names\"","14":"\"sushi\".\"customer_revenue_by_day\"","15":"\"sushi\".\"top_waiters\"","16":"\"sushi\".\"waiter_revenue_by_day\"","17":"\"sushi\".\"order_items\"","18":"\"sushi\".\"items\"","19":"\"sushi\".\"waiter_as_customer_by_day\"","20":"\"sushi\".\"waiter_names\"","21":"\"sushi\".\"customers\"","22":"\"sushi\".\"waiters\"","23":"\"sushi\".\"orders\""},"identifier":{"0":"1281222509","1":"1609279380","2":"599861134","3":"3386889721","4":"3148897116","5":"3233103305","6":"1308408370","7":"2957171338","8":"1806777563","9":"3564161223","10":"1084858582","11":"1604207722","12":"3998224796","13":"2725136291","14":"3566886383","15":"129039563","16":"2175947464","17":"1422946820","18":"3721860967","19":"1341746752","20":"1609854746","21":"1461038955","22":"4123940212","23":"1250207606"},"version":{"0":"1267397572","1":"2695875565","2":"3010914162","3":"2059227798","4":"2359719298","5":"2505706914","6":"1291364031","7":"312608270","8":"1015284155","9":"925846788","10":"3668757715","11":"3668757715","12":"3668757715","13":"1204702829","14":"1291364031","15":"3010914162","16":"2695875565","17":"1015284155","18":"312608270","19":"1267397572","20":"2505706914","21":"2359719298","22":"2059227798","23":"925846788"},"snapshot":{"0":"{\"name\": \"\\\"sushi\\\".\\\"waiter_as_customer_by_day\\\"\", \"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"identifier\": \"3386889721\"}, {\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"identifier\": \"3233103305\"}, {\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"identifier\": \"3148897116\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814376348, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"1267397572\", \"node\": {\"name\": \"sushi.waiter_as_customer_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"audits\": [[\"not_null\", {\"columns\": \"ARRAY(waiter_id)\"}]], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, 
\"global_objs\": {}}, \"query\": \"SELECT w.ds AS ds, w.waiter_id AS waiter_id, wn.name AS waiter_name FROM sushi.waiters AS w JOIN sushi.customers AS c ON w.waiter_id = c.customer_id JOIN sushi.waiter_names AS wn ON w.waiter_id = wn.id\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.waiter_as_customer_by_day\"}","1":"{\"name\": \"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\", \"fingerprint\": {\"data_hash\": \"2443934302\", \"metadata_hash\": \"2904050331\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"identifier\": \"1806777563\"}, {\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"2957171338\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814376361, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"2695875565\", \"node\": {\"name\": \"sushi.waiter_revenue_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}, \"batch_size\": 10}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"description\": \"Table of revenue generated by waiters by day.\", \"partitioned_by\": [], \"audits\": [[\"number_of_rows\", {\"threshold\": \"0\"}]], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT CAST(o.waiter_id AS INT) AS waiter_id \/* Waiter id *\/, CAST(SUM(oi.quantity * i.price) AS DOUBLE) AS revenue \/* Revenue from orders taken by this waiter *\/, CAST(o.ds AS TEXT) AS ds \/* Date *\/ FROM sushi.orders AS o LEFT JOIN sushi.order_items AS oi ON o.id = oi.order_id AND o.ds = oi.ds LEFT JOIN sushi.items AS i ON oi.item_id = i.id AND oi.ds = i.ds WHERE o.ds BETWEEN @start_ds AND @end_ds GROUP BY o.waiter_id, o.ds\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.waiter_revenue_by_day\"}","2":"{\"name\": \"\\\"sushi\\\".\\\"top_waiters\\\"\", \"fingerprint\": {\"data_hash\": \"2891807529\", \"metadata_hash\": \"3392493998\", \"parent_data_hash\": \"1940707936\", \"parent_metadata_hash\": \"1276363398\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\", \"identifier\": \"1609279380\"}], \"created_ts\": 1680814376384, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"3010914162\", \"node\": {\"name\": \"sushi.top_waiters\", \"kind\": {\"name\": \"VIEW\"}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"description\": \"View of top waiters.\", \"partitioned_by\": [], \"audits\": [[\"unique_values\", {\"columns\": \"ARRAY(waiter_id)\"}]], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT CAST(waiter_id AS INT) AS waiter_id, CAST(revenue AS DOUBLE) AS revenue FROM sushi.waiter_revenue_by_day WHERE ds = (SELECT MAX(ds) FROM sushi.waiter_revenue_by_day) ORDER BY revenue DESC LIMIT 10\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.top_waiters\"}","3":"{\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"fingerprint\": {\"data_hash\": \"3501061139\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", 
\"parent_metadata_hash\": \"2042613269\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814376387, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"2059227798\", \"node\": {\"name\": \"sushi.waiters\", \"kind\": {\"name\": \"EMBEDDED\"}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"audits\": [], \"python_env\": {\"incremental_by_ds\": {\"payload\": \"def incremental_by_ds(evaluator, column):\\n expression = evaluator.transform(exp.Between(this=column, low=MacroVar(\\n this='start_ds'), high=MacroVar(this='end_ds')))\\n if not isinstance(expression, exp.Expression):\\n raise MacroEvalError(\\n f'Return type is {type(expression)}, expected exp.Expression')\\n return expression\", \"kind\": \"definition\", \"name\": \"incremental_by_ds\", \"path\": \"macros\/macros.py\"}, \"exp\": {\"payload\": \"import sqlglot.expressions as exp\", \"kind\": \"import\"}, \"MacroVar\": {\"payload\": \"from sqlmesh.core.dialect import MacroVar\", \"kind\": \"import\"}, \"MacroEvalError\": {\"payload\": \"from sqlmesh.utils.errors import MacroEvalError\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT DISTINCT CAST(waiter_id AS INT) AS waiter_id, CAST(ds AS TEXT) AS ds FROM sushi.orders AS o WHERE @incremental_by_ds(ds)\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.waiters\"}","4":"{\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"fingerprint\": {\"data_hash\": \"3553985282\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"777615193\", \"parent_metadata_hash\": \"2042613269\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814376388, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"2359719298\", \"node\": {\"name\": \"sushi.customers\", \"kind\": {\"name\": \"FULL\"}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"audits\": [], \"python_env\": {\"noop\": {\"payload\": \"def noop(context, start, end, latest, **kwargs):\\n pass\", \"kind\": \"definition\", \"name\": \"noop\", \"path\": \"hooks\/hooks.py\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"SELECT DISTINCT CAST(customer_id AS INT) AS customer_id FROM sushi.orders AS o\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.customers\"}","5":"{\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [], \"created_ts\": 1680814376389, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"2505706914\", \"node\": {\"name\": \"sushi.waiter_names\", \"kind\": {\"name\": \"SEED\", \"path\": \"..\/seeds\/waiter_names.csv\", \"batch_size\": 5}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"audits\": [], \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"seed\": {\"content\": 
\"id,name\\n0,Toby\\n1,Tyson\\n2,Ryan\\n3,George\\n4,Chris\\n5,Max\\n6,Vincent\\n7,Iaroslav\\n8,Emma\\n9,Maia\\n\"}, \"source_type\": \"seed\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.waiter_names\"}","6":"{\"name\": \"\\\"sushi\\\".\\\"customer_revenue_by_day\\\"\", \"fingerprint\": {\"data_hash\": \"2657552867\", \"metadata_hash\": \"129771006\", \"parent_data_hash\": \"764310396\", \"parent_metadata_hash\": \"3147731239\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"identifier\": \"1806777563\"}, {\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"2957171338\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814376391, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"1291364031\", \"node\": {\"name\": \"sushi.customer_revenue_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}, \"batch_size\": 10}, \"dialect\": \"hive\", \"cron\": \"@daily\", \"owner\": \"jen\", \"description\": \"Table of revenue from customers by day.\", \"partitioned_by\": [], \"audits\": [], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"query\": \"JINJA_QUERY_BEGIN;\\nWITH order_total AS (SELECT oi.order_id AS order_id, SUM(oi.quantity * i.price) AS total, oi.ds AS ds FROM sushi.order_items AS oi LEFT JOIN sushi.items AS i ON oi.item_id = i.id AND oi.ds = i.ds WHERE oi.ds BETWEEN '{{ start_ds }}' AND '{{ end_ds }}' GROUP BY oi.order_id, oi.ds) SELECT CAST(o.customer_id AS INT) AS customer_id \/* Customer id *\/, CAST(SUM(ot.total) AS DOUBLE) AS revenue \/* Revenue from orders made by this customer *\/, CAST(o.ds AS TEXT) AS ds \/* Date *\/ FROM sushi.orders AS o LEFT JOIN order_total AS ot ON o.id = ot.order_id AND o.ds = ot.ds WHERE o.ds BETWEEN '{{ start_ds }}' AND '{{ end_ds }}' GROUP BY o.customer_id, o.ds\\nJINJA_END;\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.customer_revenue_by_day\"}","7":"{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"fingerprint\": {\"data_hash\": \"1960378930\", \"metadata_hash\": \"2900807542\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [], \"created_ts\": 1680814376399, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"312608270\", \"node\": {\"name\": \"sushi.items\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}, \"batch_size\": 30}, \"dialect\": \"\", \"cron\": \"@daily\", \"start\": \"Jan 1 2022\", \"partitioned_by\": [], \"depends_on\": [], \"columns\": {\"id\": \"INT\", \"name\": \"TEXT\", \"price\": \"DOUBLE\", \"ds\": \"TEXT\"}, \"audits\": [[\"accepted_values\", {\"column\": \"name\", \"values\": \"ARRAY('Ahi', 'Aji', 'Amaebi', 'Anago', 'Aoyagi', 'Bincho', 'Katsuo', 'Ebi', 'Escolar', 'Hamachi', 'Hamachi Toro', 'Hirame', 'Hokigai', 'Hotate', 'Ika', 'Ikura', 'Iwashi', 'Kani', 'Kanpachi', 'Maguro', 'Saba', 'Sake', 'Sake Toro', 'Tai', 'Tako', 'Tamago', 'Tobiko', 'Toro', 'Tsubugai', 'Umi Masu', 'Unagi', 'Uni')\"}], [\"not_null\", {\"columns\": \"ARRAY(name, price)\"}], [\"assert_items_price_exceeds_threshold\", {\"price\": \"0\"}]], \"python_env\": {\"execute\": {\"payload\": \"def execute(context, start, end, 
latest, **kwargs):\\n dfs = []\\n for dt in iter_dates(start, end):\\n num_items = random.randint(10, len(ITEMS))\\n dfs.append(pd.DataFrame({'name': random.sample(ITEMS, num_items),\\n 'price': np.random.uniform(3.0, 10.0, size=num_items).round(2),\\n 'ds': to_ds(dt)}).reset_index().rename(columns={'index': 'id'}))\\n return pd.concat(dfs)\", \"kind\": \"definition\", \"name\": \"execute\", \"path\": \"models\/items.py\"}, \"iter_dates\": {\"payload\": \"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\", \"kind\": \"definition\", \"name\": \"iter_dates\", \"path\": \"helper.py\"}, \"timedelta\": {\"payload\": \"from datetime import timedelta\", \"kind\": \"import\"}, \"set_seed\": {\"payload\": \"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\", \"kind\": \"definition\", \"name\": \"set_seed\", \"path\": \"helper.py\"}, \"random\": {\"payload\": \"import random\", \"kind\": \"import\"}, \"np\": {\"payload\": \"import numpy as np\", \"kind\": \"import\"}, \"ITEMS\": {\"payload\": \"['Ahi', 'Aji', 'Amaebi', 'Anago', 'Aoyagi', 'Bincho', 'Katsuo', 'Ebi', 'Escolar', 'Hamachi', 'Hamachi Toro', 'Hirame', 'Hokigai', 'Hotate', 'Ika', 'Ikura', 'Iwashi', 'Kani', 'Kanpachi', 'Maguro', 'Saba', 'Sake', 'Sake Toro', 'Tai', 'Tako', 'Tamago', 'Tobiko', 'Toro', 'Tsubugai', 'Umi Masu', 'Unagi', 'Uni']\", \"kind\": \"value\"}, \"pd\": {\"payload\": \"import pandas as pd\", \"kind\": \"import\"}, \"to_ds\": {\"payload\": \"from sqlmesh.utils.date import to_ds\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"entrypoint\": \"execute\", \"source_type\": \"python\", \"project\": \"\", \"default_catalog\": null, \"audit_definitions\": {\"assert_items_price_exceeds_threshold\": {\"name\": \"assert_items_price_exceeds_threshold\", \"dialect\": \"\", \"skip\": false, \"blocking\": true, \"query\": \"SELECT * FROM @this_model WHERE price <= @price\", \"expressions\": []}}}, \"change_category\": 4, \"base_table_name_override\": \"sushi.items\"}","8":"{\"name\": \"\\\"sushi\\\".\\\"order_items\\\"\", \"fingerprint\": {\"data_hash\": \"653664599\", \"metadata_hash\": \"1960934702\", \"parent_data_hash\": \"3170724558\", \"parent_metadata_hash\": \"867324801\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"items\\\"\", \"identifier\": \"2957171338\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814376401, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"1015284155\", \"node\": {\"name\": \"sushi.order_items\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}, \"batch_size\": 30}, \"dialect\": \"\", \"cron\": \"@daily\", \"partitioned_by\": [], \"depends_on\": [\"\\\"sushi\\\".\\\"items\\\"\", \"\\\"sushi\\\".\\\"orders\\\"\"], \"columns\": {\"id\": \"INT\", \"order_id\": \"INT\", \"item_id\": \"INT\", \"quantity\": \"INT\", \"ds\": \"TEXT\"}, \"audits\": [[\"not_null\", {\"columns\": \"ARRAY(id, order_id, item_id, quantity)\"}], [\"assert_order_items_quantity_exceeds_threshold\", {\"quantity\": \"0\"}]], \"python_env\": {\"execute\": {\"payload\": \"def execute(context, start, end, latest, **kwargs):\\n orders_table = context.table('sushi.orders')\\n items_table = context.table(ITEMS)\\n for dt in iter_dates(start, end):\\n orders = context.fetchdf(\\n f\\\"\\\"\\\"\\n 
SELECT *\\n FROM {orders_table}\\n WHERE ds = '{to_ds(dt)}'\\n \\\"\\\"\\\"\\n )\\n items = context.fetchdf(\\n f\\\"\\\"\\\"\\n SELECT *\\n FROM {items_table}\\n WHERE ds = '{to_ds(dt)}'\\n \\\"\\\"\\\"\\n )\\n for order_id in orders['id']:\\n n = random.randint(1, 5)\\n yield pd.DataFrame({'order_id': order_id, 'item_id': items.\\n sample(n=n)['id'], 'quantity': np.random.randint(1, 10, n),\\n 'ds': to_ds(dt)}).reset_index().rename(columns={'index': 'id'})\", \"kind\": \"definition\", \"name\": \"execute\", \"path\": \"models\/order_items.py\"}, \"ITEMS\": {\"payload\": \"'sushi.items'\", \"kind\": \"value\"}, \"iter_dates\": {\"payload\": \"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\", \"kind\": \"definition\", \"name\": \"iter_dates\", \"path\": \"helper.py\"}, \"timedelta\": {\"payload\": \"from datetime import timedelta\", \"kind\": \"import\"}, \"set_seed\": {\"payload\": \"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\", \"kind\": \"definition\", \"name\": \"set_seed\", \"path\": \"helper.py\"}, \"random\": {\"payload\": \"import random\", \"kind\": \"import\"}, \"np\": {\"payload\": \"import numpy as np\", \"kind\": \"import\"}, \"to_ds\": {\"payload\": \"from sqlmesh.utils.date import to_ds\", \"kind\": \"import\"}, \"pd\": {\"payload\": \"import pandas as pd\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"entrypoint\": \"execute\", \"source_type\": \"python\", \"project\": \"\", \"default_catalog\": null, \"audit_definitions\": {\"assert_order_items_quantity_exceeds_threshold\": {\"name\": \"assert_order_items_quantity_exceeds_threshold\", \"dialect\": \"\", \"skip\": false, \"blocking\": true, \"query\": \"SELECT * FROM @this_model WHERE quantity <= @quantity\", \"expressions\": []}}}, \"change_category\": 4, \"base_table_name_override\": \"sushi.order_items\"}","9":"{\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"fingerprint\": {\"data_hash\": \"1628439771\", \"metadata_hash\": \"2745052130\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [], \"created_ts\": 1680814376402, \"ttl\": \"in 1 week\", \"previous_versions\": [], \"version\": \"925846788\", \"node\": {\"name\": \"sushi.orders\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}, \"batch_size\": 30}, \"dialect\": \"\", \"cron\": \"@daily\", \"description\": \"Table of sushi orders.\", \"start\": \"2022-01-01\", \"partitioned_by\": [], \"depends_on\": [], \"columns\": {\"id\": \"INT\", \"customer_id\": \"INT\", \"waiter_id\": \"INT\", \"start_ts\": \"INT\", \"end_ts\": \"INT\", \"ds\": \"TEXT\"}, \"audits\": [], \"python_env\": {\"execute\": {\"payload\": \"def execute(context, start, end, latest, **kwargs):\\n dfs = []\\n for dt in iter_dates(start, end):\\n num_orders = random.randint(10, 30)\\n start_ts = [int((dt + timedelta(seconds=random.randint(0, 80000))).\\n timestamp()) for _ in range(num_orders)]\\n end_ts = [int(s + random.randint(0, 60 * 60)) for s in start_ts]\\n dfs.append(pd.DataFrame({'customer_id': random.choices(CUSTOMERS, k\\n =num_orders), 'waiter_id': random.choices(WAITERS, k=num_orders\\n ), 'start_ts': start_ts, 'end_ts': end_ts, 'ds': to_ds(dt)}).\\n reset_index().rename(columns={'index': 'id'}))\\n return pd.concat(dfs)\", \"kind\": \"definition\", \"name\": \"execute\", 
\"path\": \"models\/orders.py\"}, \"iter_dates\": {\"payload\": \"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\", \"kind\": \"definition\", \"name\": \"iter_dates\", \"path\": \"helper.py\"}, \"timedelta\": {\"payload\": \"from datetime import timedelta\", \"kind\": \"import\"}, \"set_seed\": {\"payload\": \"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\", \"kind\": \"definition\", \"name\": \"set_seed\", \"path\": \"helper.py\"}, \"random\": {\"payload\": \"import random\", \"kind\": \"import\"}, \"np\": {\"payload\": \"import numpy as np\", \"kind\": \"import\"}, \"pd\": {\"payload\": \"import pandas as pd\", \"kind\": \"import\"}, \"CUSTOMERS\": {\"payload\": \"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]\", \"kind\": \"value\"}, \"WAITERS\": {\"payload\": \"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\", \"kind\": \"value\"}, \"to_ds\": {\"payload\": \"from sqlmesh.utils.date import to_ds\", \"kind\": \"import\"}}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"entrypoint\": \"execute\", \"source_type\": \"python\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.orders\"}","10":"{\"name\": \"\\\"sushi\\\".\\\"waiter_as_customer_by_day\\\"\", \"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2824767713\", \"parent_metadata_hash\": \"1349779748\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [{\"name\": \"\\\"sushi\\\".\\\"waiters\\\"\", \"identifier\": \"3386889721\"}, {\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"identifier\": \"1604207722\"}, {\"name\": \"\\\"sushi\\\".\\\"customers\\\"\", \"identifier\": \"3148897116\"}, {\"name\": \"\\\"sushi\\\".\\\"orders\\\"\", \"identifier\": \"3564161223\"}], \"created_ts\": 1680814464891, \"ttl\": \"in 1 week\", \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"486172035\", \"metadata_hash\": \"1992853678\", \"parent_data_hash\": \"2154574190\", \"parent_metadata_hash\": \"1349779748\"}, \"version\": \"1267397572\"}], \"version\": \"3668757715\", \"node\": {\"name\": \"sushi.waiter_as_customer_by_day\", \"kind\": {\"name\": \"INCREMENTAL_BY_TIME_RANGE\", \"time_column\": {\"column\": \"ds\", \"format\": \"%Y-%m-%d\"}}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"audits\": [[\"not_null\", {\"columns\": \"ARRAY(waiter_id)\"}]], \"python_env\": {}, \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"table_properties\": \"('key' = 'value')\", \"query\": \"SELECT w.ds AS ds, w.waiter_id AS waiter_id, wn.name AS waiter_name FROM sushi.waiters AS w JOIN sushi.customers AS c ON w.waiter_id = c.customer_id JOIN sushi.waiter_names AS wn ON w.waiter_id = wn.id\", \"source_type\": \"sql\", \"project\": \"\", \"default_catalog\": null}, \"change_category\": 4, \"base_table_name_override\": \"sushi.waiter_as_customer_by_day\"}","11":"{\"name\": \"\\\"sushi\\\".\\\"waiter_names\\\"\", \"fingerprint\": {\"data_hash\": \"4133862560\", \"metadata_hash\": 
\"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"physical_schema\": \"sqlmesh\", \"parents\": [], \"created_ts\": 1680814464932, \"ttl\": \"in 1 week\", \"previous_versions\": [{\"fingerprint\": {\"data_hash\": \"1876476880\", \"metadata_hash\": \"570478986\", \"parent_data_hash\": \"0\", \"parent_metadata_hash\": \"0\"}, \"version\": \"2505706914\"}], \"version\": \"1204702829\", \"change_category\": 1, \"node\": {\"name\": \"sushi.waiter_names\", \"kind\": {\"name\": \"SEED\", \"path\": \"..\/seeds\/waiter_names.csv\", \"batch_size\": 5}, \"dialect\": \"duckdb\", \"cron\": \"@daily\", \"owner\": \"jen\", \"partitioned_by\": [], \"audits\": [], \"jinja_macros\": {\"packages\": {}, \"root_macros\": {}, \"global_objs\": {}}, \"seed\": {\"content\": \"id,name\\n0,Toby\\n1,Tyson\\n2,Ryan\\n3,George\\n4,Chris\\n5,Max\\n6,Vincent\\n7,Iaroslav\\n8,Emma\\n9,Maia\\n10,Jim\\n\"}, \"source_type\": \"seed\", \"project\": \"\", \"default_catalog\": null}, \"base_table_name_override\": \"sushi.waiter_names\"}","12":"{\"name\":\"\\\"sushi\\\".\\\"waiter_as_customer_by_day\\\"\",\"temp_version\":\"3668757715\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"1936268024\",\"metadata_hash\":\"2088684978\",\"parent_data_hash\":\"3055854652\",\"parent_metadata_hash\":\"665080906\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"486172035\",\"metadata_hash\":\"1992853678\",\"parent_data_hash\":\"2154574190\",\"parent_metadata_hash\":\"1349779748\"},\"version\":\"1267397572\"},{\"fingerprint\":{\"data_hash\":\"486172035\",\"metadata_hash\":\"1992853678\",\"parent_data_hash\":\"2824767713\",\"parent_metadata_hash\":\"1349779748\"},\"version\":\"3668757715\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.waiter_as_customer_by_day\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.waiter_as_customer_by_day\",\"project\":\"\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"duckdb\",\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[[\"not_null\",{\"columns\":\"ARRAY(waiter_id)\"}]],\"grains\":[],\"references\":[],\"physical_properties\":\"('key' = 'value')\",\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"SELECT w.ds AS ds, w.waiter_id AS waiter_id, wn.name AS waiter_name FROM sushi.waiters AS w JOIN sushi.customers AS c ON w.waiter_id = c.customer_id JOIN sushi.waiter_names AS wn ON w.waiter_id = wn.id\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"waiter_names\\\"\",\"identifier\":\"2725136291\"},{\"name\":\"\\\"sushi\\\".\\\"waiters\\\"\",\"identifier\":\"4123940212\"},{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"},{\"name\":\"\\\"sushi\\\".\\\"customers\\\"\",\"identifier\":\"1461038955\"}],\"created_ts\":1680814464891,\"ttl\":\"in 1 
week\",\"version\":\"3668757715\",\"migrated\":true}","13":"{\"name\":\"\\\"sushi\\\".\\\"waiter_names\\\"\",\"temp_version\":\"1204702829\",\"change_category\":1,\"fingerprint\":{\"data_hash\":\"1437406487\",\"metadata_hash\":\"3468846895\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"1876476880\",\"metadata_hash\":\"570478986\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"version\":\"2505706914\"},{\"fingerprint\":{\"data_hash\":\"4133862560\",\"metadata_hash\":\"570478986\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"version\":\"1204702829\",\"change_category\":1,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.waiter_names\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.waiter_names\",\"project\":\"\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"SEED\",\"path\":\"..\/seeds\/waiter_names.csv\",\"batch_size\":5},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"seed\":{\"content\":\"\"},\"column_hashes\":{\"id\":\"3061821109\",\"name\":\"2706736258\"},\"derived_columns_to_types\":{\"id\":\"BIGINT\",\"name\":\"TEXT\"},\"is_hydrated\":false,\"source_type\":\"seed\"},\"parents\":[],\"created_ts\":1680814464932,\"ttl\":\"in 1 week\",\"version\":\"1204702829\",\"migrated\":true}","14":"{\"name\":\"\\\"sushi\\\".\\\"customer_revenue_by_day\\\"\",\"temp_version\":\"1291364031\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"131732542\",\"metadata_hash\":\"1368842087\",\"parent_data_hash\":\"2738168331\",\"parent_metadata_hash\":\"1795276494\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"2657552867\",\"metadata_hash\":\"129771006\",\"parent_data_hash\":\"764310396\",\"parent_metadata_hash\":\"3147731239\"},\"version\":\"1291364031\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.customer_revenue_by_day\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.customer_revenue_by_day\",\"project\":\"\",\"description\":\"Table of revenue from customers by day.\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"hive\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"hive\",\"batch_size\":10,\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"JINJA_QUERY_BEGIN;\\nWITH order_total AS (SELECT oi.order_id AS order_id, SUM(oi.quantity * i.price) AS total, oi.ds AS ds FROM sushi.order_items AS oi LEFT JOIN sushi.items AS i ON oi.item_id = i.id AND oi.ds = i.ds WHERE oi.ds BETWEEN '{{ start_ds }}' AND '{{ end_ds }}' GROUP BY oi.order_id, oi.ds) SELECT CAST(o.customer_id AS 
INT) AS customer_id \/* Customer id *\/, CAST(SUM(ot.total) AS DOUBLE) AS revenue \/* Revenue from orders made by this customer *\/, CAST(o.ds AS TEXT) AS ds \/* Date *\/ FROM sushi.orders AS o LEFT JOIN order_total AS ot ON o.id = ot.order_id AND o.ds = ot.ds WHERE o.ds BETWEEN '{{ start_ds }}' AND '{{ end_ds }}' GROUP BY o.customer_id, o.ds\\nJINJA_END\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"items\\\"\",\"identifier\":\"3721860967\"},{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"},{\"name\":\"\\\"sushi\\\".\\\"order_items\\\"\",\"identifier\":\"1422946820\"}],\"created_ts\":1680814376391,\"ttl\":\"in 1 week\",\"version\":\"1291364031\",\"migrated\":true}","15":"{\"name\":\"\\\"sushi\\\".\\\"top_waiters\\\"\",\"temp_version\":\"3010914162\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"4131026946\",\"metadata_hash\":\"154190563\",\"parent_data_hash\":\"929243525\",\"parent_metadata_hash\":\"2366450878\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"2891807529\",\"metadata_hash\":\"3392493998\",\"parent_data_hash\":\"1940707936\",\"parent_metadata_hash\":\"1276363398\"},\"version\":\"3010914162\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.top_waiters\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.top_waiters\",\"project\":\"\",\"description\":\"View of top waiters.\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"VIEW\",\"materialized\":false},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[[\"unique_values\",{\"columns\":\"ARRAY(waiter_id)\"}]],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"SELECT CAST(waiter_id AS INT) AS waiter_id, CAST(revenue AS DOUBLE) AS revenue FROM sushi.waiter_revenue_by_day WHERE ds = (SELECT MAX(ds) FROM sushi.waiter_revenue_by_day) ORDER BY revenue DESC LIMIT 10\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\",\"identifier\":\"2175947464\"}],\"created_ts\":1680814376384,\"ttl\":\"in 1 week\",\"version\":\"3010914162\",\"migrated\":true}","16":"{\"name\":\"\\\"sushi\\\".\\\"waiter_revenue_by_day\\\"\",\"temp_version\":\"2695875565\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"2224089837\",\"metadata_hash\":\"2504236462\",\"parent_data_hash\":\"2738168331\",\"parent_metadata_hash\":\"1795276494\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"2443934302\",\"metadata_hash\":\"2904050331\",\"parent_data_hash\":\"764310396\",\"parent_metadata_hash\":\"3147731239\"},\"version\":\"2695875565\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.waiter_revenue_by_day\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.waiter_revenue_by_day\",\"project\":\"\",\"description\":\"Table of revenue generated by waiters by 
day.\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"duckdb\",\"batch_size\":10,\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[[\"number_of_rows\",{\"threshold\":\"0\"}]],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"SELECT CAST(o.waiter_id AS INT) AS waiter_id \/* Waiter id *\/, CAST(SUM(oi.quantity * i.price) AS DOUBLE) AS revenue \/* Revenue from orders taken by this waiter *\/, CAST(o.ds AS TEXT) AS ds \/* Date *\/ FROM sushi.orders AS o LEFT JOIN sushi.order_items AS oi ON o.id = oi.order_id AND o.ds = oi.ds LEFT JOIN sushi.items AS i ON oi.item_id = i.id AND oi.ds = i.ds WHERE o.ds BETWEEN @start_ds AND @end_ds GROUP BY o.waiter_id, o.ds\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"items\\\"\",\"identifier\":\"3721860967\"},{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"},{\"name\":\"\\\"sushi\\\".\\\"order_items\\\"\",\"identifier\":\"1422946820\"}],\"created_ts\":1680814376361,\"ttl\":\"in 1 week\",\"version\":\"2695875565\",\"migrated\":true}","17":"{\"name\":\"\\\"sushi\\\".\\\"order_items\\\"\",\"temp_version\":\"1015284155\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"4010068827\",\"metadata_hash\":\"799196655\",\"parent_data_hash\":\"2342431947\",\"parent_metadata_hash\":\"1746080605\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"653664599\",\"metadata_hash\":\"1960934702\",\"parent_data_hash\":\"3170724558\",\"parent_metadata_hash\":\"867324801\"},\"version\":\"1015284155\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.order_items\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.order_items\",\"project\":\"\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"\",\"batch_size\":30,\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"depends_on\":[\"\\\"sushi\\\".\\\"items\\\"\",\"\\\"sushi\\\".\\\"orders\\\"\"],\"columns\":{\"id\":\"INT\",\"order_id\":\"INT\",\"item_id\":\"INT\",\"quantity\":\"INT\",\"ds\":\"TEXT\"},\"audits\":[[\"not_null\",{\"columns\":\"ARRAY(id, order_id, item_id, quantity)\"}],[\"assert_order_items_quantity_exceeds_threshold\",{\"quantity\":\"0\"}]],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{\"execute\":{\"payload\":\"def execute(context, start, end, latest, **kwargs):\\n orders_table = context.table('sushi.orders')\\n items_table = context.table(ITEMS)\\n for dt in iter_dates(start, end):\\n orders = context.fetchdf(\\n f\\\"\\\"\\\"\\n SELECT *\\n FROM {orders_table}\\n WHERE ds = '{to_ds(dt)}'\\n \\\"\\\"\\\"\\n )\\n items = context.fetchdf(\\n f\\\"\\\"\\\"\\n SELECT *\\n FROM {items_table}\\n WHERE ds = '{to_ds(dt)}'\\n \\\"\\\"\\\"\\n )\\n for order_id in orders['id']:\\n n = random.randint(1, 5)\\n yield 
pd.DataFrame({'order_id': order_id, 'item_id': items.\\n sample(n=n)['id'], 'quantity': np.random.randint(1, 10, n),\\n 'ds': to_ds(dt)}).reset_index().rename(columns={'index': 'id'})\",\"kind\":\"definition\",\"name\":\"execute\",\"path\":\"models\/order_items.py\"},\"ITEMS\":{\"payload\":\"'sushi.items'\",\"kind\":\"value\"},\"iter_dates\":{\"payload\":\"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\",\"kind\":\"definition\",\"name\":\"iter_dates\",\"path\":\"helper.py\"},\"timedelta\":{\"payload\":\"from datetime import timedelta\",\"kind\":\"import\"},\"set_seed\":{\"payload\":\"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\",\"kind\":\"definition\",\"name\":\"set_seed\",\"path\":\"helper.py\"},\"random\":{\"payload\":\"import random\",\"kind\":\"import\"},\"np\":{\"payload\":\"import numpy as np\",\"kind\":\"import\"},\"to_ds\":{\"payload\":\"from sqlmesh.utils.date import to_ds\",\"kind\":\"import\"},\"pd\":{\"payload\":\"import pandas as pd\",\"kind\":\"import\"}},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{\"assert_order_items_quantity_exceeds_threshold\":{\"name\":\"assert_order_items_quantity_exceeds_threshold\",\"dialect\":\"\",\"skip\":false,\"blocking\":true,\"standalone\":false,\"query\":\"SELECT * FROM @this_model WHERE quantity <= @quantity\",\"defaults\":{},\"expressions\":[],\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]}}},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"entrypoint\":\"execute\",\"source_type\":\"python\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"items\\\"\",\"identifier\":\"3721860967\"},{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"}],\"created_ts\":1680814376401,\"ttl\":\"in 1 week\",\"version\":\"1015284155\",\"migrated\":true}","18":"{\"name\":\"\\\"sushi\\\".\\\"items\\\"\",\"temp_version\":\"312608270\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"1862622614\",\"metadata_hash\":\"3651173237\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"1960378930\",\"metadata_hash\":\"2900807542\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"version\":\"312608270\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.items\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.items\",\"project\":\"\",\"start\":\"Jan 1 2022\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"\",\"batch_size\":30,\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"depends_on\":[],\"columns\":{\"id\":\"INT\",\"name\":\"TEXT\",\"price\":\"DOUBLE\",\"ds\":\"TEXT\"},\"audits\":[[\"accepted_values\",{\"column\":\"name\",\"values\":\"ARRAY('Ahi', 'Aji', 'Amaebi', 'Anago', 'Aoyagi', 'Bincho', 'Katsuo', 'Ebi', 'Escolar', 'Hamachi', 'Hamachi Toro', 'Hirame', 'Hokigai', 'Hotate', 'Ika', 'Ikura', 'Iwashi', 'Kani', 'Kanpachi', 'Maguro', 'Saba', 'Sake', 'Sake Toro', 'Tai', 'Tako', 'Tamago', 'Tobiko', 'Toro', 'Tsubugai', 'Umi Masu', 'Unagi', 
'Uni')\"}],[\"not_null\",{\"columns\":\"ARRAY(name, price)\"}],[\"assert_items_price_exceeds_threshold\",{\"price\":\"0\"}]],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{\"execute\":{\"payload\":\"def execute(context, start, end, latest, **kwargs):\\n dfs = []\\n for dt in iter_dates(start, end):\\n num_items = random.randint(10, len(ITEMS))\\n dfs.append(pd.DataFrame({'name': random.sample(ITEMS, num_items),\\n 'price': np.random.uniform(3.0, 10.0, size=num_items).round(2),\\n 'ds': to_ds(dt)}).reset_index().rename(columns={'index': 'id'}))\\n return pd.concat(dfs)\",\"kind\":\"definition\",\"name\":\"execute\",\"path\":\"models\/items.py\"},\"iter_dates\":{\"payload\":\"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\",\"kind\":\"definition\",\"name\":\"iter_dates\",\"path\":\"helper.py\"},\"timedelta\":{\"payload\":\"from datetime import timedelta\",\"kind\":\"import\"},\"set_seed\":{\"payload\":\"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\",\"kind\":\"definition\",\"name\":\"set_seed\",\"path\":\"helper.py\"},\"random\":{\"payload\":\"import random\",\"kind\":\"import\"},\"np\":{\"payload\":\"import numpy as np\",\"kind\":\"import\"},\"ITEMS\":{\"payload\":\"['Ahi', 'Aji', 'Amaebi', 'Anago', 'Aoyagi', 'Bincho', 'Katsuo', 'Ebi', 'Escolar', 'Hamachi', 'Hamachi Toro', 'Hirame', 'Hokigai', 'Hotate', 'Ika', 'Ikura', 'Iwashi', 'Kani', 'Kanpachi', 'Maguro', 'Saba', 'Sake', 'Sake Toro', 'Tai', 'Tako', 'Tamago', 'Tobiko', 'Toro', 'Tsubugai', 'Umi Masu', 'Unagi', 'Uni']\",\"kind\":\"value\"},\"pd\":{\"payload\":\"import pandas as pd\",\"kind\":\"import\"},\"to_ds\":{\"payload\":\"from sqlmesh.utils.date import to_ds\",\"kind\":\"import\"}},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{\"assert_items_price_exceeds_threshold\":{\"name\":\"assert_items_price_exceeds_threshold\",\"dialect\":\"\",\"skip\":false,\"blocking\":true,\"standalone\":false,\"query\":\"SELECT * FROM @this_model WHERE price <= @price\",\"defaults\":{},\"expressions\":[],\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]}}},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"entrypoint\":\"execute\",\"source_type\":\"python\"},\"parents\":[],\"created_ts\":1680814376399,\"ttl\":\"in 1 
week\",\"version\":\"312608270\",\"migrated\":true}","19":"{\"name\":\"\\\"sushi\\\".\\\"waiter_as_customer_by_day\\\"\",\"temp_version\":\"1267397572\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"849558693\",\"metadata_hash\":\"2088684978\",\"parent_data_hash\":\"2705906012\",\"parent_metadata_hash\":\"665080906\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"486172035\",\"metadata_hash\":\"1992853678\",\"parent_data_hash\":\"2154574190\",\"parent_metadata_hash\":\"1349779748\"},\"version\":\"1267397572\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.waiter_as_customer_by_day\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.waiter_as_customer_by_day\",\"project\":\"\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"duckdb\",\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[[\"not_null\",{\"columns\":\"ARRAY(waiter_id)\"}]],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"SELECT w.ds AS ds, w.waiter_id AS waiter_id, wn.name AS waiter_name FROM sushi.waiters AS w JOIN sushi.customers AS c ON w.waiter_id = c.customer_id JOIN sushi.waiter_names AS wn ON w.waiter_id = wn.id\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"waiter_names\\\"\",\"identifier\":\"1609854746\"},{\"name\":\"\\\"sushi\\\".\\\"waiters\\\"\",\"identifier\":\"4123940212\"},{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"},{\"name\":\"\\\"sushi\\\".\\\"customers\\\"\",\"identifier\":\"1461038955\"}],\"created_ts\":1680814376348,\"ttl\":\"in 1 
week\",\"version\":\"1267397572\",\"migrated\":true}","20":"{\"name\":\"\\\"sushi\\\".\\\"waiter_names\\\"\",\"temp_version\":\"2505706914\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"3604872020\",\"metadata_hash\":\"3468846895\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"1876476880\",\"metadata_hash\":\"570478986\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"version\":\"2505706914\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.waiter_names\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.waiter_names\",\"project\":\"\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"SEED\",\"path\":\"..\/seeds\/waiter_names.csv\",\"batch_size\":5},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"seed\":{\"content\":\"\"},\"column_hashes\":{\"id\":\"3679804453\",\"name\":\"537745575\"},\"derived_columns_to_types\":{\"id\":\"BIGINT\",\"name\":\"TEXT\"},\"is_hydrated\":false,\"source_type\":\"seed\"},\"parents\":[],\"created_ts\":1680814376389,\"ttl\":\"in 1 week\",\"version\":\"2505706914\",\"migrated\":true}","21":"{\"name\":\"\\\"sushi\\\".\\\"customers\\\"\",\"temp_version\":\"2359719298\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"2431070412\",\"metadata_hash\":\"3063653103\",\"parent_data_hash\":\"458609840\",\"parent_metadata_hash\":\"2007040660\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"3553985282\",\"metadata_hash\":\"570478986\",\"parent_data_hash\":\"777615193\",\"parent_metadata_hash\":\"2042613269\"},\"version\":\"2359719298\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.customers\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.customers\",\"project\":\"\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"FULL\"},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{\"noop\":{\"payload\":\"def noop(context, start, end, latest, **kwargs):\\n pass\",\"kind\":\"definition\",\"name\":\"noop\",\"path\":\"hooks\/hooks.py\"}},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"SELECT DISTINCT CAST(customer_id AS INT) AS customer_id FROM sushi.orders AS o\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"}],\"created_ts\":1680814376388,\"ttl\":\"in 1 
week\",\"version\":\"2359719298\",\"migrated\":true}","22":"{\"name\":\"\\\"sushi\\\".\\\"waiters\\\"\",\"temp_version\":\"2059227798\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"2037801255\",\"metadata_hash\":\"3063653103\",\"parent_data_hash\":\"458609840\",\"parent_metadata_hash\":\"2007040660\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"3501061139\",\"metadata_hash\":\"570478986\",\"parent_data_hash\":\"777615193\",\"parent_metadata_hash\":\"2042613269\"},\"version\":\"2059227798\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.waiters\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.waiters\",\"project\":\"\",\"owner\":\"jen\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"duckdb\",\"kind\":{\"name\":\"EMBEDDED\",\"disable_restatement\":true},\"partitioned_by\":[],\"clustered_by\":[],\"audits\":[],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{\"incremental_by_ds\":{\"payload\":\"def incremental_by_ds(evaluator, column):\\n expression = evaluator.transform(exp.Between(this=column, low=MacroVar(\\n this='start_ds'), high=MacroVar(this='end_ds')))\\n if not isinstance(expression, exp.Expression):\\n raise MacroEvalError(\\n f'Return type is {type(expression)}, expected exp.Expression')\\n return expression\",\"kind\":\"definition\",\"name\":\"incremental_by_ds\",\"path\":\"macros\/macros.py\"},\"exp\":{\"payload\":\"import sqlglot.expressions as exp\",\"kind\":\"import\"},\"MacroVar\":{\"payload\":\"from sqlmesh.core.dialect import MacroVar\",\"kind\":\"import\"},\"MacroEvalError\":{\"payload\":\"from sqlmesh.utils.errors import MacroEvalError\",\"kind\":\"import\"}},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"query\":\"SELECT DISTINCT CAST(waiter_id AS INT) AS waiter_id, CAST(ds AS TEXT) AS ds FROM sushi.orders AS o WHERE @incremental_by_ds(ds)\",\"source_type\":\"sql\"},\"parents\":[{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"identifier\":\"1250207606\"}],\"created_ts\":1680814376387,\"ttl\":\"in 1 week\",\"version\":\"2059227798\",\"migrated\":true}","23":"{\"name\":\"\\\"sushi\\\".\\\"orders\\\"\",\"temp_version\":\"925846788\",\"change_category\":4,\"fingerprint\":{\"data_hash\":\"1588786367\",\"metadata_hash\":\"1674367104\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"previous_versions\":[{\"fingerprint\":{\"data_hash\":\"1628439771\",\"metadata_hash\":\"2745052130\",\"parent_data_hash\":\"0\",\"parent_metadata_hash\":\"0\"},\"version\":\"925846788\",\"change_category\":4,\"physical_schema\":\"sqlmesh\"}],\"base_table_name_override\":\"sushi.orders\",\"physical_schema\":\"sqlmesh\",\"node\":{\"name\":\"sushi.orders\",\"project\":\"\",\"description\":\"Table of sushi 
orders.\",\"start\":\"2022-01-01\",\"cron\":\"@daily\",\"tags\":[],\"dialect\":\"\",\"kind\":{\"name\":\"INCREMENTAL_BY_TIME_RANGE\",\"on_destructive_change\":\"ERROR\",\"dialect\":\"\",\"batch_size\":30,\"forward_only\":false,\"disable_restatement\":false,\"time_column\":{\"column\":\"ds\",\"format\":\"%Y-%m-%d\"}},\"partitioned_by\":[],\"clustered_by\":[],\"depends_on\":[],\"columns\":{\"id\":\"INT\",\"customer_id\":\"INT\",\"waiter_id\":\"INT\",\"start_ts\":\"INT\",\"end_ts\":\"INT\",\"ds\":\"TEXT\"},\"audits\":[],\"grains\":[],\"references\":[],\"allow_partials\":false,\"signals\":[],\"enabled\":true,\"python_env\":{\"execute\":{\"payload\":\"def execute(context, start, end, latest, **kwargs):\\n dfs = []\\n for dt in iter_dates(start, end):\\n num_orders = random.randint(10, 30)\\n start_ts = [int((dt + timedelta(seconds=random.randint(0, 80000))).\\n timestamp()) for _ in range(num_orders)]\\n end_ts = [int(s + random.randint(0, 60 * 60)) for s in start_ts]\\n dfs.append(pd.DataFrame({'customer_id': random.choices(CUSTOMERS, k\\n =num_orders), 'waiter_id': random.choices(WAITERS, k=num_orders\\n ), 'start_ts': start_ts, 'end_ts': end_ts, 'ds': to_ds(dt)}).\\n reset_index().rename(columns={'index': 'id'}))\\n return pd.concat(dfs)\",\"kind\":\"definition\",\"name\":\"execute\",\"path\":\"models\/orders.py\"},\"iter_dates\":{\"payload\":\"def iter_dates(start, end):\\n for i in range((end - start).days + 1):\\n dt = start + timedelta(days=i)\\n set_seed(dt)\\n yield dt\",\"kind\":\"definition\",\"name\":\"iter_dates\",\"path\":\"helper.py\"},\"timedelta\":{\"payload\":\"from datetime import timedelta\",\"kind\":\"import\"},\"set_seed\":{\"payload\":\"def set_seed(dt):\\n ts = int(dt.timestamp())\\n random.seed(ts)\\n np.random.seed(ts)\",\"kind\":\"definition\",\"name\":\"set_seed\",\"path\":\"helper.py\"},\"random\":{\"payload\":\"import random\",\"kind\":\"import\"},\"np\":{\"payload\":\"import numpy as np\",\"kind\":\"import\"},\"pd\":{\"payload\":\"import pandas as pd\",\"kind\":\"import\"},\"CUSTOMERS\":{\"payload\":\"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]\",\"kind\":\"value\"},\"WAITERS\":{\"payload\":\"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\",\"kind\":\"value\"},\"to_ds\":{\"payload\":\"from sqlmesh.utils.date import to_ds\",\"kind\":\"import\"}},\"jinja_macros\":{\"packages\":{},\"root_macros\":{},\"global_objs\":{},\"create_builtins_module\":\"sqlmesh.utils.jinja\",\"top_level_packages\":[]},\"audit_definitions\":{},\"mapping_schema\":{},\"extract_dependencies_from_query\":true,\"entrypoint\":\"execute\",\"source_type\":\"python\"},\"parents\":[],\"created_ts\":1680814376402,\"ttl\":\"in 1 
week\",\"version\":\"925846788\",\"migrated\":true}"},"kind_name":{"0":"INCREMENTAL_BY_TIME_RANGE","1":"INCREMENTAL_BY_TIME_RANGE","2":"VIEW","3":"EMBEDDED","4":"FULL","5":"SEED","6":"INCREMENTAL_BY_TIME_RANGE","7":"INCREMENTAL_BY_TIME_RANGE","8":"INCREMENTAL_BY_TIME_RANGE","9":"INCREMENTAL_BY_TIME_RANGE","10":"INCREMENTAL_BY_TIME_RANGE","11":"SEED","12":"INCREMENTAL_BY_TIME_RANGE","13":"SEED","14":"INCREMENTAL_BY_TIME_RANGE","15":"VIEW","16":"INCREMENTAL_BY_TIME_RANGE","17":"INCREMENTAL_BY_TIME_RANGE","18":"INCREMENTAL_BY_TIME_RANGE","19":"INCREMENTAL_BY_TIME_RANGE","20":"SEED","21":"FULL","22":"EMBEDDED","23":"INCREMENTAL_BY_TIME_RANGE"},"updated_ts":{"0":1680814376348,"1":1680814376361,"2":1680814376384,"3":1680814376387,"4":1680814376388,"5":1680814376389,"6":1680814376391,"7":1680814376399,"8":1680814376401,"9":1680814376402,"10":1680814464891,"11":1680814464932,"12":1680814464891,"13":1680814464932,"14":1680814376391,"15":1680814376384,"16":1680814376361,"17":1680814376401,"18":1680814376399,"19":1680814376348,"20":1680814376389,"21":1680814376388,"22":1680814376387,"23":1680814376402},"unpaused_ts":{"0":null,"1":null,"2":null,"3":null,"4":null,"5":null,"6":null,"7":null,"8":null,"9":null,"10":null,"11":null,"12":null,"13":null,"14":null,"15":null,"16":null,"17":null,"18":null,"19":null,"20":null,"21":null,"22":null,"23":null},"ttl_ms":{"0":604800000,"1":604800000,"2":604800000,"3":604800000,"4":604800000,"5":604800000,"6":604800000,"7":604800000,"8":604800000,"9":604800000,"10":604800000,"11":604800000,"12":604800000,"13":604800000,"14":604800000,"15":604800000,"16":604800000,"17":604800000,"18":604800000,"19":604800000,"20":604800000,"21":604800000,"22":604800000,"23":604800000},"unrestorable":{"0":false,"1":false,"2":false,"3":false,"4":false,"5":false,"6":false,"7":false,"8":false,"9":false,"10":false,"11":false,"12":false,"13":false,"14":false,"15":false,"16":false,"17":false,"18":false,"19":false,"20":false,"21":false,"22":false,"23":false}} \ No newline at end of file diff --git a/tests/fixtures/migrations/versions.json b/tests/fixtures/migrations/versions.json new file mode 100644 index 0000000000..5eac7ed987 --- /dev/null +++ b/tests/fixtures/migrations/versions.json @@ -0,0 +1 @@ +{"schema_version":{"0":60},"sqlglot_version":{"0":"25.31.4"},"sqlmesh_version":{"0":"0.134.0"}} diff --git a/tooling/validating_migration_numbers.sh b/tooling/validating_migration_numbers.sh index 6dbb597dc1..6997d41fe1 100755 --- a/tooling/validating_migration_numbers.sh +++ b/tooling/validating_migration_numbers.sh @@ -14,7 +14,9 @@ numbers=() for file in "${migration_files[@]}"; do if [[ $file =~ ^v0*([0-9]+)_ ]]; then num=${BASH_REMATCH[1]} - numbers+=("$num") + if [[ "$num" -gt 0 ]]; then + numbers+=("$num") + fi fi done