Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
75 commits
Select commit Hold shift + click to select a range
a6c20e5
feat(observability): integrate Langfuse for LLM request tracing
ding113 Feb 15, 2026
aacaae4
fix(logs): improve fake 200 error logs (#790)
ding113 Feb 15, 2026
de634d9
refactor(provider): improve provider page performance (#789)
ding113 Feb 15, 2026
f1a5167
fix(repository): wrap SQL interval arithmetic in parentheses and cast…
ding113 Feb 15, 2026
2290bad
test(repository): add regression tests for SQL timezone parentheses a…
ding113 Feb 16, 2026
1d164c5
fix(proxy): avoid deadlock on reader.cancel() for large chunked respo…
ding113 Feb 16, 2026
9462004
feat(provider): add swap cache TTL billing option (#798)
miraserver Feb 17, 2026
8f50adc
refactor(proxy): introduce EndpointPolicy to replace hardcoded count_…
ding113 Feb 18, 2026
61cce9b
refactor: security auth overhaul and provider batch operations (#806)
ding113 Feb 19, 2026
e853954
perf(dashboard): comprehensive homepage performance optimization (#808)
ding113 Feb 19, 2026
4773425
fix(drizzle): add IF NOT EXISTS to index creation statements
ding113 Feb 19, 2026
e2c5b5a
refactor(security): replace timingSafeEqual with XOR loop for edge ru…
ding113 Feb 19, 2026
0ff0409
fix(statistics): use bucket alias instead of expression in GROUP BY c…
ding113 Feb 19, 2026
32342dc
fix(dashboard): throw on statistics fetch failure and constrain chart…
ding113 Feb 19, 2026
ab92a2d
fix(dashboard): align leaderboard card heights by removing row-span-2
ding113 Feb 19, 2026
ff054a6
feat(logs): add session reuse origin decision chain (#736) (#810)
ding113 Feb 19, 2026
9c0baba
fix(dashboard): prevent chart overflow with proper flex sizing
ding113 Feb 19, 2026
c620c29
fix(dashboard): set fixed height for statistics chart container
ding113 Feb 19, 2026
7d254d1
fix(repository): filter origin chain query to require initial_selecti…
ding113 Feb 19, 2026
dab5cd5
refactor(logs): redesign origin decision section with StepCard layout
ding113 Feb 19, 2026
d85e540
perf(keys): replace DISTINCT ON with LATERAL JOIN and add composite p…
ding113 Feb 19, 2026
1c549a6
feat(repo): add shared ledger conditions and types
ding113 Feb 19, 2026
076684b
feat(schema): add usage_ledger table definition
ding113 Feb 19, 2026
339c556
feat(repo): add usage-ledger repository module
ding113 Feb 19, 2026
3bc82b4
feat(db): add fn_upsert_usage_ledger trigger function
ding113 Feb 19, 2026
76c20cb
test(ledger): add trigger verification test infrastructure
ding113 Feb 19, 2026
399cf65
refactor(repo): migrate overview.ts read paths to usage_ledger
ding113 Feb 19, 2026
9bf89df
feat(migration): add idempotent ledger backfill service
ding113 Feb 19, 2026
4c9a692
refactor(repo): migrate message.ts session aggregation to usage_ledger
ding113 Feb 19, 2026
2111684
refactor(repo): migrate leaderboard.ts read paths to usage_ledger
ding113 Feb 19, 2026
d40b570
refactor(repo): migrate statistics.ts read paths to usage_ledger
ding113 Feb 19, 2026
44be131
refactor(repo): migrate usage-logs summary + my-usage to usage_ledger
ding113 Feb 19, 2026
3437a95
refactor(repo): migrate key.ts + provider.ts read paths to usage_ledger
ding113 Feb 19, 2026
d83d140
fix(users): update resetUserAllStatistics to clear usage_ledger + ver…
ding113 Feb 19, 2026
543a3ce
feat(export): add ledger-only export option
ding113 Feb 19, 2026
4d6b54f
feat(migration): generate drizzle migration + wire startup backfill
ding113 Feb 19, 2026
31371b2
feat(fallback): add ledger-only mode support
ding113 Feb 19, 2026
10d4641
test(consistency): add ledger data consistency verification suite
ding113 Feb 19, 2026
2557aea
test(integration): add ledger integration and consistency tests
ding113 Feb 19, 2026
4fdbf97
test(integration): add ledger integration tests
ding113 Feb 19, 2026
aa7c674
test(ledger): fix unit tests after ledger migration
ding113 Feb 19, 2026
8be7490
feat(ledger): usage_ledger decoupling (#811)
ding113 Feb 20, 2026
c483d4c
refactor(database-backup): route pg tools through docker compose exec…
ding113 Feb 20, 2026
8775575
chore: format code (dev-c483d4c)
github-actions[bot] Feb 20, 2026
7784b2d
fix(log-cleanup): remove SKIP LOCKED from batch delete query
ding113 Feb 20, 2026
d922b12
feat(i18n): add database size and table count translation keys
ding113 Feb 20, 2026
d019a2d
fix(i18n): refine zh-TW and ja translation wording
ding113 Feb 20, 2026
291aed7
chore(i18n): expand placeholder audit allowlist for shared CJK terms
ding113 Feb 20, 2026
0e398f8
fix(statistics): serialize dates to ISO strings in quota cost queries
ding113 Feb 20, 2026
5482333
fix(repository): use ARRAY[] literal instead of row constructor in un…
ding113 Feb 20, 2026
dabfd77
chore: format code (dev-5482333)
github-actions[bot] Feb 20, 2026
658dbc3
feat(proxy): client restriction refactor - blockedClients + Claude Co…
ding113 Feb 22, 2026
bd78861
fix: Removed a vertical separator between quick period buttons and da…
tesgth032 Feb 22, 2026
3ade40f
fix(proxy): read Claude Code betas signal from anthropic-beta header …
ding113 Feb 22, 2026
38f8537
fix(proxy): persist Fake 200 error detail to DB/Redis and display in …
ding113 Feb 22, 2026
af072b3
fix(proxy): relax Claude Code detection to 4-signal system for CLI 2.…
ding113 Feb 22, 2026
6ab889d
fix(ledger-backfill): skip already-synced records to prevent redundan…
ding113 Feb 22, 2026
90fbeaf
perf(dashboard): lazy-load usage data on users page for instant first…
ding113 Feb 23, 2026
9535276
fix(dashboard): harden keyset cursor parsing and timestamp precision …
ding113 Feb 23, 2026
2502cf4
fix(dashboard): prevent memory leaks from unbounded polling, caches, …
ding113 Feb 23, 2026
17d0a96
refactor(test): consolidate vitest configs from root into tests/configs/
ding113 Feb 23, 2026
4259d4c
fix(log-cleanup): correct affected-row count and rolling cron date
ding113 Feb 24, 2026
dcc73f4
fix(repository): convert Date objects to ISO strings in raw sql templ…
ding113 Feb 24, 2026
86a5e64
chore: format code (dev-dcc73f4)
github-actions[bot] Feb 24, 2026
2e84b12
fix(ui): improve mobile provider form UX and add client restriction c…
ding113 Feb 24, 2026
dc6b6d0
refactor(ui): replace TagInput with checkbox-based preset selection i…
ding113 Feb 24, 2026
865fb0a
feat(ui): add model vendor icons to usage tables and price filters (#…
ding113 Feb 25, 2026
7e45ac4
fix(ui): resolve visual issues across 4 settings pages
ding113 Feb 25, 2026
4bcebb9
fix: responses/compact endpoint, CB cache invalidation, client restri…
ding113 Feb 26, 2026
8dc3e70
feat(notification): add cache hit rate alert with review fixes (#834)
ding113 Feb 26, 2026
15f63e2
feat(error-rules): add built-in rules for OpenAI Responses API errors
ding113 Feb 26, 2026
40d7b62
fix: 移除 placeholderData 配置以修复用户搜索时分页无法正常工作的问题 (#841)
YewFence Feb 27, 2026
aa4e03d
修复首页统计图表在小高度下裁切 (#838)
tesgth032 Feb 27, 2026
aa2dabd
修复:熔断器禁用后仍拦截供应商 (#837)
tesgth032 Feb 27, 2026
4ad4ffa
feat: add provider scheduled active time & remove joinClaudePool (#844)
ding113 Feb 27, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
13 changes: 13 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,16 @@ FETCH_HEADERS_TIMEOUT=600000
FETCH_BODY_TIMEOUT=600000
MAX_RETRY_ATTEMPTS_DEFAULT=2 # 单供应商最大尝试次数(含首次调用),范围 1-10,留空使用默认值 2

# Langfuse Observability (optional, auto-enabled when keys are set)
# 功能说明:企业级 LLM 可观测性集成,自动追踪所有代理请求的完整生命周期
# - 配置 PUBLIC_KEY 和 SECRET_KEY 后自动启用
# - 支持 Langfuse Cloud 和自托管实例
LANGFUSE_PUBLIC_KEY= # Langfuse project public key (pk-lf-...)
LANGFUSE_SECRET_KEY= # Langfuse project secret key (sk-lf-...)
LANGFUSE_BASE_URL=https://cloud.langfuse.com # Langfuse server URL (self-hosted or cloud)
LANGFUSE_SAMPLE_RATE=1.0 # Trace sampling rate (0.0-1.0, default: 1.0 = 100%)
LANGFUSE_DEBUG=false # Enable Langfuse debug logging

# 智能探测配置
# 功能说明:当熔断器处于 OPEN 状态时,定期探测供应商以实现更快恢复
# - ENABLE_SMART_PROBING:是否启用智能探测(默认:false)
Expand All @@ -152,6 +162,9 @@ PROBE_TIMEOUT_MS=5000
#
# ENDPOINT_PROBE_INTERVAL_MS controls the base interval. Single-vendor and timeout intervals are fixed.
ENDPOINT_PROBE_INTERVAL_MS=60000
# When no endpoints are due, scheduler will still poll DB periodically to pick up config changes.
# Default: min(ENDPOINT_PROBE_INTERVAL_MS, 30000)
ENDPOINT_PROBE_IDLE_DB_POLL_INTERVAL_MS=30000
ENDPOINT_PROBE_TIMEOUT_MS=5000
ENDPOINT_PROBE_CONCURRENCY=10
ENDPOINT_PROBE_CYCLE_JITTER_MS=1000
Expand Down
7 changes: 1 addition & 6 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,7 @@

# testing
/coverage
/coverage-quota
/coverage-my-usage
/coverage-proxy-guard-pipeline
/coverage-thinking-signature-rectifier
/coverage-logs-sessionid-time-filter
/coverage-usage-logs-sessionid-search
tests/.tmp-*

# next.js
/.next/
Expand Down
31 changes: 31 additions & 0 deletions .sisyphus/evidence/task-11-session-agg-migrated.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
Task 11: Migrate message.ts session aggregation to usage_ledger
================================================================

File changed: src/repository/message.ts

Functions migrated:
1. aggregateSessionStats() - single session stats
2. aggregateMultipleSessionStats() - batch session stats

Changes per function:
- .from(messageRequest) -> .from(usageLedger)
- messageRequest.X -> usageLedger.X for all aggregated columns
- EXCLUDE_WARMUP_CONDITION -> LEDGER_BILLING_CONDITION (warmup already excluded at trigger level)
- Removed isNull(messageRequest.deletedAt) (no deletedAt on ledger)
- Provider sub-query: messageRequest.providerId -> usageLedger.finalProviderId
- Cache TTL sub-query: messageRequest.cacheTtlApplied -> usageLedger.cacheTtlApplied
- Model sub-query: messageRequest.model -> usageLedger.model
- FILTER (WHERE EXCLUDE_WARMUP_CONDITION) removed from aggregates (ledger has no warmup rows)

NOT migrated (detail-view, stays on messageRequest):
- userInfo sub-query (step 4) - needs userAgent, apiType, key join
- findMessageRequestById, findMessageRequestBySessionId, etc.

Import changes:
- Added: usageLedger from schema
- Added: LEDGER_BILLING_CONDITION from ledger-conditions
- Kept: messageRequest, EXCLUDE_WARMUP_CONDITION (used by detail functions)

Return types: UNCHANGED

Typecheck: PASS (tsgo exit code 0, 0.51s)
29 changes: 29 additions & 0 deletions .sisyphus/evidence/task-9-overview-migrated.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
Task 9: Migrate overview.ts read paths to usage_ledger
========================================================

File: src/repository/overview.ts

Changes:
- Imports: messageRequest -> usageLedger, EXCLUDE_WARMUP_CONDITION -> LEDGER_BILLING_CONDITION
- Removed: isNull import (no deletedAt on ledger)

getOverviewMetrics():
- .from(messageRequest) -> .from(usageLedger)
- messageRequest.costUsd -> usageLedger.costUsd
- messageRequest.durationMs -> usageLedger.durationMs
- messageRequest.createdAt -> usageLedger.createdAt
- Error rate: statusCode >= 400 -> NOT isSuccess (pre-computed boolean)
- Removed: isNull(messageRequest.deletedAt)
- EXCLUDE_WARMUP_CONDITION -> LEDGER_BILLING_CONDITION

getOverviewMetricsWithComparison():
- Same pattern as above for all 3 parallel queries (today, yesterday, RPM)
- messageRequest.userId -> usageLedger.userId
- Error rate: statusCode >= 400 -> NOT isSuccess

Interfaces unchanged:
- OverviewMetrics: todayRequests, todayCost, avgResponseTime, todayErrorRate
- OverviewMetricsWithComparison: extends OverviewMetrics + yesterday comparison + RPM

Verification:
- bun run typecheck: EXIT_CODE:0 (clean)
2 changes: 1 addition & 1 deletion biome.json
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
{
"$schema": "https://biomejs.dev/schemas/2.3.14/schema.json",
"$schema": "https://biomejs.dev/schemas/2.4.4/schema.json",
"vcs": {
"enabled": true,
"clientKind": "git",
Expand Down
4 changes: 3 additions & 1 deletion dev/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,9 @@ dev: db
@echo " REDIS_URL=$(LOCAL_REDIS_URL)"
@echo ""
@DSN="$(LOCAL_DSN)" REDIS_URL="$(LOCAL_REDIS_URL)" ENABLE_RATE_LIMIT="$(ENABLE_RATE_LIMIT)" \
ADMIN_TOKEN="$(ADMIN_TOKEN)" bun run dev
ADMIN_TOKEN="$(ADMIN_TOKEN)" \
PG_COMPOSE_EXEC="docker compose -f $(CURDIR)/$(COMPOSE_FILE) -p $(PROJECT_NAME)" \
bun run dev

build:
@$(COMPOSE) --profile app build app
Expand Down
14 changes: 14 additions & 0 deletions drizzle/0068_flaky_swarm.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
-- Note: message_request is a high-write table. Standard CREATE INDEX may block writes during index creation.
-- Drizzle migrator does not support CREATE INDEX CONCURRENTLY. If write blocking is a concern,
-- manually pre-create indexes with CONCURRENTLY before running this migration (IF NOT EXISTS prevents conflicts).
-- Direct lookup of a key row by its raw key string.
CREATE INDEX IF NOT EXISTS "idx_keys_key" ON "keys" USING btree ("key");--> statement-breakpoint
-- Keyset pagination of a single key's requests, newest first, over non-deleted rows.
CREATE INDEX IF NOT EXISTS "idx_message_request_key_created_at_id" ON "message_request" USING btree ("key","created_at" DESC NULLS LAST,"id" DESC NULLS LAST) WHERE "message_request"."deleted_at" IS NULL;--> statement-breakpoint
-- Per-key model/endpoint aggregation; partial predicates exclude soft-deleted rows,
-- NULL values, and warmup requests (blocked_by = 'warmup').
CREATE INDEX IF NOT EXISTS "idx_message_request_key_model_active" ON "message_request" USING btree ("key","model") WHERE "message_request"."deleted_at" IS NULL AND "message_request"."model" IS NOT NULL AND ("message_request"."blocked_by" IS NULL OR "message_request"."blocked_by" <> 'warmup');--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_message_request_key_endpoint_active" ON "message_request" USING btree ("key","endpoint") WHERE "message_request"."deleted_at" IS NULL AND "message_request"."endpoint" IS NOT NULL AND ("message_request"."blocked_by" IS NULL OR "message_request"."blocked_by" <> 'warmup');--> statement-breakpoint
-- Global keyset pagination over non-deleted rows, newest first.
CREATE INDEX IF NOT EXISTS "idx_message_request_created_at_id_active" ON "message_request" USING btree ("created_at" DESC NULLS LAST,"id" DESC NULLS LAST) WHERE "message_request"."deleted_at" IS NULL;--> statement-breakpoint
-- Filtering/aggregation by model and by status_code; NULLs excluded to keep the indexes small.
CREATE INDEX IF NOT EXISTS "idx_message_request_model_active" ON "message_request" USING btree ("model") WHERE "message_request"."deleted_at" IS NULL AND "message_request"."model" IS NOT NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_message_request_status_code_active" ON "message_request" USING btree ("status_code") WHERE "message_request"."deleted_at" IS NULL AND "message_request"."status_code" IS NOT NULL;--> statement-breakpoint
-- Endpoint selection path: enabled endpoints per vendor/type in sort order.
CREATE INDEX IF NOT EXISTS "idx_provider_endpoints_pick_enabled" ON "provider_endpoints" USING btree ("vendor_id","provider_type","is_enabled","sort_order","id") WHERE "provider_endpoints"."deleted_at" IS NULL;--> statement-breakpoint
-- Provider lookup by vendor/type/url, and enabled-provider scans restricted to
-- rows with a positive vendor id.
CREATE INDEX IF NOT EXISTS "idx_providers_vendor_type_url_active" ON "providers" USING btree ("provider_vendor_id","provider_type","url") WHERE "providers"."deleted_at" IS NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_providers_enabled_vendor_type" ON "providers" USING btree ("provider_vendor_id","provider_type") WHERE "providers"."deleted_at" IS NULL AND "providers"."is_enabled" = true AND "providers"."provider_vendor_id" IS NOT NULL AND "providers"."provider_vendor_id" > 0;--> statement-breakpoint
-- GIN index for containment queries on the users.tags array/jsonb column.
CREATE INDEX IF NOT EXISTS "idx_users_tags_gin" ON "users" USING gin ("tags") WHERE "users"."deleted_at" IS NULL;
1 change: 1 addition & 0 deletions drizzle/0069_special_squirrel_girl.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
-- Provider-level toggle for the "swap cache TTL billing" option (PR #798).
-- NOT NULL with DEFAULT false, so all existing provider rows are backfilled to false.
ALTER TABLE "providers" ADD COLUMN "swap_cache_ttl_billing" boolean DEFAULT false NOT NULL;
1 change: 1 addition & 0 deletions drizzle/0070_stormy_exiles.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
-- Per-request record of whether swap-cache-TTL billing was applied. Column is
-- nullable (no NOT NULL), presumably so "unknown" can be distinguished from an
-- explicit false on old rows — TODO confirm intent against write path.
ALTER TABLE "message_request" ADD COLUMN "swap_cache_ttl_applied" boolean DEFAULT false;
2 changes: 2 additions & 0 deletions drizzle/0071_purple_captain_midlands.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
-- Covering partial indexes for per-user cost statistics and per-provider activity
-- scans. Both exclude soft-deleted rows and warmup requests, mirroring the
-- warmup-exclusion condition used by the read paths.
CREATE INDEX IF NOT EXISTS "idx_message_request_user_created_at_cost_stats" ON "message_request" USING btree ("user_id","created_at","cost_usd") WHERE "message_request"."deleted_at" IS NULL AND ("message_request"."blocked_by" IS NULL OR "message_request"."blocked_by" <> 'warmup');--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_message_request_provider_created_at_active" ON "message_request" USING btree ("provider_id","created_at") WHERE "message_request"."deleted_at" IS NULL AND ("message_request"."blocked_by" IS NULL OR "message_request"."blocked_by" <> 'warmup');
3 changes: 3 additions & 0 deletions drizzle/0072_dark_gwen_stacy.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- #slow-query: composite partial indexes for key-based lookups with EXCLUDE_WARMUP filter
-- "last active" lookup per key (max created_at) and per-key cost aggregation;
-- both restricted to non-deleted, non-warmup rows.
CREATE INDEX IF NOT EXISTS "idx_message_request_key_last_active" ON "message_request" USING btree ("key","created_at" DESC NULLS LAST) WHERE "message_request"."deleted_at" IS NULL AND ("message_request"."blocked_by" IS NULL OR "message_request"."blocked_by" <> 'warmup');--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_message_request_key_cost_active" ON "message_request" USING btree ("key","cost_usd") WHERE "message_request"."deleted_at" IS NULL AND ("message_request"."blocked_by" IS NULL OR "message_request"."blocked_by" <> 'warmup');
127 changes: 127 additions & 0 deletions drizzle/0073_magical_manta.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
-- Denormalized billing/analytics ledger: one row per message_request, 1:1 via
-- request_id (enforced by the unique index idx_usage_ledger_request_id defined
-- later in this migration). Rows are written exclusively by the
-- fn_upsert_usage_ledger trigger; warmup requests are never inserted.
CREATE TABLE IF NOT EXISTS "usage_ledger" (
"id" serial PRIMARY KEY NOT NULL,
"request_id" integer NOT NULL, -- source message_request.id; upsert conflict target
"user_id" integer NOT NULL,
"key" varchar NOT NULL, -- API key string the request was made with
"provider_id" integer NOT NULL, -- provider initially selected for the request
"final_provider_id" integer NOT NULL, -- last entry of provider_chain, or provider_id when the chain is absent/malformed (see trigger)
"model" varchar(128),
"original_model" varchar(128), -- presumably the client-requested model before any rewrite — TODO confirm
"endpoint" varchar(256),
"api_type" varchar(20),
"session_id" varchar(64),
"status_code" integer,
"is_success" boolean DEFAULT false NOT NULL, -- derived by the trigger: error_message IS NULL or empty
"blocked_by" varchar(50), -- non-NULL rows are excluded from the billing partial indexes
"cost_usd" numeric(21, 15) DEFAULT '0',
"cost_multiplier" numeric(10, 4),
"input_tokens" bigint,
"output_tokens" bigint,
"cache_creation_input_tokens" bigint,
"cache_read_input_tokens" bigint,
"cache_creation_5m_input_tokens" bigint,
"cache_creation_1h_input_tokens" bigint,
"cache_ttl_applied" varchar(10),
"context_1m_applied" boolean DEFAULT false,
"swap_cache_ttl_applied" boolean DEFAULT false,
"duration_ms" integer,
"ttfb_ms" integer,
"created_at" timestamp with time zone NOT NULL -- copied verbatim from message_request.created_at by the trigger
);
--> statement-breakpoint
-- 1:1 mapping back to message_request; also the ON CONFLICT target for the upsert trigger.
CREATE UNIQUE INDEX IF NOT EXISTS "idx_usage_ledger_request_id" ON "usage_ledger" USING btree ("request_id");--> statement-breakpoint
-- Per-user / per-key / per-provider time-range scans, restricted to billable rows
-- (blocked_by IS NULL).
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_user_created_at" ON "usage_ledger" USING btree ("user_id","created_at") WHERE "usage_ledger"."blocked_by" IS NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_key_created_at" ON "usage_ledger" USING btree ("key","created_at") WHERE "usage_ledger"."blocked_by" IS NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_provider_created_at" ON "usage_ledger" USING btree ("final_provider_id","created_at") WHERE "usage_ledger"."blocked_by" IS NULL;--> statement-breakpoint
-- Expression index on the UTC minute bucket, for per-minute (RPM-style) aggregation.
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_created_at_minute" ON "usage_ledger" USING btree (date_trunc('minute', "created_at" AT TIME ZONE 'UTC'));--> statement-breakpoint
-- Keyset pagination, newest rows first.
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_created_at_desc_id" ON "usage_ledger" USING btree ("created_at" DESC NULLS LAST,"id" DESC NULLS LAST);--> statement-breakpoint
-- Partial indexes skipping NULL session/model values to keep them compact.
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_session_id" ON "usage_ledger" USING btree ("session_id") WHERE "usage_ledger"."session_id" IS NOT NULL;--> statement-breakpoint
-- NOTE(review): superseded by a (key, created_at, cost_usd) covering index in
-- migration 0075_faithful_speed_demon.sql, which drops and recreates this name.
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_model" ON "usage_ledger" USING btree ("model") WHERE "usage_ledger"."model" IS NOT NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_key_cost" ON "usage_ledger" USING btree ("key","cost_usd") WHERE "usage_ledger"."blocked_by" IS NULL;--> statement-breakpoint

-- Trigger function: auto-upsert a usage_ledger row on every message_request
-- INSERT/UPDATE. Warmup requests are never inserted; a request that later
-- becomes warmup has its existing ledger row re-tagged rather than deleted.
CREATE OR REPLACE FUNCTION fn_upsert_usage_ledger()
RETURNS TRIGGER AS $$
DECLARE
v_final_provider_id integer; -- provider that actually served the request
v_is_success boolean; -- derived success flag (no error message recorded)
BEGIN
-- Warmup rows are excluded from billing: skip the insert, but if this request
-- previously produced a ledger row (it was non-warmup at first write), tag it.
IF NEW.blocked_by = 'warmup' THEN
-- If a ledger row already exists (row was originally non-warmup), mark it as warmup
UPDATE usage_ledger SET blocked_by = 'warmup' WHERE request_id = NEW.id;
RETURN NEW;
END IF;

-- Resolve the final provider: the last element of provider_chain when it is a
-- well-formed JSON array whose last element is an object carrying a purely
-- numeric "id"; otherwise fall back to the initially selected provider_id.
-- The regex guard prevents an integer cast error on malformed chain entries.
IF NEW.provider_chain IS NOT NULL
AND jsonb_typeof(NEW.provider_chain) = 'array'
AND jsonb_array_length(NEW.provider_chain) > 0
AND jsonb_typeof(NEW.provider_chain -> -1) = 'object'
AND (NEW.provider_chain -> -1 ? 'id')
AND (NEW.provider_chain -> -1 ->> 'id') ~ '^[0-9]+$' THEN
v_final_provider_id := (NEW.provider_chain -> -1 ->> 'id')::integer;
ELSE
v_final_provider_id := NEW.provider_id;
END IF;

-- Success is defined as "no error message", not by HTTP status code.
v_is_success := (NEW.error_message IS NULL OR NEW.error_message = '');

-- Upsert keyed on request_id (unique index) so repeated UPDATEs of the same
-- message_request row keep exactly one ledger row in sync with it.
INSERT INTO usage_ledger (
request_id, user_id, key, provider_id, final_provider_id,
model, original_model, endpoint, api_type, session_id,
status_code, is_success, blocked_by,
cost_usd, cost_multiplier,
input_tokens, output_tokens,
cache_creation_input_tokens, cache_read_input_tokens,
cache_creation_5m_input_tokens, cache_creation_1h_input_tokens,
cache_ttl_applied, context_1m_applied, swap_cache_ttl_applied,
duration_ms, ttfb_ms, created_at
) VALUES (
NEW.id, NEW.user_id, NEW.key, NEW.provider_id, v_final_provider_id,
NEW.model, NEW.original_model, NEW.endpoint, NEW.api_type, NEW.session_id,
NEW.status_code, v_is_success, NEW.blocked_by,
NEW.cost_usd, NEW.cost_multiplier,
NEW.input_tokens, NEW.output_tokens,
NEW.cache_creation_input_tokens, NEW.cache_read_input_tokens,
NEW.cache_creation_5m_input_tokens, NEW.cache_creation_1h_input_tokens,
NEW.cache_ttl_applied, NEW.context_1m_applied, NEW.swap_cache_ttl_applied,
NEW.duration_ms, NEW.ttfb_ms, NEW.created_at
)
ON CONFLICT (request_id) DO UPDATE SET
user_id = EXCLUDED.user_id,
key = EXCLUDED.key,
provider_id = EXCLUDED.provider_id,
final_provider_id = EXCLUDED.final_provider_id,
model = EXCLUDED.model,
original_model = EXCLUDED.original_model,
endpoint = EXCLUDED.endpoint,
api_type = EXCLUDED.api_type,
session_id = EXCLUDED.session_id,
status_code = EXCLUDED.status_code,
is_success = EXCLUDED.is_success,
blocked_by = EXCLUDED.blocked_by,
cost_usd = EXCLUDED.cost_usd,
cost_multiplier = EXCLUDED.cost_multiplier,
input_tokens = EXCLUDED.input_tokens,
output_tokens = EXCLUDED.output_tokens,
cache_creation_input_tokens = EXCLUDED.cache_creation_input_tokens,
cache_read_input_tokens = EXCLUDED.cache_read_input_tokens,
cache_creation_5m_input_tokens = EXCLUDED.cache_creation_5m_input_tokens,
cache_creation_1h_input_tokens = EXCLUDED.cache_creation_1h_input_tokens,
cache_ttl_applied = EXCLUDED.cache_ttl_applied,
context_1m_applied = EXCLUDED.context_1m_applied,
swap_cache_ttl_applied = EXCLUDED.swap_cache_ttl_applied,
duration_ms = EXCLUDED.duration_ms,
ttfb_ms = EXCLUDED.ttfb_ms,
created_at = EXCLUDED.created_at;

RETURN NEW;
-- NOTE(review): failures are swallowed with only a WARNING so the write to
-- message_request always succeeds. If these warnings are not actively
-- monitored, usage_ledger can silently drift from the source table; consider
-- a dead-letter table (or rely on the idempotent backfill service in this PR)
-- if billing integrity is critical.
EXCEPTION WHEN OTHERS THEN
RAISE WARNING 'fn_upsert_usage_ledger failed for request_id=%: %', NEW.id, SQLERRM;
RETURN NEW;
END;
Comment on lines +118 to +121
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The EXCEPTION WHEN OTHERS block in the fn_upsert_usage_ledger trigger only raises a WARNING. This means if the trigger fails to update the usage_ledger for any reason, the main request will still succeed, but the billing and analytics data in usage_ledger will become out of sync. This could lead to silent data discrepancies if database warnings are not actively monitored. For a production environment, you might consider a more robust error handling strategy, such as logging failures to a separate dead-letter table for later inspection and reprocessing, to ensure the integrity of billing data.

$$ LANGUAGE plpgsql;--> statement-breakpoint

-- Row-level trigger: fires after every INSERT/UPDATE on message_request and
-- delegates to fn_upsert_usage_ledger to keep the ledger in sync.
CREATE TRIGGER trg_upsert_usage_ledger
AFTER INSERT OR UPDATE ON message_request
FOR EACH ROW
EXECUTE FUNCTION fn_upsert_usage_ledger();
3 changes: 3 additions & 0 deletions drizzle/0074_wide_retro_girl.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- Client-restriction lists (jsonb arrays, default empty) for providers and users;
-- part of the client restriction refactor (allowed + blocked client sets).
ALTER TABLE "providers" ADD COLUMN "allowed_clients" jsonb DEFAULT '[]'::jsonb NOT NULL;--> statement-breakpoint
ALTER TABLE "providers" ADD COLUMN "blocked_clients" jsonb DEFAULT '[]'::jsonb NOT NULL;--> statement-breakpoint
ALTER TABLE "users" ADD COLUMN "blocked_clients" jsonb DEFAULT '[]'::jsonb NOT NULL;
5 changes: 5 additions & 0 deletions drizzle/0075_faithful_speed_demon.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
-- Rebuild idx_usage_ledger_key_cost with created_at in the key so time-bounded
-- per-key cost queries are covered, and add matching covering indexes for
-- per-user and per-provider cost aggregation (billable rows only).
DROP INDEX IF EXISTS "idx_usage_ledger_key_cost";--> statement-breakpoint
-- Session detail view: user/key lookup by session over non-deleted rows.
CREATE INDEX IF NOT EXISTS "idx_message_request_session_user_info" ON "message_request" USING btree ("session_id","created_at","user_id","key") WHERE "message_request"."deleted_at" IS NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_user_cost_cover" ON "usage_ledger" USING btree ("user_id","created_at","cost_usd") WHERE "usage_ledger"."blocked_by" IS NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_provider_cost_cover" ON "usage_ledger" USING btree ("final_provider_id","created_at","cost_usd") WHERE "usage_ledger"."blocked_by" IS NULL;--> statement-breakpoint
CREATE INDEX IF NOT EXISTS "idx_usage_ledger_key_cost" ON "usage_ledger" USING btree ("key","created_at","cost_usd") WHERE "usage_ledger"."blocked_by" IS NULL;
13 changes: 13 additions & 0 deletions drizzle/0076_mighty_lionheart.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
-- Cache-hit-rate alert feature (#834): new notification type plus its per-setting
-- configuration columns on notification_settings. Thresholds are stored as
-- numeric(5,4) ratios (e.g. '0.05' = 5%); intervals/cooldowns are in minutes.
ALTER TYPE "public"."notification_type" ADD VALUE 'cache_hit_rate_alert';--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_enabled" boolean DEFAULT false NOT NULL;--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_webhook" varchar(512);--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_window_mode" varchar(10) DEFAULT 'auto';--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_check_interval" integer DEFAULT 5;--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_historical_lookback_days" integer DEFAULT 7;--> statement-breakpoint
-- Minimum sample sizes before an alert may fire.
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_min_eligible_requests" integer DEFAULT 20;--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_min_eligible_tokens" integer DEFAULT 0;--> statement-breakpoint
-- Alert conditions: absolute floor, relative drop, absolute drop — semantics live
-- in the notification service; defaults here are 5% / 30% / 10%.
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_abs_min" numeric(5, 4) DEFAULT '0.05';--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_drop_rel" numeric(5, 4) DEFAULT '0.3';--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_drop_abs" numeric(5, 4) DEFAULT '0.1';--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_cooldown_minutes" integer DEFAULT 30;--> statement-breakpoint
ALTER TABLE "notification_settings" ADD COLUMN "cache_hit_rate_alert_top_n" integer DEFAULT 10;
3 changes: 3 additions & 0 deletions drizzle/0077_nappy_giant_man.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
-- Provider scheduled active time window (#844): varchar(5) start/end columns,
-- presumably 'HH:MM' strings — TODO confirm format against the scheduler code.
-- Also removes the deprecated join_claude_pool flag (destructive; data in that
-- column is lost on migration).
ALTER TABLE "providers" ADD COLUMN "active_time_start" varchar(5);--> statement-breakpoint
ALTER TABLE "providers" ADD COLUMN "active_time_end" varchar(5);--> statement-breakpoint
ALTER TABLE "providers" DROP COLUMN "join_claude_pool";
Loading
Loading