diff --git a/.gitignore b/.gitignore
index 68aa86c..40957fa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -64,3 +64,7 @@ __pycache__/
 ai/backend/main.py.backup
+
+# Local env files
+/.env.local
+/.env.production
diff --git a/app/.gitignore b/app/.gitignore
index ac95081..7275b7a 100644
--- a/app/.gitignore
+++ b/app/.gitignore
@@ -1,47 +1,51 @@
 # Learn more https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files
 
 # dependencies
 node_modules/
 
 # Expo
 .expo/
 dist/
 web-build/
 release/
 releases/
 expo-env.d.ts
 
 # Native
 .kotlin/
 *.orig.*
 *.jks
 *.p8
 *.p12
 *.key
 *.mobileprovision
 
 # Metro
 .metro-health-check*
 
 # local reference (do not commit)
 build_ref.md
 
 # debug
 npm-debug.*
 yarn-debug.*
 yarn-error.*
 
 # macOS
 .DS_Store
 *.pem
 
 # local env files (secrets; do not commit)
 .env
 .env*.local
 
 # typescript
 *.tsbuildinfo
 
 app-example
 
 .vercel
+
+# Explicit env variants
+.env.local
+.env.production
diff --git a/app/api/tsconfig.json b/app/api/tsconfig.json
index cf0d915..7d84b38 100644
--- a/app/api/tsconfig.json
+++ b/app/api/tsconfig.json
@@ -2,7 +2,8 @@
   "compilerOptions": {
     "target": "ES2020",
     "module": "ESNext",
-    "moduleResolution": "node",
+    "moduleResolution": "bundler",
+    "types": ["node"],
     "strict": true,
     "esModuleInterop": true,
     "skipLibCheck": true,
diff --git a/app/bot/handler.ts b/app/bot/handler.ts
new file mode 100644
index 0000000..4b1e9a1
--- /dev/null
+++ b/app/bot/handler.ts
@@ -0,0 +1,308 @@
+export type ChatRole = 'system' | 'user' | 'assistant';
+
+export type ChatMessage = {
+  role: ChatRole;
+  content: string;
+};
+
+export type HandleChatInput = {
+  messages: ChatMessage[];
+  tokenHint?: string;
+};
+
+export type HandleChatOutput = {
+  text: string;
+};
+
+type CoffeeTokenContext = {
+  symbol: string;
+  name?: string;
+  description?: string;
+  facts: string[];
+  sourceUrls: string[];
+};
+
+let loggedMissingOpenAiKey = false;
+
+function lastUserText(messages: ChatMessage[]): string {
+  return [...messages]
+    .reverse()
+    .find((message) => message.role === 'user')
+    ?.content?.trim() || '';
+}
+
+function looksLikeOpenAiKey(value: string): boolean {
+  return /^sk-[A-Za-z0-9\-_]+$/.test(value.trim());
+}
+
+function openAiApiKey(): string {
+  const fromOpenAi = (process.env.OPENAI_API_KEY || process.env.OPENAI_KEY || '').trim();
+  if (fromOpenAi) return fromOpenAi;
+
+  const 
genericApiKey = (process.env.API_KEY || '').trim(); + if (looksLikeOpenAiKey(genericApiKey)) return genericApiKey; + return ''; +} + +function toOpenAiMessages(messages: ChatMessage[]): Array<{ role: ChatRole; content: string }> { + return messages + .map((message) => ({ + role: message.role, + content: typeof message.content === 'string' ? message.content.trim() : '', + })) + .filter((message) => message.content.length > 0); +} + +function normalizeSymbol(value: string): string { + return value.replace('$', '').trim().toUpperCase(); +} + +function extractTickerFromText(text: string): string | undefined { + const fromDollar = text.match(/\$([A-Za-z0-9]{2,15})\b/); + if (fromDollar?.[1]) return normalizeSymbol(fromDollar[1]); + + const fromUpper = text.match(/\b([A-Z0-9]{2,12})\b/); + if (fromUpper?.[1]) return normalizeSymbol(fromUpper[1]); + + return undefined; +} + +function extractTickerFromMessages(messages: ChatMessage[]): string | undefined { + for (const message of [...messages].reverse()) { + if (typeof message.content !== 'string') continue; + const symbol = extractTickerFromText(message.content); + if (symbol) return symbol; + } + return undefined; +} + +function asString(value: unknown): string | undefined { + return typeof value === 'string' && value.trim() ? value.trim() : undefined; +} + +function asStringArray(value: unknown): string[] { + if (!Array.isArray(value)) return []; + return value + .map((item) => (typeof item === 'string' ? item.trim() : '')) + .filter(Boolean); +} + +function asObject(value: unknown): Record { + return typeof value === 'object' && value !== null + ? 
(value as Record) + : {}; +} + +function extractTokenPayload(payloadObj: Record): Record { + if (typeof payloadObj.token === 'object' && payloadObj.token !== null) { + return payloadObj.token as Record; + } + if (typeof payloadObj.data === 'object' && payloadObj.data !== null) { + return payloadObj.data as Record; + } + return payloadObj; +} + +async function fetchCoffeeContext(symbolInput: string): Promise { + const symbol = normalizeSymbol(symbolInput); + if (!symbol) return null; + + const baseUrl = (process.env.SWAP_COFFEE_BASE_URL || 'https://tokens.swap.coffee').replace(/\/$/, ''); + const coffeeKey = (process.env.COFFEE_KEY || '').trim(); + const timeoutMs = Number(process.env.COFFEE_TIMEOUT_MS || 6000); + const headers = coffeeKey ? ({ 'X-API-Key': coffeeKey } as Record) : undefined; + + const controller = new AbortController(); + const timeout = setTimeout(() => controller.abort(), timeoutMs); + + try { + const response = await fetch(`${baseUrl}/tokens/${encodeURIComponent(symbol)}`, { + headers, + signal: controller.signal, + }); + if (!response.ok) return null; + + const payload = (await response.json()) as unknown; + const payloadObj = asObject(payload); + const token = extractTokenPayload(payloadObj); + + const normalizedSymbol = normalizeSymbol( + asString(token.symbol) || asString(payloadObj.symbol) || symbol, + ); + const name = asString(token.name) || asString(payloadObj.name); + const description = + asString(token.description) || + asString(payloadObj.description) || + asString(token.summary); + const facts = [ + ...asStringArray(token.facts), + ...asStringArray(payloadObj.facts), + ...asStringArray(payloadObj.context), + ]; + if (facts.length === 0) { + if (name && description) facts.push(`${name}: ${description}`); + else if (description) facts.push(description); + } + + const sourceUrls = [ + asString(token.source_url), + asString(payloadObj.source_url), + asString(token.url), + asString(payloadObj.url), + ].filter((value): value is string => 
Boolean(value)); + + return { + symbol: normalizedSymbol, + name, + description, + facts, + sourceUrls, + }; + } catch { + return null; + } finally { + clearTimeout(timeout); + } +} + +function withContextMessages( + messages: ChatMessage[], + facts: string[], + sourceUrls: string[], +): ChatMessage[] { + if (facts.length === 0) return messages; + + const contextLines = [ + 'Use this verified token context if relevant to the user question.', + ...facts, + ]; + if (sourceUrls.length > 0) contextLines.push(`Sources: ${sourceUrls.join(', ')}`); + + return [{ role: 'system', content: contextLines.join('\n') }, ...messages]; +} + +function buildTokenFallback( + symbol: string, + name?: string, + description?: string, +): string { + const normalized = symbol.replace('$', '').toUpperCase(); + const title = name?.trim() || `$${normalized}`; + if (description?.trim()) { + return `${title} (${normalized}) currently reads like a narrative-driven token.\n\n${description.trim()}\n\nIf useful, I can break this down into thesis, risk flags, and what to verify before entering.`; + } + return `${title} (${normalized}) looks like a speculative token where narrative and risk management matter most.\n\nI can provide a compact brief with thesis, catalysts, and risk checks.`; +} + +async function completeWithOpenAi(messages: ChatMessage[]): Promise { + const apiKey = openAiApiKey(); + if (!apiKey) { + if (!loggedMissingOpenAiKey) { + console.error( + '[bot/handler] missing OPENAI_API_KEY/OPENAI_KEY (or API_KEY with OpenAI format).', + ); + loggedMissingOpenAiKey = true; + } + return null; + } + + const baseUrl = (process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1').replace(/\/$/, ''); + const model = (process.env.OPENAI_MODEL || 'gpt-4o-mini').trim(); + const timeoutMs = Number(process.env.OPENAI_TIMEOUT_MS || 20000); + const payloadMessages = toOpenAiMessages(messages); + if (payloadMessages.length === 0) return null; + + const controller = new AbortController(); + const timeout 
= setTimeout(() => controller.abort(), timeoutMs); + + try { + const res = await fetch(`${baseUrl}/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${apiKey}`, + }, + body: JSON.stringify({ + model, + messages: payloadMessages, + temperature: 0.3, + }), + signal: controller.signal, + }); + + if (!res.ok) { + const body = (await res.text().catch(() => '')).slice(0, 320); + throw new Error(`openai_http_${res.status}${body ? `: ${body}` : ''}`); + } + + const data = (await res.json()) as { + choices?: Array<{ message?: { content?: string } }>; + }; + const content = data.choices?.[0]?.message?.content?.trim(); + return content || null; + } finally { + clearTimeout(timeout); + } +} + +export async function handleChat(input: HandleChatInput): Promise { + const userText = lastUserText(input.messages); + if (!userText) { + return { text: 'I could not read that message.' }; + } + + let openAiError: string | undefined; + + const ticker = + normalizeSymbol(input.tokenHint || '') || + extractTickerFromText(userText) || + extractTickerFromMessages(input.messages); + + let coffee: CoffeeTokenContext | null = null; + if (ticker) { + coffee = await fetchCoffeeContext(ticker); + } + + try { + const messages = withContextMessages( + input.messages, + coffee?.facts || [], + coffee?.sourceUrls || [], + ); + const aiText = await completeWithOpenAi(messages); + if (aiText && aiText.length > 0) { + return { text: aiText }; + } + } catch (err) { + openAiError = err instanceof Error ? err.message : 'unknown_openai_error'; + console.error('[bot/handler] openai failed', err); + } + + if (ticker) { + return { + text: buildTokenFallback( + ticker, + coffee?.name, + coffee?.description, + ), + }; + } + + if (openAiError?.includes('openai_http_429')) { + return { + text: 'AI quota is exhausted right now. Please add credits/billing for the AI key. 
Meanwhile, include a token symbol (example: DOGS) and I can send a fallback brief.',
+    };
+  }
+
+  if (!openAiApiKey()) {
+    return {
+      text: 'AI key is not configured in runtime. Set OPENAI_API_KEY in Vercel environment variables.',
+    };
+  }
+
+  return {
+    text: 'I am online, but AI is temporarily unavailable. Please try again in a moment.',
+  };
+}
+
+export default handleChat;
diff --git a/app/bot/responder.ts b/app/bot/responder.ts
index 66d9dc2..2845c0b 100644
--- a/app/bot/responder.ts
+++ b/app/bot/responder.ts
@@ -329,6 +329,14 @@ export async function handleBotAiResponse(ctx: Context): Promise<void> {
   };
 
+  // Hoisted `let` declaration: sendOrEdit's closure must not reference a
+  // later `const` (temporal dead zone would throw on the first edit).
+  let typingInterval: ReturnType<typeof setInterval> | undefined;
+
   const sendOrEdit = (accumulated: string): void => {
+    if (typingInterval !== undefined) {
+      clearInterval(typingInterval);
+      typingInterval = undefined;
+    }
     streamedAccumulated = accumulated;
     if (isCancelled()) return;
     const slice = accumulated.length > MAX_MESSAGE_TEXT_LENGTH
@@ -372,7 +380,19 @@ export async function handleBotAiResponse(ctx: Context): Promise<void> {
 
   interruptedReplyCallback = sendInterruptedReply;
 
-  await sendOrEditOnce("…", "…");
+  // Start with rotating typing indicator instead of static "…"
+  const typingFrames = ["\\", "/", "-", "|"];
+  let typingIndex = 0;
+
+  await sendOrEditOnce(typingFrames[typingIndex], typingFrames[typingIndex]);
+
+  typingInterval = setInterval(() => {
+    if (sentMessageId === null) return;
+    typingIndex = (typingIndex + 1) % typingFrames.length;
+    ctx.api
+      .editMessageText(chatId, sentMessageId, typingFrames[typingIndex])
+      .catch(() => {});
+  }, 300);
   result = await transmitStream(
     { input: text, userId, context, mode, threadContext, instructions: TELEGRAM_BOT_LENGTH_INSTRUCTION },
     sendOrEdit,
diff --git a/app/package-lock.json b/app/package-lock.json
index 26cf6d0..45a4a7b 100644
--- a/app/package-lock.json
+++ b/app/package-lock.json
@@ -48,6 +48,7 @@
     },
     "devDependencies": {
       "@babel/core": "^7.25.2",
+      "@types/node": "^25.3.5",
       "@types/react": "~19.1.0",
       "@vercel/functions": "^3.4.3",
       "concurrently": "^9.1.0",
@@ -5781,9 +5782,9 @@
       "license": "MIT"
     },
     "node_modules/@types/node": {
-
"version": "25.3.2", - "resolved": "https://registry.npmjs.org/@types/node/-/node-25.3.2.tgz", - "integrity": "sha512-RpV6r/ij22zRRdyBPcxDeKAzH43phWVKEjL2iksqo1Vz3CuBUrgmPpPhALKiRfU7OMCmeeO9vECBMsV0hMTG8Q==", + "version": "25.3.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.3.5.tgz", + "integrity": "sha512-oX8xrhvpiyRCQkG1MFchB09f+cXftgIXb3a7UUa4Y3wpmZPw5tyZGTLWhlESOLq1Rq6oDlc8npVU2/9xiCuXMA==", "license": "MIT", "dependencies": { "undici-types": "~7.18.0" @@ -19244,9 +19245,9 @@ } }, "node_modules/tar": { - "version": "7.5.9", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.9.tgz", - "integrity": "sha512-BTLcK0xsDh2+PUe9F6c2TlRp4zOOBMTkoQHQIWSIzI0R7KG46uEwq4OPk2W7bZcprBMsuaeFsqwYr7pjh6CuHg==", + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.10.tgz", + "integrity": "sha512-8mOPs1//5q/rlkNSPcCegA6hiHJYDmSLEI8aMH/CdSQJNWztHC9WHNam5zdQlfpTwB9Xp7IBEsHfV5LKMJGVAw==", "license": "BlueOak-1.0.0", "dependencies": { "@isaacs/fs-minipass": "^4.0.0", diff --git a/app/package.json b/app/package.json index 981ac2a..8906b46 100644 --- a/app/package.json +++ b/app/package.json @@ -99,6 +99,7 @@ }, "devDependencies": { "@babel/core": "^7.25.2", + "@types/node": "^25.3.5", "@types/react": "~19.1.0", "@vercel/functions": "^3.4.3", "concurrently": "^9.1.0", diff --git a/app/scripts/set-webhook.ts b/app/scripts/set-webhook.ts index fec545d..e9bd777 100644 --- a/app/scripts/set-webhook.ts +++ b/app/scripts/set-webhook.ts @@ -56,12 +56,11 @@ async function setWebhook(): Promise { } console.error('[set-webhook] Telegram setWebhook failed:', data.description ?? 
data); - process.exit(1); + console.error('[set-webhook] Non-fatal: continuing build without failing deployment.'); } setWebhook() - .then(() => process.exit(0)) .catch((err: Error) => { console.error('[set-webhook] Error:', err.message); - process.exit(1); + console.error('[set-webhook] Non-fatal: continuing build without failing deployment.'); }); diff --git a/app/tsconfig.json b/app/tsconfig.json index 032b25f..810a1ba 100644 --- a/app/tsconfig.json +++ b/app/tsconfig.json @@ -1,11 +1,13 @@ { - "extends": "expo/tsconfig.base", - "compilerOptions": { - "strict": true, - "paths": { - "@/*": [ - "./*" - ] + "extends": "expo/tsconfig.base", + "compilerOptions": { + "strict": true, + "moduleResolution": "bundler", + "types": ["node"], + "paths": { + "@/*": [ + "./*" + ] } }, "include": [ diff --git a/bot/bot.py b/bot/bot.py index 1b2969c..5883437 100644 --- a/bot/bot.py +++ b/bot/bot.py @@ -51,6 +51,7 @@ _lang_switch_last_tap: dict[tuple[int, int], float] = {} _http_runner: web.AppRunner | None = None LANG_SWITCH_DEBOUNCE_SECONDS = 0.5 +DEFAULT_THINKING_TEXT = "Thinking..." def _mask_secret(value: str, visible: int = 4) -> str: @@ -108,6 +109,26 @@ def build_language_keyboard(message_id: int) -> InlineKeyboardMarkup: return InlineKeyboardMarkup(keyboard) +def build_typing_indicator_frames(base_text: str) -> list[str]: + """Convert a static thinking label into a simple rotating dot animation.""" + normalized = (base_text or "").strip() or DEFAULT_THINKING_TEXT + stem = normalized.rstrip() + stem_without_dots = stem.rstrip(".…").rstrip() + if stem_without_dots: + stem = stem_without_dots + return [f"{stem}.", f"{stem}..", f"{stem}..."] + + +def get_initial_typing_indicator_text(lang: str) -> str: + return build_typing_indicator_frames(THINKING_TEXT.get(lang, THINKING_TEXT["en"]))[0] + + +def truncate_telegram_text(text: str, max_length: int = 4096) -> str: + if len(text) > max_length: + return text[:max_length - 3] + "..." 
+ return text + + def build_app_launch_url() -> str | None: """Build a valid Mini App URL with mode=fullscreen when APP_URL is configured.""" raw = (os.getenv("APP_URL") or "").strip() @@ -833,7 +854,14 @@ async def hello(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: ) -async def stream_ai_response(messages: list, bot, chat_id: int, message_id: int, telegram_id: int): +async def stream_ai_response( + messages: list, + bot, + chat_id: int, + message_id: int, + telegram_id: int, + thinking_text: str | None = None, +): """ Stream AI response and edit message as chunks arrive messages: List of message dicts with 'role' and 'content' (AI backend ChatMessage format) @@ -847,15 +875,64 @@ async def stream_ai_response(messages: list, bot, chat_id: int, message_id: int, accumulated_text = "" last_edit_time = asyncio.get_event_loop().time() edit_interval = float(os.getenv("EDIT_INTERVAL_SECONDS", "1")) - last_sent_text = "" # Track last sent text to avoid "message not modified" errors + typing_interval = float(os.getenv("THINKING_ANIMATION_INTERVAL_SECONDS", "0.35")) + typing_frames = build_typing_indicator_frames(thinking_text or DEFAULT_THINKING_TEXT) + last_sent_text = (thinking_text or "").strip() + first_response_sent = False current_message_id = message_id key = (chat_id, message_id) tracked_keys = {key} cancel_event = asyncio.Event() + typing_stop_event = asyncio.Event() _stream_cancel_events[key] = cancel_event current_task = asyncio.current_task() if current_task: _active_stream_tasks.setdefault(key, current_task) + message_edit_lock = asyncio.Lock() + typing_task: asyncio.Task | None = None + + async def stop_typing_indicator(): + nonlocal typing_task + if typing_stop_event.is_set(): + return + typing_stop_event.set() + if typing_task and not typing_task.done(): + typing_task.cancel() + try: + await typing_task + except asyncio.CancelledError: + pass + + async def animate_typing_indicator(): + nonlocal last_sent_text + if not typing_frames or 
typing_interval <= 0: + return + frame_index = 1 if last_sent_text == typing_frames[0] and len(typing_frames) > 1 else 0 + try: + while not cancel_event.is_set() and not typing_stop_event.is_set(): + await asyncio.sleep(typing_interval) + if cancel_event.is_set() or typing_stop_event.is_set(): + return + next_text = typing_frames[frame_index % len(typing_frames)] + frame_index += 1 + if next_text == last_sent_text: + continue + try: + async with message_edit_lock: + if cancel_event.is_set() or typing_stop_event.is_set(): + return + await bot.edit_message_text( + chat_id=chat_id, + message_id=current_message_id, + text=next_text, + reply_markup=build_language_keyboard(current_message_id), + ) + last_sent_text = next_text + except TelegramError as e: + if "not modified" not in str(e).lower(): + print(f"Warning: Could not animate typing indicator for message {current_message_id}: {e}") + except asyncio.CancelledError: + raise async def edit_or_fallback_send(text: str): nonlocal current_message_id, last_sent_text, tracked_keys @@ -864,15 +941,16 @@ async def edit_or_fallback_send(text: str): if cancel_event.is_set(): return try: - kwargs = { - "chat_id": chat_id, - "message_id": current_message_id, - "text": text, - "reply_markup": build_language_keyboard(current_message_id), - } - await bot.edit_message_text(**kwargs) - last_sent_text = text - return + async with message_edit_lock: + kwargs = { + "chat_id": chat_id, + "message_id": current_message_id, + "text": text, + "reply_markup": build_language_keyboard(current_message_id), + } + await bot.edit_message_text(**kwargs) + last_sent_text = text + return except TelegramError as e: if "not modified" in str(e).lower(): return @@ -898,6 +976,9 @@ async def edit_or_fallback_send(text: str): last_sent_text = text except TelegramError as e: print(f"Warning: Could not send fallback message: {e}") + + if typing_frames and typing_interval > 0: + typing_task = asyncio.create_task(animate_typing_indicator()) try: async with 
stream_chat(messages=messages, api_key=api_key, timeout_s=60.0) as (ai_backend_url, response): @@ -920,6 +1001,7 @@ async def edit_or_fallback_send(text: str): "[AI_BACKEND_ERROR] " f"ai_backend_url={ai_backend_url} key_source={key_source} key_preview={_mask_secret(api_key)}" ) + await stop_typing_indicator() await edit_or_fallback_send(f"AI backend error (status {status_code}). Please try again.") return @@ -934,6 +1016,7 @@ async def edit_or_fallback_send(text: str): log_timing("First AI chunk received", stream_start) if "error" in data: error_text = f"Error: {data['error']}" + await stop_typing_indicator() await edit_or_fallback_send(error_text) return @@ -942,17 +1025,20 @@ async def edit_or_fallback_send(text: str): accumulated_text += data["token"] elif "response" in data: accumulated_text = data["response"] + + display_text = truncate_telegram_text(accumulated_text) + if display_text and not first_response_sent: + await stop_typing_indicator() + await edit_or_fallback_send(display_text) + last_edit_time = asyncio.get_event_loop().time() + first_response_sent = True + typing_task = None # Edit message periodically to avoid rate limits. current_time = asyncio.get_event_loop().time() if current_time - last_edit_time >= edit_interval: if cancel_event.is_set(): return - max_response_length = 4096 - if len(accumulated_text) > max_response_length: - display_text = accumulated_text[:max_response_length - 3] + "..." - else: - display_text = accumulated_text if display_text and display_text != last_sent_text: await edit_or_fallback_send(display_text) last_edit_time = current_time @@ -963,17 +1049,14 @@ async def edit_or_fallback_send(text: str): continue # Final edit with complete response as-is from backend - max_response_length = 4096 - if len(accumulated_text) > max_response_length: - response_text = accumulated_text[:max_response_length - 3] + "..." 
- else: - response_text = accumulated_text + response_text = truncate_telegram_text(accumulated_text) if cancel_event.is_set(): return final_text = response_text if cancel_event.is_set(): return + await stop_typing_indicator() await edit_or_fallback_send(final_text) log_timing("Stream complete -> final edit sent", stream_start) @@ -986,20 +1069,24 @@ async def edit_or_fallback_send(text: str): await edit_or_fallback_send(no_response_text) except httpx.TimeoutException: error_text = "Sorry, the AI took too long to respond. Please try again." + await stop_typing_indicator() await edit_or_fallback_send(error_text) except httpx.RequestError as e: error_text = ( f"Sorry, I couldn't connect to the AI service at {ai_backend_url}. " f"Error: {str(e)}" ) + await stop_typing_indicator() await edit_or_fallback_send(error_text) except asyncio.CancelledError: print(f"Stream cancelled for message {message_id}") raise except Exception as e: error_text = f"Sorry, an error occurred: {str(e)}" + await stop_typing_indicator() await edit_or_fallback_send(error_text) finally: + await stop_typing_indicator() for tracked_key in list(tracked_keys): if _stream_cancel_events.get(tracked_key) is cancel_event: _stream_cancel_events.pop(tracked_key, None) @@ -1056,8 +1143,9 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> asyncio.create_task(save_message(telegram_id, "user", message_text)) # Send initial thinking message with immediate keyboard, then bind callback_data to the real message_id. 
+ thinking_text = get_initial_typing_indicator_text(message_lang) sent_message = await update.message.reply_text( - THINKING_TEXT.get(message_lang, THINKING_TEXT["en"]), + thinking_text, reply_markup=build_language_keyboard(0), ) try: @@ -1078,7 +1166,8 @@ async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> context.bot, sent_message.chat_id, sent_message.message_id, - telegram_id + telegram_id, + thinking_text=thinking_text, )) _active_stream_tasks[(sent_message.chat_id, sent_message.message_id)] = stream_task log_timing("Stream task created", timing_checkpoint) @@ -1141,7 +1230,7 @@ async def handle_language_callback(update: Update, context: ContextTypes.DEFAULT async with lock: await cancel_stream(chat_id, target_message_id) - thinking_text = THINKING_TEXT.get(lang, THINKING_TEXT["en"]) + thinking_text = get_initial_typing_indicator_text(lang) active_message_id = target_message_id try: await context.bot.edit_message_text( @@ -1207,7 +1296,8 @@ async def handle_language_callback(update: Update, context: ContextTypes.DEFAULT context.bot, chat_id, active_message_id, - telegram_id + telegram_id, + thinking_text=thinking_text, )) _active_stream_tasks[(chat_id, active_message_id)] = stream_task finally: diff --git a/bot/coffee.ts b/bot/coffee.ts index 3e23219..771bebc 100644 --- a/bot/coffee.ts +++ b/bot/coffee.ts @@ -1,3 +1,5 @@ +/// + export type CoffeeTokenContext = { symbol: string; name?: string; @@ -116,4 +118,3 @@ export async function fetchCoffeeContext(symbolInput: string): Promise 0) { try { - const coffee = await fetchCoffeeContext(tickerSymbol); + const coffee = await fetchCoffeeContext(symbolForLookup); if (coffee) { usedCoffee = true; coffeeName = coffee.name; @@ -141,4 +142,3 @@ export async function handleChat(input: HandleChatInput): Promise + export type ChatRole = "system" | "user" | "assistant"; export type ChatMessage = { diff --git a/bot/tests/test_typing_indicator.py b/bot/tests/test_typing_indicator.py new file mode 
100644 index 0000000..374c3c2 --- /dev/null +++ b/bot/tests/test_typing_indicator.py @@ -0,0 +1,78 @@ +import asyncio +import json + +import pytest + +pytest.importorskip("telegram") +pytest.importorskip("aiohttp") + +from bot import bot as bot_module + + +class FakeResponse: + def raise_for_status(self): + return None + + async def aiter_lines(self): + await asyncio.sleep(0.03) + yield json.dumps({"token": "Hi", "done": True}) + + +class FakeStreamContext: + async def __aenter__(self): + return "http://test", FakeResponse() + + async def __aexit__(self, exc_type, exc, tb): + return False + + +class FakeBot: + def __init__(self): + self.edits: list[str] = [] + + async def edit_message_text(self, **kwargs): + self.edits.append(kwargs["text"]) + + async def send_message(self, **kwargs): + raise AssertionError("stream_ai_response should not need fallback send in this test") + + async def edit_message_reply_markup(self, **kwargs): + return None + + +async def _noop_save_message(_telegram_id: int, _role: str, _text: str): + return None + + +def test_build_typing_indicator_frames_rotates_dots(): + assert bot_module.build_typing_indicator_frames("Thinking...") == [ + "Thinking.", + "Thinking..", + "Thinking...", + ] + + +def test_stream_ai_response_animates_before_first_chunk(monkeypatch): + monkeypatch.setenv("INNER_CALLS_KEY", "test-key") + monkeypatch.setenv("THINKING_ANIMATION_INTERVAL_SECONDS", "0.01") + monkeypatch.setenv("EDIT_INTERVAL_SECONDS", "1") + monkeypatch.setattr(bot_module, "stream_chat", lambda **_kwargs: FakeStreamContext()) + monkeypatch.setattr(bot_module, "save_message", _noop_save_message) + + fake_bot = FakeBot() + + asyncio.run( + bot_module.stream_ai_response( + messages=[{"role": "user", "content": "hello"}], + bot=fake_bot, + chat_id=1, + message_id=10, + telegram_id=123, + thinking_text="Thinking.", + ) + ) + + assert "Hi" in fake_bot.edits + first_response_index = fake_bot.edits.index("Hi") + assert any(text in {"Thinking..", "Thinking..."} 
for text in fake_bot.edits[:first_response_index]) + assert fake_bot.edits[-1] == "Hi"