Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file added components/__init__.py
Empty file.
97 changes: 97 additions & 0 deletions components/assistant.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
"""Shared assistant helpers for Streamlit pages."""
from __future__ import annotations

from typing import Any, Dict, List

import streamlit as st

from services.ai_core import call_llm, load_page_config


def _offline_assistant_reply(
question: str,
context: Dict[str, Any],
config: Dict[str, Any],
history: List[Dict[str, str]],
) -> str:
context_lines = [f"- {key}: {value}" for key, value in context.items()] if context else []
description = config.get(
"description",
"Review the provided context details and adjust your simulation inputs accordingly.",
)
history_lines: List[str] = []
if history:
history_lines.append("Recent conversation:")
for turn in history[-4:]:
speaker = "You" if turn["role"] == "user" else "Assistant"
history_lines.append(f"{speaker}: {turn['content']}")
message_parts = [
"Assistant response (offline mode)",
description,
]
if context_lines:
message_parts.append("Context snapshot:")
message_parts.extend(context_lines)
if history_lines:
message_parts.extend(history_lines)
message_parts.append(f"Echoing your question for reference: {question}")
message_parts.append("Consult the NeqSim documentation or in-app tooltips for deeper guidance.")
return "\n".join(message_parts)


def render_ai_helper(page_id: str, context: Dict[str, str]) -> None:
    """Render a consistent assistant helper based on page configuration.

    Args:
        page_id: Key into the AI page configuration; nothing is rendered when
            ``load_page_config`` returns no entry for the page.
        context: Page-specific values shown to the user and embedded in the
            LLM prompt.
    """

    config = load_page_config(page_id)
    if not config:
        return

    title = config.get("title", "AI helper")
    description = config.get(
        "description",
        "Ask the assistant for guidance on configuring the simulation.",
    )
    history_key = f"{page_id}_assistant_history"
    if history_key not in st.session_state:
        st.session_state[history_key] = []
    history: List[Dict[str, str]] = st.session_state[history_key]

    input_key = f"{page_id}_assistant"
    clear_key = f"{page_id}_assistant_clear"
    # BUGFIX: a widget's session-state value may only be assigned BEFORE the
    # widget is instantiated in a run; assigning after st.text_area raises
    # StreamlitAPIException. The button handler therefore sets a flag and the
    # actual reset happens here, at the start of the next rerun.
    if st.session_state.pop(clear_key, False):
        st.session_state[input_key] = ""

    with st.expander(title, expanded=False):
        st.write(description)
        if context:
            st.json(context)
        if history:
            st.markdown("### Conversation history")
            for turn in history[-6:]:
                speaker = "You" if turn["role"] == "user" else "Assistant"
                st.markdown(f"**{speaker}:** {turn['content']}")
        question = st.text_area("Ask a question", key=input_key)
        if st.button("Get assistant reply", key=f"{page_id}_assistant_button") and question:
            history.append({"role": "user", "content": question})
            conversation_lines = [
                f"{('User' if turn['role'] == 'user' else 'Assistant')}: {turn['content']}"
                for turn in history[-6:]
            ]
            prompt_parts = [
                "Use the JSON context to assist with NeqSim simulations.",
                f"Context: {context}",
            ]
            if conversation_lines:
                prompt_parts.append("Prior conversation:\n" + "\n".join(conversation_lines))
            prompt_parts.append(f"Current question: {question}")
            prompt = "\n\n".join(prompt_parts)
            system_prompt = config.get(
                "system_prompt",
                "Provide helpful tips based on the context JSON and conversation history.",
            )

            response = call_llm(
                prompt,
                system_prompt=system_prompt,
                offline_fallback=lambda *_: _offline_assistant_reply(
                    question, context, config, history
                ),
            )
            history.append({"role": "assistant", "content": response.text})
            st.session_state[history_key] = history
            # Defer clearing the text area to the next rerun (see note above);
            # assigning input_key directly here would crash the page.
            st.session_state[clear_key] = True
            st.markdown(response.text)
83 changes: 83 additions & 0 deletions components/chat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
"""Reusable chat widget for conversational what-if analysis."""
from __future__ import annotations

import re
from typing import Callable, Tuple

import pandas as pd
import streamlit as st

from services.ai import summarize_flash_results

_TEMPERATURE_DIRECTIVE = re.compile(r"(-?\d+(?:\.\d+)?)\s*(?:c|°c|degc)", re.IGNORECASE)
_PRESSURE_DIRECTIVE = re.compile(r"(-?\d+(?:\.\d+)?)\s*(?:bar|bara)", re.IGNORECASE)


def _apply_adjustments(schedule: pd.DataFrame, message: str) -> pd.DataFrame:
updated = schedule.copy(deep=True)
temp_matches = list(_TEMPERATURE_DIRECTIVE.finditer(message))
pres_matches = list(_PRESSURE_DIRECTIVE.finditer(message))
if temp_matches:
target_temp = float(temp_matches[-1].group(1))
updated['Temperature (C)'] = target_temp
if pres_matches:
target_pres = float(pres_matches[-1].group(1))
updated['Pressure (bara)'] = target_pres
return updated


def render_what_if_chat(
    fluid_df: pd.DataFrame,
    schedule_df: pd.DataFrame,
    is_plus_fluid: bool,
    run_flash: Callable[[pd.DataFrame, pd.DataFrame, bool], Tuple[pd.DataFrame, float, float]],
) -> None:
    """Render a sidebar chat widget that can iterate on TP flash scenarios.

    Args:
        fluid_df: Feed composition table passed straight to ``run_flash``.
        schedule_df: Baseline T/P schedule; user messages may override its
            temperature/pressure columns for a what-if run.
        is_plus_fluid: Forwarded unchanged to ``run_flash``.
        run_flash: Callback returning ``(results_df, last_temp, last_pres)``.
    """

    chat_key = "what_if_chat_history"
    if chat_key not in st.session_state:
        st.session_state[chat_key] = []
    history = st.session_state[chat_key]
    # Migrate legacy history entries stored as (role, content) tuples.
    if history and isinstance(history[0], tuple):
        st.session_state[chat_key] = [
            {"role": entry[0], "content": entry[1]} for entry in history if len(entry) >= 2
        ]
        history = st.session_state[chat_key]

    # BUGFIX: a widget's session-state value may only be assigned BEFORE the
    # widget is instantiated in a run; the original cleared "what_if_prompt"
    # after st.text_input, which raises StreamlitAPIException and prevents the
    # rerun. The send handler now sets a flag and the reset happens here.
    if st.session_state.pop("what_if_prompt_clear", False):
        st.session_state["what_if_prompt"] = ""

    with st.sidebar.expander("What-if assistant", expanded=False):
        st.markdown("Enter adjustments like 'try 40 °C at 15 bar' to explore new flashes.")
        user_message = st.text_input("Ask the assistant", key="what_if_prompt")
        if st.button("Send", key="what_if_send") and user_message:
            history.append({"role": "user", "content": user_message})
            updated_schedule = _apply_adjustments(schedule_df, user_message)
            results_df, last_temp, last_pres = run_flash(fluid_df, updated_schedule, is_plus_fluid)
            prior_turns = [
                f"{('User' if turn['role'] == 'user' else 'Assistant')}: {turn['content']}"
                for turn in history[-6:]
            ]
            scenario_context = {
                "temperature": f"{last_temp:.2f} °C" if not pd.isna(last_temp) else "n/a",
                "pressure": f"{last_pres:.2f} bara" if not pd.isna(last_pres) else "n/a",
                "latest_request": user_message,
            }
            if prior_turns:
                scenario_context["recent_conversation"] = "\n".join(prior_turns)
            summary = summarize_flash_results(
                results_df,
                scenario_context,
            )
            history.append(
                {
                    "role": "assistant",
                    "content": summary,
                    "temperature": last_temp,
                    "pressure": last_pres,
                }
            )
            st.session_state[chat_key] = history
            # Defer clearing the input to the next rerun (see note above).
            st.session_state["what_if_prompt_clear"] = True
            st.experimental_rerun()

        for turn in history[-8:]:
            speaker = "You" if turn["role"] == "user" else "Assistant"
            st.markdown(f"**{speaker}:** {turn['content']}")
22 changes: 22 additions & 0 deletions configs/ai_pages.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
{
"tp_flash": {
"title": "TP Flash assistant",
"description": "Discuss how to prepare feed compositions and interpret TP flash outputs.",
"system_prompt": "You are an experienced thermodynamics engineer helping colleagues run NeqSim simulations."
},
"gas_hydrate": {
"title": "Hydrate assistant",
"description": "Ask about hydrate inhibitors, composition tips, or interpreting the hydrate curve.",
"system_prompt": "Provide practical gas hydrate modelling advice for offshore process engineers."
},
"lng_ageing": {
"title": "LNG ageing assistant",
"description": "Get tips on simulating LNG weathering scenarios and selecting compositions.",
"system_prompt": "Guide the user through LNG ageing simulations with a focus on safety considerations."
},
"property_generator": {
"title": "Property helper",
"description": "Ask how to compute or interpret physical properties in the generator tool.",
"system_prompt": "Support users by describing property correlations and required inputs."
}
}
28 changes: 28 additions & 0 deletions docs/ai.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# AI-assisted workflows

This project ships with lightweight AI scaffolding to help configure and interpret
NeqSim simulations. The helpers include deterministic “offline mode” templates so the
Streamlit UI remains fully functional without API credentials. When a valid
`st.make_request` integration is available, the helpers automatically forward prompts to
the configured LLM provider.

## Available assistants

* **TP Flash** – supports natural-language scenario planning, validation guidance, result
summaries, knowledge lookups, and a conversational what-if exploration widget.
* **Gas Hydrate, LNG Ageing, Property Generator** – each page exposes a contextual
assistant that reuses the shared helper framework defined in `components/assistant.py`.

Both the shared helper and the what-if widget maintain per-session conversation history so
follow-up prompts automatically include recent context, whether the app is online or using
offline fallbacks.

## Extending the system

1. Add metadata for the new page to `configs/ai_pages.json`.
2. Import and call `render_ai_helper` with any runtime context you want to surface.
3. If the feature requires model interaction, build a helper in `services/ai.py` or
`services/retrieval.py` and keep API access logic centralized in `services/ai_core.py`.

This structure keeps prompts, retrieval logic, and UI wiring modular so additional
Streamlit pages can opt in with only a few lines of code.
Loading