Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
388 changes: 388 additions & 0 deletions data/q_sql/default_0.0.1/eval_result_0.json

Large diffs are not rendered by default.

368 changes: 368 additions & 0 deletions data/q_sql/default_0.0.1/eval_result_1.json

Large diffs are not rendered by default.

398 changes: 398 additions & 0 deletions data/q_sql/default_0.0.1/eval_result_2.json

Large diffs are not rendered by default.

388 changes: 388 additions & 0 deletions data/q_sql/default_0.0.1/eval_result_3.json

Large diffs are not rendered by default.

373 changes: 373 additions & 0 deletions data/q_sql/default_0.0.1/eval_result_4.json

Large diffs are not rendered by default.

378 changes: 378 additions & 0 deletions data/q_sql/default_0.0.1/eval_result_5.json

Large diffs are not rendered by default.

477 changes: 477 additions & 0 deletions data/q_sql/refine_update_0.0.1/eval_result_0.json

Large diffs are not rendered by default.

448 changes: 448 additions & 0 deletions data/q_sql/refine_update_0.0.1/eval_result_1.json

Large diffs are not rendered by default.

472 changes: 472 additions & 0 deletions data/q_sql/refine_update_0.0.1/eval_result_2.json

Large diffs are not rendered by default.

447 changes: 447 additions & 0 deletions data/q_sql/refine_update_0.0.1/eval_result_3.json

Large diffs are not rendered by default.

456 changes: 456 additions & 0 deletions data/q_sql/refine_update_0.0.1/eval_result_4.json

Large diffs are not rendered by default.

442 changes: 442 additions & 0 deletions data/q_sql/refine_update_0.0.1/eval_result_5.json

Large diffs are not rendered by default.

51 changes: 51 additions & 0 deletions interface/lang2sql.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import streamlit as st
from langchain_core.messages import HumanMessage
from llm_utils.graph import builder
from langchain.chains.sql_database.prompt import SQL_PROMPTS

# Streamlit app title.
st.title("Lang2SQL")

# Collect the user's natural-language query; the default value is a sample
# question (Korean: "a query counting unique users from customer data").
user_query = st.text_area(
    "쿼리를 입력하세요:",
    value="고객 데이터를 기반으로 유니크한 유저 수를 카운트하는 쿼리",
)

# Let the user pick a target database dialect. Options come from LangChain's
# SQL_PROMPTS mapping, so only dialects with a ready-made prompt are offered.
user_database_env = st.selectbox(
    "db 환경정보를 입력하세요:",
    options=SQL_PROMPTS.keys(),
    index=0,
)


def summarize_total_tokens(data):
    """Sum the ``total_tokens`` recorded in each message's usage metadata.

    Args:
        data: Iterable of message-like objects that may carry a
            ``usage_metadata`` dict (e.g. LangChain ``AIMessage`` objects).

    Returns:
        int: Total token count across all messages. Messages without usage
        metadata contribute 0.
    """
    total_tokens = 0
    for item in data:
        # usage_metadata may be missing OR present-but-None (HumanMessage
        # instances typically have no usage data); normalize both cases to
        # an empty dict so .get() below is always safe.
        token_usage = getattr(item, "usage_metadata", None) or {}
        total_tokens += token_usage.get("total_tokens", 0)
    return total_tokens


# Run the Lang2SQL pipeline when the user clicks the button.
if st.button("쿼리 실행"):
    # Compile the LangGraph workflow and invoke it with the user's input.
    graph = builder.compile()

    res = graph.invoke(
        input={
            "messages": [HumanMessage(content=user_query)],
            "user_database_env": user_database_env,
            "best_practice_query": "",
        }
    )
    # Aggregate token usage across all messages produced by the run.
    total_tokens = summarize_total_tokens(res["messages"])

    # Display the results.
    st.write("총 토큰 사용량:", total_tokens)
    # st.write("결과:", res["generated_query"].content)
    # NOTE(review): "generated_query" is concatenated as a plain str here,
    # while "refined_input" below is accessed via .content — confirm the
    # graph returns a string (not a message object) for "generated_query".
    st.write("결과:", "\n\n```sql\n" + res["generated_query"] + "\n```")
    st.write("결과 설명:\n\n", res["messages"][-1].content)
    st.write("AI가 재해석한 사용자 질문:\n", res["refined_input"].content)
    st.write("참고한 테이블 목록:", res["searched_tables"])
52 changes: 6 additions & 46 deletions interface/streamlit_app.py
Original file line number Diff line number Diff line change
@@ -1,51 +1,11 @@
import streamlit as st
from langchain_core.messages import HumanMessage
from llm_utils.graph import builder
from langchain.chains.sql_database.prompt import SQL_PROMPTS

# Streamlit 앱 제목
st.title("Lang2SQL")

# 사용자 입력 받기
user_query = st.text_area(
"쿼리를 입력하세요:",
value="고객 데이터를 기반으로 유니크한 유저 수를 카운트하는 쿼리",
# Multipage router: register the app's pages so the user can switch between
# the main Lang2SQL page and the evaluation-visualization page.
pg = st.navigation(
    [
        st.Page("lang2sql.py", title="Lang2SQL"),
        st.Page("viz_eval.py", title="Lang2SQL Evaluation 시각화"),
    ]
)

user_database_env = st.selectbox(
"db 환경정보를 입력하세요:",
options=SQL_PROMPTS.keys(),
index=0,
)


# Token usage 집계 함수 정의
def summarize_total_tokens(data):
total_tokens = 0
for item in data:
token_usage = getattr(item, "usage_metadata", {})
total_tokens += token_usage.get("total_tokens", 0)
return total_tokens


# 버튼 클릭 시 실행
if st.button("쿼리 실행"):
# 그래프 컴파일 및 쿼리 실행
graph = builder.compile()

res = graph.invoke(
input={
"messages": [HumanMessage(content=user_query)],
"user_database_env": user_database_env,
"best_practice_query": "",
}
)
total_tokens = summarize_total_tokens(res["messages"])

# 결과 출력
st.write("총 토큰 사용량:", total_tokens)
# st.write("결과:", res["generated_query"].content)
st.write("결과:", "\n\n```sql\n" + res["generated_query"] + "\n```")
st.write("결과 설명:\n\n", res["messages"][-1].content)
st.write("AI가 재해석한 사용자 질문:\n", res["refined_input"].content)
st.write("참고한 테이블 목록:", res["searched_tables"])
# Render whichever page is currently selected in the navigation.
pg.run()
Loading