From 3b4593ae20dd570c6eeab8a577622f729239c141 Mon Sep 17 00:00:00 2001
From: Prabhat Yadav
Date: Sat, 7 Feb 2026 12:50:02 +0530
Subject: [PATCH 1/2] fix(backend): replace decommissioned gemma2-9b-it with llama-3.3-70b-versatile

---
 backend/app/modules/bias_detection/check_bias.py  | 2 +-
 backend/app/modules/chat/llm_processing.py        | 2 +-
 backend/app/modules/facts_check/llm_processing.py | 4 ++--
 backend/app/modules/langgraph_nodes/judge.py      | 2 +-
 backend/app/modules/langgraph_nodes/sentiment.py  | 2 +-
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/backend/app/modules/bias_detection/check_bias.py b/backend/app/modules/bias_detection/check_bias.py
index a0644529..dee0faa1 100644
--- a/backend/app/modules/bias_detection/check_bias.py
+++ b/backend/app/modules/bias_detection/check_bias.py
@@ -61,7 +61,7 @@ def check_bias(text):
                 "content": (f"Give bias score to the following article \n\n{text}"),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.3,
         max_tokens=512,
     )
diff --git a/backend/app/modules/chat/llm_processing.py b/backend/app/modules/chat/llm_processing.py
index 2d5134fa..70e8ace1 100644
--- a/backend/app/modules/chat/llm_processing.py
+++ b/backend/app/modules/chat/llm_processing.py
@@ -55,7 +55,7 @@ def ask_llm(question, docs):
     """

     response = client.chat.completions.create(
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         messages=[
             {"role": "system", "content": "Use only the context to answer."},
             {"role": "user", "content": prompt},
diff --git a/backend/app/modules/facts_check/llm_processing.py b/backend/app/modules/facts_check/llm_processing.py
index dc223a85..1fe34979 100644
--- a/backend/app/modules/facts_check/llm_processing.py
+++ b/backend/app/modules/facts_check/llm_processing.py
@@ -63,7 +63,7 @@ def run_claim_extractor_sdk(state):
                 ),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.3,
         max_tokens=512,
     )
@@ -128,7 +128,7 @@ def run_fact_verifier_sdk(search_results):
                 ),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.3,
         max_tokens=256,
     )
diff --git a/backend/app/modules/langgraph_nodes/judge.py b/backend/app/modules/langgraph_nodes/judge.py
index 57100301..f3eaef06 100644
--- a/backend/app/modules/langgraph_nodes/judge.py
+++ b/backend/app/modules/langgraph_nodes/judge.py
@@ -24,7 +24,7 @@

 # Init once
 groq_llm = ChatGroq(
-    model="gemma2-9b-it",
+    model="llama-3.3-70b-versatile",
     temperature=0.0,
     max_tokens=10,
 )
diff --git a/backend/app/modules/langgraph_nodes/sentiment.py b/backend/app/modules/langgraph_nodes/sentiment.py
index fef1d39d..a40c6a41 100644
--- a/backend/app/modules/langgraph_nodes/sentiment.py
+++ b/backend/app/modules/langgraph_nodes/sentiment.py
@@ -49,7 +49,7 @@ def run_sentiment_sdk(state):
                 ),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.2,
         max_tokens=3,
     )

From f18101f149a302e97731fd329bc3ee98bcbb2a49 Mon Sep 17 00:00:00 2001
From: Prabhat Yadav
Date: Sat, 7 Feb 2026 13:05:49 +0530
Subject: [PATCH 2/2] Fix: Add fallback handling for LLM JSON parsing errors

---
 backend/app/modules/facts_check/llm_processing.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/backend/app/modules/facts_check/llm_processing.py b/backend/app/modules/facts_check/llm_processing.py
index 1fe34979..bba253c9 100644
--- a/backend/app/modules/facts_check/llm_processing.py
+++ b/backend/app/modules/facts_check/llm_processing.py
@@ -144,7 +144,13 @@ def run_fact_verifier_sdk(search_results):
         parsed = json.loads(content)
     except Exception as parse_err:
         logger.error(f"LLM JSON parse error: {parse_err}")
-
+        parsed = {
+            "verdict": "Unknown",
+            "explanation": f"Parse error: {parse_err}",
+            "original_claim": claim,
+            "source_link": source,
+        }
+        results_list.append(parsed)
     return {
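Reviewer note: the fallback introduced in PATCH 2/2 can be read as a standalone sketch. The snippet below restates that logic outside the diff, assuming `claim`, `source`, and a module-level `logger` exist in run_fact_verifier_sdk as the surrounding context suggests; the parse_verdict helper name and signature are illustrative only and are not code from the repository.

    import json
    import logging

    logger = logging.getLogger(__name__)


    def parse_verdict(content: str, claim: str, source: str) -> dict:
        """Parse the model's JSON verdict, mirroring the fallback from PATCH 2/2."""
        try:
            parsed = json.loads(content)
        except Exception as parse_err:
            logger.error(f"LLM JSON parse error: {parse_err}")
            # Placeholder verdict so downstream code still receives a well-formed dict.
            parsed = {
                "verdict": "Unknown",
                "explanation": f"Parse error: {parse_err}",
                "original_claim": claim,
                "source_link": source,
            }
        return parsed

With this shape, a malformed completion such as parse_verdict("not json", "the claim", "https://example.com") yields an "Unknown" verdict instead of raising, which is what keeps the verification loop producing one result per claim.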