diff --git a/backend/app/modules/bias_detection/check_bias.py b/backend/app/modules/bias_detection/check_bias.py
index a0644529..dee0faa1 100644
--- a/backend/app/modules/bias_detection/check_bias.py
+++ b/backend/app/modules/bias_detection/check_bias.py
@@ -61,7 +61,7 @@ def check_bias(text):
                 "content": (f"Give bias score to the following article \n\n{text}"),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.3,
         max_tokens=512,
     )
diff --git a/backend/app/modules/chat/llm_processing.py b/backend/app/modules/chat/llm_processing.py
index 2d5134fa..70e8ace1 100644
--- a/backend/app/modules/chat/llm_processing.py
+++ b/backend/app/modules/chat/llm_processing.py
@@ -55,7 +55,7 @@ def ask_llm(question, docs):
     """
 
     response = client.chat.completions.create(
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         messages=[
             {"role": "system", "content": "Use only the context to answer."},
             {"role": "user", "content": prompt},
diff --git a/backend/app/modules/facts_check/llm_processing.py b/backend/app/modules/facts_check/llm_processing.py
index dc223a85..bba253c9 100644
--- a/backend/app/modules/facts_check/llm_processing.py
+++ b/backend/app/modules/facts_check/llm_processing.py
@@ -63,7 +63,7 @@ def run_claim_extractor_sdk(state):
                 ),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.3,
         max_tokens=512,
     )
@@ -128,7 +128,7 @@ def run_fact_verifier_sdk(search_results):
                 ),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.3,
         max_tokens=256,
     )
@@ -144,7 +144,13 @@ def run_fact_verifier_sdk(search_results):
             parsed = json.loads(content)
         except Exception as parse_err:
             logger.error(f"LLM JSON parse error: {parse_err}")
-
+            parsed = {
+                "verdict": "Unknown",
+                "explanation": f"Parse error: {parse_err}",
+                "original_claim": claim,
+                "source_link": source,
+            }
+
         results_list.append(parsed)
 
     return {
diff --git a/backend/app/modules/langgraph_nodes/judge.py b/backend/app/modules/langgraph_nodes/judge.py
index 57100301..f3eaef06 100644
--- a/backend/app/modules/langgraph_nodes/judge.py
+++ b/backend/app/modules/langgraph_nodes/judge.py
@@ -24,7 +24,7 @@
 
 # Init once
 groq_llm = ChatGroq(
-    model="gemma2-9b-it",
+    model="llama-3.3-70b-versatile",
     temperature=0.0,
     max_tokens=10,
 )
diff --git a/backend/app/modules/langgraph_nodes/sentiment.py b/backend/app/modules/langgraph_nodes/sentiment.py
index fef1d39d..a40c6a41 100644
--- a/backend/app/modules/langgraph_nodes/sentiment.py
+++ b/backend/app/modules/langgraph_nodes/sentiment.py
@@ -49,7 +49,7 @@ def run_sentiment_sdk(state):
                 ),
             },
         ],
-        model="gemma2-9b-it",
+        model="llama-3.3-70b-versatile",
         temperature=0.2,
         max_tokens=3,
     )
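
Context for the run_fact_verifier_sdk hunk above: before this patch, a JSON parse failure only logged the error and left `parsed` unbound (or stale from a previous loop iteration), so the later `results_list.append(parsed)` could raise a NameError or record the wrong claim's verdict. A minimal sketch of the patched loop in isolation; the per-item keys, the `call_llm` stand-in for the Groq chat completion, and the returned dict key are assumptions for illustration, not code from this repository.

import json
import logging

logger = logging.getLogger(__name__)

def verify(search_results, call_llm):
    results_list = []
    for item in search_results:
        claim = item["claim"]          # assumed key on each search result
        source = item["source_link"]   # assumed key on each search result
        content = call_llm(claim)      # stands in for the Groq chat completion
        try:
            parsed = json.loads(content)
        except Exception as parse_err:
            logger.error(f"LLM JSON parse error: {parse_err}")
            # Fallback record mirroring the expected schema, so one bad
            # completion degrades to "Unknown" instead of crashing the batch.
            parsed = {
                "verdict": "Unknown",
                "explanation": f"Parse error: {parse_err}",
                "original_claim": claim,
                "source_link": source,
            }
        results_list.append(parsed)
    return {"results": results_list}

The design choice is to fail soft per claim: every item still yields a result with the same shape, which keeps downstream consumers of the results list from needing a special error path.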