diff --git a/community/log_analysis_multi_agent_rag/binary_score_models.py b/community/log_analysis_multi_agent_rag/binary_score_models.py
index d894541a5..f6227e29a 100644
--- a/community/log_analysis_multi_agent_rag/binary_score_models.py
+++ b/community/log_analysis_multi_agent_rag/binary_score_models.py
@@ -1,4 +1,4 @@
-from langchain_core.pydantic_v1 import BaseModel,Field
+from pydantic import BaseModel, Field
 # Data models
 class GradeDocuments(BaseModel):
     """Binary score for relevance check on retrieved documents."""
diff --git a/community/log_analysis_multi_agent_rag/graphedges.py b/community/log_analysis_multi_agent_rag/graphedges.py
index c56a2d276..859a6c864 100644
--- a/community/log_analysis_multi_agent_rag/graphedges.py
+++ b/community/log_analysis_multi_agent_rag/graphedges.py
@@ -40,8 +40,8 @@ def grade_generation_vs_documents_and_question(state):
     print("GRADE GENERATED vs QUESTION")
     try:
-        score_text = automation.answer_grader.invoke({"question": question, "generation": generation})
-        if "yes" in score_text.lower():
+        score = automation.answer_grader.invoke({"question": question, "generation": generation})
+        if score and score.get("binary_score") == "yes":
             print("DECISION: GENERATION ADDRESSES QUESTION")
             return "useful"
         else:
diff --git a/community/log_analysis_multi_agent_rag/graphnodes.py b/community/log_analysis_multi_agent_rag/graphnodes.py
index 9b257d44f..96d2dec75 100644
--- a/community/log_analysis_multi_agent_rag/graphnodes.py
+++ b/community/log_analysis_multi_agent_rag/graphnodes.py
@@ -50,7 +50,7 @@ def grade_documents(state):
         score = automation.retrieval_grader.invoke(
             {"question": question, "document": doc.page_content}
         )
-        grade = score.binary_score
+        grade = score.get("binary_score") if score else "no"
         if grade == "yes":
             print("---GRADE: DOCUMENT RELEVANT---")
             filtered_docs.append(doc)
diff --git a/community/log_analysis_multi_agent_rag/utils.py b/community/log_analysis_multi_agent_rag/utils.py
index 6a787e55e..a23398bcf 100644
--- a/community/log_analysis_multi_agent_rag/utils.py
+++ b/community/log_analysis_multi_agent_rag/utils.py
@@ -1,12 +1,20 @@
 from langchain_nvidia_ai_endpoints import ChatNVIDIA
 from langchain_core.prompts import ChatPromptTemplate
-from langchain_core.output_parsers import StrOutputParser
+from langchain_core.output_parsers import StrOutputParser, JsonOutputParser
 from binary_score_models import GradeAnswer,GradeDocuments,GradeHallucinations
 import os
 from dotenv import load_dotenv
 load_dotenv()
+import re
 import json
 
+def clean_text(text):
+    # Remove <think> blocks (including content)
+    text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
+    # Remove any standalone tags just in case
+    text = text.replace('<think>', '').replace('</think>', '')
+    return text
+
 class Nodeoutputs:
     def __init__(self, api_key, model, prompts_file):
         os.environ["NVIDIA_API_KEY"] = api_key
@@ -41,7 +49,7 @@ def setup_prompts(self):
                 ("human", self.prompts["grade_human"]),
             ]
         )
-        self.retrieval_grader = grade_prompt | self.llm.with_structured_output(GradeDocuments)
+        self.retrieval_grader = grade_prompt | self.llm | StrOutputParser() | clean_text | JsonOutputParser()
 
         hallucination_prompt = ChatPromptTemplate.from_messages(
             [
@@ -49,7 +57,7 @@ def setup_prompts(self):
                 ("human", self.prompts["hallucination_human"]),
             ]
         )
-        self.hallucination_grader = hallucination_prompt | self.llm.with_structured_output(GradeHallucinations)
+        self.hallucination_grader = hallucination_prompt | self.llm | StrOutputParser() | clean_text | JsonOutputParser()
 
         answer_prompt = ChatPromptTemplate.from_messages(
             [
@@ -57,7 +65,7 @@ def setup_prompts(self):
                 ("human", self.prompts["answer_human"]),
             ]
         )
-        self.answer_grader = answer_prompt | self.llm.with_structured_output(GradeAnswer)
+        self.answer_grader = answer_prompt | self.llm | StrOutputParser() | clean_text | JsonOutputParser()
 
     def format_docs(self, docs):
         return "\n\n".join(doc.page_content for doc in docs)
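Note: after this change each grader returns a plain dict rather than a Pydantic object, which is why the callers in graphnodes.py and graphedges.py now read score.get("binary_score"). A minimal sketch of the behaviour the new StrOutputParser | clean_text | JsonOutputParser tail is expected to have; the raw model output string below is an illustrative assumption, not taken from the repository:

import re
from langchain_core.output_parsers import JsonOutputParser

def clean_text(text):
    # Strip <think>...</think> reasoning blocks so only the JSON payload remains
    text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
    return text.replace('<think>', '').replace('</think>', '')

# Hypothetical raw output from a reasoning-style model
raw = '<think>The document mentions the failing service.</think> {"binary_score": "yes"}'
score = JsonOutputParser().parse(clean_text(raw))
print(score.get("binary_score"))  # -> "yes", the value the graph edges and nodes check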