# src/graph/nodes.py
 1  from langchain_core.documents import Document
 2  # from src.embeddings import get_embedding_model
 3  # from langchain_chroma.vectorstores import Chroma
 4  
 5  from langchain_core.tools import tool
 6  from langgraph.prebuilt import ToolNode
 7  from langgraph.graph.message import Messages
 8  from langchain_core.messages.base import BaseMessage
 9  from langchain_core.messages import HumanMessage, SystemMessage, ToolMessage
10  from langchain_core.language_models.chat_models import BaseChatModel
11  
12  from src.graph.state import ChatState 
13  from src.graph.appendix import initiate_llm
14  from src.vector_store.retrieval import get_context
15  
16  
# Module-level chat model, initialised once at import time and shared by every
# node in this graph.
llm: BaseChatModel = initiate_llm()
18  
19  
@tool(response_format="content_and_artifact")
def make_retrieval_node(state: ChatState, question: str, nickname: str = "goldman") -> tuple[str, list[Document]]:
    """Retrieve documents relevant to the user's question from the vector store.

    Call this tool whenever answering requires looking up stored documents.
    (This docstring doubles as the tool description the LLM sees, so it must
    actually describe when and how to use the tool.)

    Args:
        state: Graph state; the question and retrieved context are recorded on
            it as a side effect.
        question: Natural-language question to search for.
        nickname: Name of the vector-store collection to query.

    Returns:
        A ``(content, artifact)`` pair: ``content`` is the serialized document
        text handed to the LLM, ``artifact`` is the raw ``Document`` list.
    """
    retrieved_docs: list[Document] = get_context(nickname=nickname, question=question)

    # One human-readable chunk per document: metadata header + page text.
    serialized = "\n\n".join(
        (f"Source: {doc.metadata}\n" f"Content: {doc.page_content}")
        for doc in retrieved_docs
    )

    # NOTE(review): mutating `state` inside a @tool is likely a no-op for the
    # graph — LangGraph propagates state via node return values, not in-place
    # mutation of a tool argument. Confirm these writes are ever observed.
    state["question"] = question
    state["context"] = retrieved_docs
    return serialized, retrieved_docs
40  
41  
# Prebuilt graph node that executes any retrieval tool calls the LLM emits.
tools = ToolNode([make_retrieval_node])
43  
44  
def query_or_response_node(state: ChatState) -> dict[str, list[BaseMessage]]:
    """Let the LLM either answer directly or emit a retrieval tool call.

    Binds the retrieval tool to the model and invokes it with the content of
    the most recent human turn.

    Args:
        state: Graph state holding the conversation under ``"messages"``.

    Returns:
        A state update appending the model's response to ``"messages"``.

    Raises:
        ValueError: If no ``HumanMessage`` is present in the conversation.
    """
    llm_with_tools = llm.bind_tools([make_retrieval_node])

    messages_so_far: list[BaseMessage] = state["messages"]
    # Most recent human turn. A `None` default avoids a bare StopIteration,
    # which frameworks driving this as a generator can silently swallow.
    last_human = next(
        (m for m in reversed(messages_so_far) if isinstance(m, HumanMessage)),
        None,
    )
    if last_human is None:
        raise ValueError("No HumanMessage found in state['messages'].")

    response = llm_with_tools.invoke(last_human.content)

    return {
        "messages": [response]
    }
56  
57  
def generate(state: ChatState) -> dict[str, list[BaseMessage]]:
    """Generate the final answer from the most recently retrieved context.

    Args:
        state: Graph state holding the conversation under ``"messages"``,
            including the tool messages produced by the retrieval node.

    Returns:
        A state update appending the model's answer to ``"messages"``.
    """
    # Collect only the most recent contiguous run of tool messages — the
    # output of the *last* retrieval step. Without the `break`, tool results
    # from every earlier turn would leak into the context.
    recent_tool_messages: list[ToolMessage] = []
    for message in reversed(state["messages"]):
        if message.type == "tool":
            recent_tool_messages.append(message)
        else:
            break

    tool_messages = recent_tool_messages[::-1]  # restore chronological order
    context: str = "\n\n".join(msg.content for msg in tool_messages)

    system_message_content = (
        """You are a helpful chatbot whose job is to answer questions based on the context given to you.
        If the user greets you, respond in kind

        Using the information contained in the context, answer the user's question. Respond only to the question asked, but try to make the response as 
        detailed as you can, while staying within the bounds of the context provided. If the answer cannot be deduced from the context, say that you do 
        not know. Where you make reference to specific statements from the context, quote those statements first. Try to avoid repetition. Here's the 
        context below."""
        "\n\n"
        f"{context}"
    )

    prompt: list[BaseMessage] = [SystemMessage(content=system_message_content)] + list(state["messages"])
    response: BaseMessage = llm.invoke(prompt)

    # Key must be "messages": the original returned "message", which the
    # graph's messages reducer never reads — the answer was silently dropped.
    return {
        "messages": [response]
    }
89