# chat_workflow/workflows/resume_optimizer.py
  1  import chainlit as cl
  2  from chainlit import logger
  3  from pypdf import PdfReader
  4  from chainlit.input_widget import Select
  5  from langgraph.graph import StateGraph, END
  6  from langchain_core.messages import HumanMessage
  7  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate
  8  from langchain_core.runnables import Runnable, RunnableConfig
  9  from .base import BaseWorkflow, BaseState
 10  from ..llm import llm_factory, ModelCapability
 11  # from ..tools import BasicToolNode
 12  # from ..tools.search import get_search_tools
 13  # from ..tools.time import get_datetime_now
 14  
 15  
class GraphState(BaseState):
    """State carried through the resume-optimizer graph.

    Extends BaseState (presumably contributes the ``messages`` history used
    by the nodes below — confirm against ``.base``).
    """

    # Model name of the chatbot (selected in the chat settings dropdown)
    chat_model: str

    # Resume text extracted from the uploaded PDF; "" until extraction runs
    resume_text: str

    # Job descriptions
    # job_descriptions: Sequence[str]
 25  
 26  
 27  class ResumeOptimizerWorkflow(BaseWorkflow):
 28      def __init__(self):
 29          super().__init__()
 30  
 31          # TODO: check tool availability
 32          # self.tools = [get_datetime_now] + get_search_tools()
 33          self.capabilities = {ModelCapability.TEXT_TO_TEXT}
 34  
 35      def create_graph(self) -> StateGraph:
 36          graph = StateGraph(GraphState)
 37          # Nodes
 38          graph.add_node("resume_extractor", self.resume_extractor_node)
 39          graph.add_node("chat", self.chat_node)
 40          # graph.add_node("tools", BasicToolNode(self.tools))
 41  
 42          # Edges
 43          graph.add_edge("resume_extractor", "chat")
 44          graph.add_edge("chat", END)
 45          # graph.add_conditional_edges("chat", self.tool_routing)
 46  
 47          # Entry point
 48          graph.set_conditional_entry_point(
 49              lambda state: "resume_extractor" if state["resume_text"] == "" else "chat",
 50          )
 51          return graph
 52  
 53      async def resume_extractor_node(self, state: GraphState, config: RunnableConfig) -> GraphState:
 54          files = None
 55  
 56          # Wait for the user to upload a file
 57          while files == None:
 58              files = await cl.AskFileMessage(
 59                  content="Please upload your resume PDF to begin!", accept=["application/pdf"],
 60              ).send()
 61  
 62          # Check if the file is a PDF
 63          resume_text = ""
 64          if files[0].name.endswith(".pdf"):
 65              # Read the PDF file
 66              pdf_reader = PdfReader(files[0].path)
 67  
 68              # Extract the text from the PDF
 69              for page in pdf_reader.pages:
 70                  resume_text += page.extract_text()
 71  
 72          # TODO: optimize the resume text using LLM
 73  
 74          return {
 75              "messages": [HumanMessage(content=resume_text)],
 76              "resume_text": resume_text,
 77          }
 78  
 79      async def chat_node(self, state: GraphState, config: RunnableConfig) -> GraphState:
 80  
 81          # logger.info(f"State: {state}")
 82          system_prompt = SystemMessagePromptTemplate.from_template("""
 83  You are a helpful assistant that helps users optimize their resumes for job applications. 
 84  
 85  ** Guidelines **
 86  1. Is the Resume Summary and Career Objective Clear?
 87  - Is it concise and does it highlight the candidate's professional strengths and goals?
 88  - Is it relevant to the job being applied for?
 89  2. Does the Skills Section Align with the Job Requirements?
 90  - Does the technical stack include skills valued by the employer?
 91  - Does it avoid outdated technologies and focus on core skills from the job description?
 92  3. Is Work Experience Quantified with Results?
 93  _ Are achievements presented with specific data or percentages (e.g., performance improvement or cost reduction)?
 94  _ Does it emphasize the impact of their contributions, such as faster delivery times or higher user satisfaction?
 95  4. Is the Format Clear and Well-Organized?
 96  _  Are sections and paragraphs clearly separated (e.g., using bold headings and bullet points)?
 97  _  Is the content concise and easy to read?
 98  5. Is the Project Experience Detailed and Precise?
 99  _  Does it clearly describe the project’s goal, technology stack, and the candidate's specific contributions?
100  _  Are the project outcomes presented clearly and effectively?
101  6. Does the Resume Match the Target Position?
102  _  Has the resume been adjusted for the specific job (e.g., keywords optimized for the role)?
103  _  Does it emphasize relevant experience and skills required by the role?
104  7. Is the Language Professional and Free of Errors?
105  _  Has the resume been checked for spelling and grammar mistakes?
106  _  Is the wording professional and free of unnecessary or vague descriptions?
107  8. Are Certifications and Credentials Relevant?
108  _  Does it list certifications that add value to the applied role?
109  _  Does it only include certifications relevant to the candidate's career path?
110  9. Is Open Source Contribution or Technical Work Demonstrated?
111  _  Does the resume include links to GitHub projects or other technical portfolios?
112  _  Does it describe the candidate’s role and contributions to open-source projects?
113  10. Is Contact Information Complete and Correct?
114  _  Does it include a valid phone number and email address?
115  _  Are LinkedIn or other portfolio links included?
116  
117  Based on the above guidelines, please provide a detailed and specific modification suggestions on the resume.
118  
119  """)
120  
121          prompt = ChatPromptTemplate.from_messages([
122              system_prompt,
123              MessagesPlaceholder(variable_name="messages"),
124          ])
125  
126          logger.info(f"Prompt: {prompt}")
127          llm = llm_factory.create_model(self.output_chat_model,
128                                         model=state["chat_model"])
129          chain: Runnable = prompt | llm
130          return {
131              "messages": [await chain.ainvoke({"messages": state["messages"]}, config=config)]
132          }
133  
134      def create_default_state(self) -> GraphState:
135          return {
136              "name": self.name(),
137              "messages": [],
138              "chat_model": "",
139              "resume_text": "",
140          }
141  
142      @classmethod
143      def name(cls) -> str:
144          return "Resume Optimizer"
145  
146      @property
147      def output_chat_model(self) -> str:
148          return "chat_model"
149  
150      @classmethod
151      def chat_profile(cls):
152          return cl.ChatProfile(
153              name=cls.name(),
154              markdown_description="An assistant that helps users optimize their resumes.",
155              icon="https://cdn2.iconfinder.com/data/icons/3d-resume/128/5_Experience.png",
156              starters=[
157                  cl.Starter(
158                      label="Help me analyze my resume.",
159                      message="Help me analyze my resume.",
160                      icon="https://cdn0.iconfinder.com/data/icons/3d-graphic-design-tools-1/128/Zoom_In.png",
161                  ),
162              ],
163          )
164  
165      @property
166      def chat_settings(self) -> cl.ChatSettings:
167          return cl.ChatSettings([
168              Select(
169                  id="chat_model",
170                  label="Chat Model",
171                  values=sorted(llm_factory.list_models(
172                      capabilities=self.capabilities)),
173                  initial_index=0,
174              ),
175          ])