# tests/llama_index/sample_code/simple_workflow.py
 1  from llama_index.core.workflow import (
 2      Event,
 3      StartEvent,
 4      StopEvent,
 5      Workflow,
 6      step,
 7  )
 8  from llama_index.llms.openai import OpenAI
 9  
10  import mlflow
11  
12  
class JokeEvent(Event):
    """Workflow event carrying the joke text produced by ``generate_joke``."""

    joke: str  # the generated joke, as plain text
15  
16  
class JokeFlow(Workflow):
    """Two-step workflow: generate a joke about a topic, then critique it.

    Step 1 (``generate_joke``) turns the start event's ``topic`` into a
    joke; step 2 (``critique_joke``) asks the LLM to analyze that joke and
    ends the run with the critique as the workflow result.
    """

    # One client shared by both steps (class attribute, as in the
    # upstream llama_index workflow example).
    llm = OpenAI()

    @step
    async def generate_joke(self, ev: StartEvent) -> JokeEvent:
        """Ask the LLM for a joke about ``ev.topic``."""
        completion = await self.llm.acomplete(
            f"Write your best joke about {ev.topic}."
        )
        return JokeEvent(joke=str(completion))

    @step
    async def critique_joke(self, ev: JokeEvent) -> StopEvent:
        """Have the LLM critique the joke; its answer ends the workflow."""
        completion = await self.llm.acomplete(
            f"Give a thorough analysis and critique of the following joke: {ev.joke}"
        )
        return StopEvent(result=str(completion))
34  
35  
# Build the workflow instance (abort runs after 10s, no per-step logging)
# and register it with MLflow so this file can be logged as a
# "model-from-code" model.
w = JokeFlow(timeout=10, verbose=False)
mlflow.models.set_model(w)