/ examples / ollama-multiple-fn.ipynb
ollama-multiple-fn.ipynb
  1  {
  2   "cells": [
  3    {
  4     "cell_type": "markdown",
  5     "metadata": {},
  6     "source": [
  7      "## Multiple Connected Function Calling with Ollama\n",
  8      "\n",
  9      "### Requirements\n",
 10      "\n",
 11      "#### 1. Ollama\n",
 12      "\n",
  13      "Ollama installation instructions per OS (macOS, Linux, Windows) can be found on [their website](https://ollama.com/download). On Linux, simply run the cell below (if Ollama is not already installed): "
 14     ]
 15    },
 16    {
 17     "cell_type": "code",
 18     "execution_count": null,
 19     "metadata": {},
 20     "outputs": [],
 21     "source": [
 22      "!curl -fsSL https://ollama.com/install.sh | sh"
 23     ]
 24    },
 25    {
 26     "cell_type": "markdown",
 27     "metadata": {},
 28     "source": [
 29      "#### 2. Python Ollama Library\n",
 30      "\n",
  31      "Install the Python client library with pip:"
 32     ]
 33    },
 34    {
 35     "cell_type": "code",
 36     "execution_count": null,
 37     "metadata": {},
 38     "outputs": [],
 39     "source": [
 40      "%pip install ollama"
 41     ]
 42    },
 43    {
 44     "cell_type": "markdown",
 45     "metadata": {},
 46     "source": [
 47      "#### 3. Pull the model from Ollama\n",
 48      "\n",
 49      "Download the q8 quantized NousHermes-2-Pro-Mistral-7B from Ollama (uploaded by adrienbrault):"
 50     ]
 51    },
 52    {
 53     "cell_type": "code",
 54     "execution_count": null,
 55     "metadata": {},
 56     "outputs": [],
 57     "source": [
 58      "!ollama pull adrienbrault/nous-hermes2pro:Q8_0"
 59     ]
 60    },
 61    {
 62     "cell_type": "markdown",
 63     "metadata": {},
 64     "source": [
 65      "### Usage\n",
 66      "\n",
 67      "#### 1. Define Tools"
 68     ]
 69    },
 70    {
 71     "cell_type": "code",
 72     "execution_count": 1,
 73     "metadata": {},
 74     "outputs": [],
 75     "source": [
 76      "import random\n",
 77      "\n",
 78      "def get_weather_forecast(location: str) -> dict[str, str]:\n",
 79      "    \"\"\"Retrieves the weather forecast for a given location\"\"\"\n",
 80      "    # Mock values for test\n",
 81      "    return {\n",
 82      "        \"location\": location,\n",
 83      "        \"forecast\": \"sunny\",\n",
 84      "        \"temperature\": \"25°C\",\n",
 85      "    }\n",
 86      "\n",
 87      "def get_random_city() -> str:\n",
 88      "    \"\"\"Retrieves a random city from a list of cities\"\"\"\n",
 89      "    cities = [\"Groningen\", \"Enschede\", \"Amsterdam\", \"Istanbul\", \"Baghdad\", \"Rio de Janeiro\", \"Tokyo\", \"Kampala\"]\n",
 90      "    return random.choice(cities)\n",
 91      "\n",
 92      "def get_random_number() -> int:\n",
 93      "    \"\"\"Retrieves a random number\"\"\"\n",
 94      "    # Mock value for test\n",
 95      "    return 31"
 96     ]
 97    },
 98    {
 99     "cell_type": "markdown",
100     "metadata": {},
101     "source": [
102      "#### 2. Define Function Caller\n",
103      "\n",
 104      "For this example in Jupyter format, I'm simply registering the functions in a dictionary inside a small class. In a Python project, you can use the implementation here as an inspiration: https://github.com/AtakanTekparmak/ollama_langhcain_fn_calling/tree/main"
105     ]
106    },
107    {
108     "cell_type": "code",
109     "execution_count": 2,
110     "metadata": {},
111     "outputs": [],
112     "source": [
113      "import inspect\n",
114      "\n",
class FunctionCaller:
    """
    Registry of callable tools for the model.

    Builds the JSON-serializable metadata block embedded in the system prompt
    and executes model-requested function calls, threading the output of
    earlier calls into the parameters of later ones via `self.outputs`.
    """

    def __init__(self):
        # Register the available tools by name (names must match the prompt metadata).
        self.functions = {
            "get_weather_forecast": get_weather_forecast,
            "get_random_city": get_random_city,
            "get_random_number": get_random_number,
        }
        # Results of executed calls, keyed by the model-chosen "output" variable name.
        self.outputs = {}

    def create_functions_metadata(self) -> list[dict]:
        """Creates the functions metadata for the prompt.

        Returns:
            list[dict]: One entry per registered function with its name,
            description (first docstring line), parameters, and return type,
            all derived from the function's annotations.
        """
        def format_type(p_type: str) -> str:
            """Normalize str(annotation): "<class 'str'>" -> "str"."""
            if p_type.startswith("<class"):
                # Extract the bare class name from the repr.
                p_type = p_type.split("'")[1]
            return p_type

        functions_metadata = []
        for name, function in self.functions.items():
            # First docstring line doubles as the tool description;
            # guard against functions with no docstring at all.
            descriptions = (function.__doc__ or "").split("\n")
            annotations = function.__annotations__
            functions_metadata.append({
                "name": name,
                "description": descriptions[0],
                "parameters": {
                    "properties": [
                        # The "return" annotation is not a parameter.
                        {"name": param_name, "type": format_type(str(param_type))}
                        for param_name, param_type in annotations.items()
                        if param_name != "return"
                    ],
                    "required": [p for p in annotations if p != "return"],
                } if annotations else {},
                "returns": [
                    {
                        "name": name + "_output",
                        # Empty string when the function has no return annotation
                        # (the original raised KeyError in that case).
                        "type": format_type(str(annotations.get("return", ""))),
                    }
                ],
            })

        return functions_metadata

    def call_function(self, function):
        """
        Call the registered function described by the given call spec.

        Args:
            function (dict): {"name": str, "params": dict (optional), "output": str}.
                Param values that name a previously stored output are replaced
                by that output before the call.

        Returns:
            The tool's return value (also stored in self.outputs under the
            spec's "output" name).
        """

        def resolve_params(params: dict) -> dict:
            """Replace any param value that names a previous output with that output."""
            for key, value in params.items():
                if value in self.outputs:
                    params[key] = self.outputs[value]
            return params

        function_name = function["name"]

        # Calls without "params" invoke the function with no arguments.
        function_input = function.get("params")
        if function_input:
            function_input = resolve_params(function_input)
            output = self.functions[function_name](**function_input)
        else:
            output = self.functions[function_name]()

        # Store under the model-chosen name so later calls can reference it.
        self.outputs[function["output"]] = output
        return output
200     ]
201    },
202    {
203     "cell_type": "markdown",
204     "metadata": {},
205     "source": [
206      "#### 3. Setup The Function Caller and Prompt"
207     ]
208    },
209    {
210     "cell_type": "code",
211     "execution_count": 3,
212     "metadata": {},
213     "outputs": [
214      {
215       "name": "stdout",
216       "output_type": "stream",
217       "text": [
218        "['Retrieves the weather forecast for a given location']\n",
219        "['Retrieves a random city from a list of cities']\n",
220        "['Retrieves a random number']\n"
221       ]
222      }
223     ],
224     "source": [
# Instantiate the FunctionCaller, which registers the three demo tools by name.
function_caller = FunctionCaller()

# Build the tool descriptions that get embedded in the system prompt below.
# NOTE: this also prints each function's docstring lines as a side effect.
functions_metadata = function_caller.create_functions_metadata()
230     ]
231    },
232    {
233     "cell_type": "code",
234     "execution_count": 18,
235     "metadata": {},
236     "outputs": [],
237     "source": [
import json

# Create the system prompt: intro + <tools> metadata + output-format instructions.
prompt_beginning = """
You are an AI assistant that can help the user with a variety of tasks. You have access to the following functions:

"""

# Bug fix: the first example line was missing the closing quote on the
# "output" value, showing the model malformed JSON as the expected format.
system_prompt_end = """

When the user asks you a question, if you need to use functions, provide ONLY the function calls, and NOTHING ELSE, in the format:
<function_calls>    
[
    { "name": "function_name_1", "params": { "param_1": "value_1", "param_2": "value_2" }, "output": "The output variable name, to be possibly used as input for another function"},
    { "name": "function_name_2", "params": { "param_3": "value_3", "param_4": "output_1"}, "output": "The output variable name, to be possibly used as input for another function"},
    ...
]
"""

# Inject the generated metadata between the intro and the format instructions.
system_prompt = prompt_beginning + f"<tools> {json.dumps(functions_metadata, indent=4)} </tools>" + system_prompt_end
257     ]
258    },
259    {
260     "cell_type": "markdown",
261     "metadata": {},
262     "source": [
263      "#### 4. Final Usage"
264     ]
265    },
266    {
267     "cell_type": "code",
268     "execution_count": 35,
269     "metadata": {},
270     "outputs": [
271      {
272       "name": "stdout",
273       "output_type": "stream",
274       "text": [
275        "{'model': 'adrienbrault/nous-hermes2pro:Q4_0', 'created_at': '2024-04-25T16:34:12.473214Z', 'message': {'role': 'assistant', 'content': '<function_calls>\\n[\\n    {\\n        \"name\": \"get_random_city\",\\n        \"output\": \"random_city\"\\n    },\\n    {\\n        \"name\": \"get_weather_forecast\",\\n        \"params\": {\\n            \"location\": \"random_city\"\\n        },\\n        \"output\": \"weather_forecast\"\\n    }\\n]'}, 'done': True, 'total_duration': 6260727917, 'load_duration': 19787625, 'prompt_eval_duration': 1078901000, 'eval_count': 89, 'eval_duration': 5150607000}\n",
276        "Function calls:\n",
277        "[{'name': 'get_random_city', 'output': 'random_city'}, {'name': 'get_weather_forecast', 'params': {'location': 'random_city'}, 'output': 'weather_forecast'}]\n"
278       ]
279      }
280     ],
281     "source": [
import ollama

# Compose the prompt 
user_query = "Can you get me the weather forecast for a random city?"

# Get the response from the model (tag must match what was pulled with `ollama pull`)
model_name = 'adrienbrault/nous-hermes2pro:Q8_0'
messages = [
    {'role': 'system', 'content': system_prompt,
    },
    {'role': 'user', 'content': user_query}
]
response = ollama.chat(model=model_name, messages=messages)
print(response)

# Get the function calls from the response
function_calls = response["message"]["content"]
# If it starts with a <function_calls> tag, keep only what follows it
# (comment previously said "ends with", which contradicted the code)
if function_calls.startswith("<function_calls>"):
    function_calls = function_calls.split("<function_calls>")[1]
# Robustness: drop a closing </function_calls> tag if the model emitted one
function_calls = function_calls.split("</function_calls>")[0]

# Read function calls as json; fall back to an empty list on malformed output
try:
    function_calls_json: list[dict[str, str]] = json.loads(function_calls)
except json.JSONDecodeError:
    function_calls_json = []
    print("Model response not in desired JSON format")
finally:
    print("Function calls:")
    print(function_calls_json)
311     ]
312    },
313    {
314     "cell_type": "code",
315     "execution_count": 36,
316     "metadata": {},
317     "outputs": [
318      {
319       "name": "stdout",
320       "output_type": "stream",
321       "text": [
322        "Agent Response: {'location': 'Kampala', 'forecast': 'sunny', 'temperature': '25°C'}\n"
323       ]
324      }
325     ],
326     "source": [
# Execute each requested call in order; later calls may consume earlier outputs.
# Only the last call's result is kept for display.
output = ""
for call in function_calls_json:
    result = function_caller.call_function(call)
    output = f"Agent Response: {result}"

print(output)
333     ]
334    },
335    {
336     "cell_type": "code",
337     "execution_count": null,
338     "metadata": {},
339     "outputs": [],
340     "source": []
341    }
342   ],
343   "metadata": {
344    "kernelspec": {
345     "display_name": "Python 3",
346     "language": "python",
347     "name": "python3"
348    },
349    "language_info": {
350     "codemirror_mode": {
351      "name": "ipython",
352      "version": 3
353     },
354     "file_extension": ".py",
355     "mimetype": "text/x-python",
356     "name": "python",
357     "nbconvert_exporter": "python",
358     "pygments_lexer": "ipython3",
359     "version": "3.12.3"
360    }
361   },
362   "nbformat": 4,
363   "nbformat_minor": 2
364  }