// examples/server/utils.hpp
#pragma once

#include "llama.h"
#include "common.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <string>
#include <vector>
#include <sstream>
#include <random>
#include <thread>
#include <ctime>
#include <cctype>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11
enum error_type {
    ERROR_TYPE_INVALID_REQUEST,
    ERROR_TYPE_AUTHENTICATION,
    ERROR_TYPE_SERVER,
    ERROR_TYPE_NOT_FOUND,
    ERROR_TYPE_PERMISSION,
    ERROR_TYPE_UNAVAILABLE, // custom error
    ERROR_TYPE_NOT_SUPPORTED, // custom error
};

extern bool server_verbose;
extern bool server_log_json;

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                              \
    do                                                                     \
    {                                                                      \
        if (server_verbose)                                                \
        {                                                                  \
            server_log("VERB", __func__, __LINE__, MSG, __VA_ARGS__);      \
        }                                                                  \
    } while (0)
#endif

#define LOG_ERROR(  MSG, ...) server_log("ERR",  __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARN", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(   MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)

static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra);

template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fall back to the default value if the key is missing or its value is null
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            std::stringstream ss;
            ss << "Wrong type supplied for parameter '" << key << "'. Expected '" << json(default_value).type_name() << "', using default value.";
            LOG_WARNING(ss.str().c_str(), body);
            return default_value;
        }
    } else {
        return default_value;
    }
}

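// Illustrative usage (not part of the server): reading optional request fields
// with sensible fallbacks, assuming `body` is a parsed request object:
//
//   const int   n_predict   = json_value(body, "n_predict",   -1);
//   const float temperature = json_value(body, "temperature", 0.8f);
//   const auto  model_name  = json_value(body, "model", std::string("unknown"));
//
// A wrong type (e.g. a string supplied for "n_predict") logs a warning and
// falls back to the default instead of throwing.
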
static inline void server_log(const char * level, const char * function, int line, const char * message, const json & extra) {
    std::stringstream ss_tid;
    ss_tid << std::this_thread::get_id();
    json log = json{
        {"tid",       ss_tid.str()},
        {"timestamp", time(nullptr)},
    };

    if (server_log_json) {
        log.merge_patch({
            {"level",    level},
            {"function", function},
            {"line",     line},
            {"msg",      message},
        });

        if (!extra.empty()) {
            log.merge_patch(extra);
        }

        printf("%s\n", log.dump(-1, ' ', false, json::error_handler_t::replace).c_str());
    } else {
        char buf[1024];
        snprintf(buf, 1024, "%4s [%24s] %s", level, function, message);

        if (!extra.empty()) {
            log.merge_patch(extra);
        }
        std::stringstream ss;
        ss << buf << " |";
        for (const auto & el : log.items())
        {
            const std::string value = el.value().dump(-1, ' ', false, json::error_handler_t::replace);
            ss << " " << el.key() << "=" << value;
        }

        const std::string str = ss.str();
        printf("%.*s\n", (int)str.size(), str.data());
    }
    fflush(stdout);
}

//
// chat template utils
//

// Format the given chat. If tmpl is empty, the template is taken from the model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    size_t alloc_size = 0;
    // vector holding all the allocated strings to be passed to llama_chat_apply_template
    std::vector<std::string> str(messages.size() * 2);
    std::vector<llama_chat_message> chat(messages.size());

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];
        str[i*2 + 0]    = json_value(curr_msg, "role",    std::string(""));
        str[i*2 + 1]    = json_value(curr_msg, "content", std::string(""));
        alloc_size     += str[i*2 + 1].length();
        chat[i].role    = str[i*2 + 0].c_str();
        chat[i].content = str[i*2 + 1].c_str();
    }

    const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str();
    std::vector<char> buf(alloc_size * 2);

    // run it the first time to get the total output length
    int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());

    // if it turns out that our buffer is too small, resize it
    if ((size_t) res > buf.size()) {
        buf.resize(res);
        res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), true, buf.data(), buf.size());
    }

    const std::string formatted_chat(buf.data(), res);

    LOG_VERBOSE("formatted_chat", {{"text", formatted_chat.c_str()}});

    return formatted_chat;
}

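// Illustrative usage (hypothetical message list), assuming a loaded `model`:
//
//   std::vector<json> msgs = {
//       json{{"role", "system"}, {"content", "You are a helpful assistant."}},
//       json{{"role", "user"},   {"content", "Hello!"}},
//   };
//   std::string prompt = format_chat(model, /* tmpl = */ "", msgs); // "" -> use the model's own template
//
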
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
             "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
             "abcdefghijklmnopqrstuvwxyz"
             "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

// decode a base64 string into raw bytes; decoding stops at '=' padding or at the first non-base64 character
static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            // map the 4 base64 characters back to their 6-bit values
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            // pack the 4 six-bit values into 3 bytes
            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    // handle the final, possibly padded group of characters
    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

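// Illustrative usage (hypothetical input), e.g. decoding an image payload sent as a data URI:
//
//   const std::vector<uint8_t> bytes = base64_decode("aGVsbG8="); // decodes to the bytes of "hello"
//
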
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    std::stringstream chatcmplid;
    chatcmplid << "chatcmpl-" << random_string();

    return chatcmplid.str();
}

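// Illustrative usage:
//
//   const std::string completion_id = gen_chatcmplid(); // "chatcmpl-" followed by 32 random alphanumeric characters
//
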
//
// other common utils
//

// length of the longest common prefix of two token sequences
static size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

// length of the longest common prefix of two strings
static size_t common_part(const std::string & a, const std::string & b) {
    size_t i;
    for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {}

    return i;
}

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

// if `text` ends with a (possibly partial) prefix of `stop`, return the position in `text`
// where that partial match begins; otherwise return std::string::npos
static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}

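// Illustrative behavior (hypothetical values):
//
//   find_partial_stop_string("</s>", "Hello <") == 6;                 // the trailing "<" could start "</s>"
//   find_partial_stop_string("</s>", "Hello!")  == std::string::npos; // no partial match
//
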
// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += llama_token_to_piece(ctx, *begin);
    }

    return ret;
}

// format an incomplete UTF-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);

    // if the size is 1 and the high bit is set, this is a partial multibyte character
    //   (size > 1 means the piece is already complete)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

struct completion_token_output {
    llama_token tok;
    std::string text_to_send;

    struct token_prob {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
};

// convert a vector of completion_token_output to json
static json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs) {
    json out = json::array();

    for (const auto & prob : probs) {
        json probs_for_token = json::array();

        for (const auto & p : prob.probs) {
            const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok);
            probs_for_token.push_back(json {
                {"tok_str", tok_str},
                {"prob",    p.prob},
            });
        }

        const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok);
        out.push_back(json {
            {"content", tok_str},
            {"probs",   probs_for_token},
        });
    }

    return out;
}

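// Illustrative output shape (values are hypothetical):
//
//   [
//     {
//       "content": "Hello",
//       "probs": [ {"tok_str": "Hello", "prob": 0.91}, {"tok_str": "Hi", "prob": 0.04} ]
//     },
//     ...
//   ]
//
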
//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    llama_params["__oaicompat"] = true;

    // Map OpenAI parameters to llama.cpp parameters
    //
    // For parameters that are defined by the OpenAI documentation (e.g.
    // temperature), we explicitly specify OpenAI's intended default; we
    // need to do that because sometimes OpenAI disagrees with llama.cpp
    //
    // https://platform.openai.com/docs/api-reference/chat/create
    llama_sampling_params default_sparams;
    llama_params["model"]             = json_value(body,   "model",             std::string("unknown"));
    llama_params["frequency_penalty"] = json_value(body,   "frequency_penalty", 0.0);
    llama_params["logit_bias"]        = json_value(body,   "logit_bias",        json::object());
    llama_params["n_predict"]         = json_value(body,   "max_tokens",        -1);
    llama_params["presence_penalty"]  = json_value(body,   "presence_penalty",  0.0);
    llama_params["seed"]              = json_value(body,   "seed",              LLAMA_DEFAULT_SEED);
    llama_params["stream"]            = json_value(body,   "stream",            false);
    llama_params["temperature"]       = json_value(body,   "temperature",       1.0);
    llama_params["top_p"]             = json_value(body,   "top_p",             1.0);

    // Apply chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: the response format of this option is not yet OAI-compatible, but it seems like no one is really using it; we may need to fix it in the future
    if (body.contains("logprobs")) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs")) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    // This allows the user to use llama.cpp-specific params like "mirostat", "tfs_z", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

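// Illustrative usage (hypothetical request body), assuming a loaded `model`:
//
//   json body = json::parse(R"({
//       "model": "gpt-3.5-turbo",
//       "messages": [{"role": "user", "content": "Hi"}],
//       "max_tokens": 64,
//       "temperature": 0.7
//   })");
//   json params = oaicompat_completion_params_parse(model, body, "");
//   // params["prompt"] now holds the chat-templated prompt, params["n_predict"] == 64
//
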
static json format_final_response_oaicompat(const json & request, json result, const std::string & completion_id, bool streaming = false) {
    bool stopped_word        = result.count("stopped_word") != 0;
    bool stopped_eos         = json_value(result, "stopped_eos", false);
    int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
    int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
    std::string content      = json_value(result, "content", std::string(""));

    std::string finish_reason = "length";
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }

    json choices =
        streaming ? json::array({json{{"finish_reason", finish_reason},
                                        {"index", 0},
                                        {"delta", json::object()}}})
                  : json::array({json{{"finish_reason", finish_reason},
                                        {"index", 0},
                                        {"message", json{{"content", content},
                                                         {"role", "assistant"}}}}});

    std::time_t t = std::time(0);

    json res = json {
        {"choices", choices},
        {"created", t},
        {"model",
            json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", streaming ? "chat.completion.chunk" : "chat.completion"},
        {"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }},
        {"id", completion_id}
    };

    if (server_verbose) {
        res["__verbose"] = result;
    }

    if (result.contains("completion_probabilities")) {
        res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array());
    }

    return res;
}

// the return value is a vector, as there is one case where we might need to generate two responses
static std::vector<json> format_partial_response_oaicompat(json result, const std::string & completion_id) {
    if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
        return std::vector<json>({result});
    }

    bool first = json_value(result, "oaicompat_token_ctr", 0) == 0;
    std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL));

    bool stopped_word   = json_value(result, "stopped_word",  false);
    bool stopped_eos    = json_value(result, "stopped_eos",   false);
    bool stopped_limit  = json_value(result, "stopped_limit", false);
    std::string content = json_value(result, "content",       std::string(""));

    std::string finish_reason;
    if (stopped_word || stopped_eos) {
        finish_reason = "stop";
    }
    if (stopped_limit) {
        finish_reason = "length";
    }

    std::time_t t = std::time(0);

    json choices;

    if (!finish_reason.empty()) {
        choices = json::array({json{{"finish_reason", finish_reason},
                                    {"index", 0},
                                    {"delta", json::object()}}});
    } else {
        if (first) {
            if (content.empty()) {
                choices = json::array({json{{"finish_reason", nullptr},
                                            {"index", 0},
                                            {"delta", json{{"role", "assistant"}}}}});
            } else {
                // We have to send this as two updates to conform to OpenAI behavior
                json initial_ret = json{{"choices", json::array({json{
                                        {"finish_reason", nullptr},
                                        {"index", 0},
                                        {"delta", json{
                                            {"role", "assistant"}
                                        }}}})},
                            {"created", t},
                            {"id", completion_id},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                json second_ret = json{
                            {"choices", json::array({json{{"finish_reason", nullptr},
                                                            {"index", 0},
                                                            {"delta", json{
                                                            {"content", content}}}
                                                            }})},
                            {"created", t},
                            {"id", completion_id},
                            {"model", modelname},
                            {"object", "chat.completion.chunk"}};

                return std::vector<json>({initial_ret, second_ret});
            }
        } else {
            // Some idiosyncrasy in the task processing logic makes several trailing calls
            // with empty content; we ignore these at the callee site.
            if (content.empty()) {
                return std::vector<json>({json::object()});
            }

            choices = json::array({json{
                {"finish_reason", nullptr},
                {"index", 0},
                {"delta",
                json{
                    {"content", content},
                }},
            }});
        }
    }

    json ret = json {
        {"choices", choices},
        {"created", t},
        {"id",      completion_id},
        {"model",   modelname},
        {"object",  "chat.completion.chunk"}
    };
    if (!finish_reason.empty()) {
        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
        int num_prompt_tokens    = json_value(result, "tokens_evaluated", 0);
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }});
    }

    return std::vector<json>({ret});
}

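// Illustrative streamed chunks (hypothetical content): the first non-empty token
// yields two chunks, one carrying the assistant role and one carrying the content delta:
//
//   {"choices":[{"delta":{"role":"assistant"},"finish_reason":null,"index":0}], ...}
//   {"choices":[{"delta":{"content":"Hello"},"finish_reason":null,"index":0}], ...}
//
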
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index",     i++},
            {"object",    "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", 0},
            {"total_tokens", 0}
        }},
        {"data", data}
    };

    return res;
}

static json format_tokenizer_response(const std::vector<llama_token> & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
    }
    return json {
        {"code", code},
        {"message", message},
        {"type", type_str},
    };
}
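
// Illustrative output (hypothetical message):
//
//   format_error_response("model not found", ERROR_TYPE_NOT_FOUND)
//   // => { "code": 404, "message": "model not found", "type": "not_found_error" }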