examples/embedding/embedding.cpp
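// Example that computes an embedding for every line of the input prompt and,
// when more than one prompt line is given, prints their cosine similarity matrix.
//
// Typical invocation (binary name and model path are illustrative and may differ per build):
//
//   ./embedding -m model.gguf -p "some text to embed"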
#include "common.h"
#include "llama.h"

#include <algorithm>
#include <ctime>
#include <random>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

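// split the input prompt into one string per line; each line is embedded separately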
static std::vector<std::string> split_lines(const std::string & s) {
    std::string line;
    std::vector<std::string> lines;
    std::stringstream ss(s);
    while (std::getline(ss, line)) {
        lines.push_back(line);
    }
    return lines;
}

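// add all tokens of one prompt to the batch as sequence `seq_id`,
// requesting output only for the last token of the sequence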
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
    for (size_t i = 0; i < tokens.size(); i++) {
        llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
    }
}

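// run the model on the batch and write one normalized embedding of size `n_embd`
// per sequence into `output`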
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
    // clear previous kv_cache values (irrelevant for embeddings)
    llama_kv_cache_clear(ctx);

    // run model
    fprintf(stderr, "%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
    if (llama_decode(ctx, batch) < 0) {
        fprintf(stderr, "%s : failed to decode\n", __func__);
    }

    for (int i = 0; i < batch.n_tokens; i++) {
        if (!batch.logits[i]) {
            continue;
        }

        // try to get sequence embeddings - supported only when pooling_type is not NONE
        const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
        if (embd == NULL) {
            embd = llama_get_embeddings_ith(ctx, i);
            if (embd == NULL) {
                fprintf(stderr, "%s: failed to get embeddings for token %d\n", __func__, i);
                continue;
            }
        }

        float * out = output + batch.seq_id[i][0] * n_embd;
        // TODO: add a parameter to optionally skip normalizing the output embedding
        llama_embd_normalize(embd, out, n_embd);
    }
}

int main(int argc, char ** argv) {
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    params.embedding = true;
    // For non-causal models, batch size must be equal to ubatch size
    params.n_ubatch = params.n_batch;

    print_build_info();

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed  = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);

    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model;
    llama_context * ctx;

    // load the model
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);

    if (n_ctx > n_ctx_train) {
        fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
                __func__, n_ctx_train, n_ctx);
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s\n", gpt_params_get_system_info(params).c_str());
    }

    // split the prompt into lines
    std::vector<std::string> prompts = split_lines(params.prompt);

    // max batch size
    const uint64_t n_batch = params.n_batch;
    GGML_ASSERT(params.n_batch >= params.n_ctx);

    // tokenize the prompts and trim
    std::vector<std::vector<int32_t>> inputs;
    for (const auto & prompt : prompts) {
        auto inp = ::llama_tokenize(ctx, prompt, true, false);
        if (inp.size() > n_batch) {
            fprintf(stderr, "%s: error: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
                    __func__, (long long int) inp.size(), (long long int) n_batch);
            return 1;
        }
        inputs.push_back(inp);
    }

    // check if the last token is SEP
    // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true'
    for (auto & inp : inputs) {
        if (inp.empty() || inp.back() != llama_token_sep(model)) {
            fprintf(stderr, "%s: warning: last token in the prompt is not SEP\n", __func__);
            fprintf(stderr, "%s:          'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__);
        }
    }

    // tokenization stats
    if (params.verbose_prompt) {
        for (int i = 0; i < (int) inputs.size(); i++) {
            fprintf(stderr, "%s: prompt %d: '%s'\n", __func__, i, prompts[i].c_str());
            fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, inputs[i].size());
            for (int j = 0; j < (int) inputs[i].size(); j++) {
                fprintf(stderr, "%6d -> '%s'\n", inputs[i][j], llama_token_to_piece(ctx, inputs[i][j]).c_str());
            }
            fprintf(stderr, "\n\n");
        }
    }

    // initialize batch
    const int n_prompts = prompts.size();
    struct llama_batch batch = llama_batch_init(n_batch, 0, 1);

    // allocate output
    const int n_embd = llama_n_embd(model);
    std::vector<float> embeddings(n_prompts * n_embd, 0);
    float * emb = embeddings.data();
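    // embeddings are stored one prompt per row: the embedding of prompt k is written at emb + k * n_embd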

    // break into batches
    int p = 0; // number of prompts processed already
    int s = 0; // number of prompts in current batch
    for (int k = 0; k < n_prompts; k++) {
        // tokens of the current prompt (already verified to fit into n_batch)
        auto & inp = inputs[k];

        const uint64_t n_toks = inp.size();

        // encode if at capacity
        if (batch.n_tokens + n_toks > n_batch) {
            float * out = emb + p * n_embd;
            batch_decode(ctx, batch, out, s, n_embd);
            llama_batch_clear(batch);
            p += s;
            s = 0;
        }

        // add to batch
        batch_add_seq(batch, inp, s);
        s += 1;
    }

    // final batch
    float * out = emb + p * n_embd;
    batch_decode(ctx, batch, out, s, n_embd);

    // print the first 16 values of each embedding, or the full embedding when there is only a single prompt
    fprintf(stdout, "\n");
    for (int j = 0; j < n_prompts; j++) {
        fprintf(stdout, "embedding %d: ", j);
        for (int i = 0; i < (n_prompts > 1 ? std::min(16, n_embd) : n_embd); i++) {
            fprintf(stdout, "%9.6f ", emb[j * n_embd + i]);
        }
        fprintf(stdout, "\n");
    }

    // print cosine similarity matrix
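    // note: the rows of `emb` were L2-normalized above, so the cosine similarity reduces to a plain dot product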
    if (n_prompts > 1) {
        fprintf(stdout, "\n");
        fprintf(stdout, "cosine similarity matrix:\n\n");
        for (int i = 0; i < n_prompts; i++) {
            for (int j = 0; j < n_prompts; j++) {
                float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
                fprintf(stdout, "%6.2f ", sim);
            }
            fprintf(stdout, "\n");
        }
    }

    // clean up
    llama_print_timings(ctx);
    llama_batch_free(batch);
    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}