// common/ngram-cache.cpp
#include "ngram-cache.h"
#include "common.h"
#include "log.h"

#include <cinttypes> // PRId64
#include <cstdint>
#include <cstdio>
#include <fstream>

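// Scan the last nnew tokens of inp and, for each ngram size in [ngram_min, ngram_max],
// count how often each token follows each ngram, accumulating the results in ngram_cache.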
void llama_ngram_cache_update(llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
                              std::vector<llama_token> & inp, int nnew, bool print_progress) {
    const int64_t t_start_ms = ggml_time_ms();
    const int64_t inp_size = inp.size();

    const int64_t n_todo = inp_size * (ngram_max - ngram_min + 1);
    int64_t n_done = 0;

    for (int64_t ngram_size = ngram_min; ngram_size <= ngram_max; ++ngram_size) {
        // Only the last nnew tokens produce ngrams that were not already counted:
        const int64_t i_start = std::max(inp_size - nnew, ngram_size);
        for (int64_t i = i_start; i < inp_size; ++i) {
            const int64_t ngram_start = i - ngram_size;
            llama_ngram ngram(&inp[ngram_start], ngram_size);
            const llama_token token = inp[i];

            // Increment the count of `token` following `ngram`, creating entries as needed:
            llama_ngram_cache::iterator part_it = ngram_cache.find(ngram);
            if (part_it == ngram_cache.end()) {
                llama_ngram_cache_part part;
                part.emplace(token, 1);
                ngram_cache.emplace(ngram, part);
            } else {
                llama_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
                if (token_count_it == part_it->second.end()) {
                    part_it->second.emplace(token, 1);
                } else {
                    token_count_it->second++;
                }
            }
            ++n_done;

            if (print_progress && n_done % 10000000 == 0) {
                const int64_t t_now_ms = ggml_time_ms();
                const int64_t eta_ms   = (n_todo - n_done) * (t_now_ms - t_start_ms) / n_done;
                const int64_t eta_min  = eta_ms / (60*1000);
                const int64_t eta_s    = (eta_ms - 60*1000*eta_min) / 1000;

                fprintf(stderr, "%s: %" PRId64 "/%" PRId64 " done, ETA: %02" PRId64 ":%02" PRId64 "\n", __func__, n_done, n_todo, eta_min, eta_s);
            }
        }
    }
}

// Helper function to get a token from the combined, speculative sequence of inp and draft.
// Indices below inp.size() map to inp; larger indices map to the drafted continuation,
// with the +1 offset skipping the seed token stored in draft[0].
static llama_token get_token(const std::vector<llama_token> & inp, const std::vector<llama_token> & draft, const size_t i) {
    return i < inp.size() ? inp[i] : draft[1 + i - inp.size()];
}

// If the sample size or the relative frequency of the most common token are below these
// thresholds the draft is aborted early:
constexpr int    draft_min_sample_size_lax[LLAMA_NGRAM_MAX] = { 2,  2,  1,  1};
constexpr int        draft_min_percent_lax[LLAMA_NGRAM_MAX] = {66, 50, 50, 50};
constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4,  3,  2,  2};
constexpr int     draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
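// The lax thresholds are used when drafting from the context cache (and by the
// static-only fallback), the strict ones when drafting from the dynamic cache
// (see llama_ngram_cache_draft below).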

// Helper function that tries to draft a token from only the static ngram cache:
static llama_token try_draft(llama_ngram_cache & nc_static, const llama_ngram ngram_static) {
    llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
    if (part_static_it == nc_static.end()) {
        return -1;
    }
    const llama_ngram_cache_part part_static = part_static_it->second;

    int max_count_static  = 0;
    int sum_count_static  = 0;
    llama_token max_token = -1;

    // Find the token that most frequently followed this ngram:
    for (std::pair<llama_token, int> token_count_static : part_static) {
        const llama_token token = token_count_static.first;
        const int32_t count_static  = token_count_static.second;

        if (count_static > max_count_static) {
            max_token        = token;
            max_count_static = count_static;
        }
        sum_count_static += count_static;
    }

    // Only draft if the sample is large enough and the top token is dominant enough:
    if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
        return -1;
    }
    if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
        return -1;
    }
    return max_token;
}

// Try to draft a token from primary cache (context/dynamic), validate with static cache:
static llama_token try_draft(
    llama_ngram_cache & nc_primary, const std::vector<llama_ngram> & ngrams_primary, llama_ngram_cache_part & part_static,
    const int * min_sample_size, const int * min_percent) {

    llama_token drafted_token = -1;

    // Prefer the longest ngram that yields a confident draft:
    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
        const llama_ngram ngram_primary = ngrams_primary[i];

        llama_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
        if (part_primary_it == nc_primary.end()) {
            continue;
        }
        const llama_ngram_cache_part part_primary = part_primary_it->second;

        int max_count_primary = 0;
        int max_count_static  = 0;
        int sum_count_primary = 0;
        llama_token max_token = -1;

        for (std::pair<llama_token, int> token_count_primary : part_primary) {
            const llama_token token = token_count_primary.first;

            llama_ngram_cache_part::iterator token_count_static_it = part_static.find(token);

            // Weight the primary count by the static count; tokens unseen in the
            // static cache get a weight of 1 instead of 100*count:
            const int32_t count_primary = token_count_primary.second;
            const int32_t count_static  = token_count_static_it != part_static.end() ? 100*token_count_static_it->second : 1;

            if (count_primary*count_static > max_count_primary*max_count_static) {
                max_token         = token;
                max_count_primary = count_primary;
                max_count_static  = count_static;
            }
            sum_count_primary += count_primary;
        }

        if (sum_count_primary < min_sample_size[i]) {
            continue;
        }
        if (100*max_count_primary < min_percent[i]*sum_count_primary) {
            continue;
        }
        drafted_token = max_token;
    }

    return drafted_token;
}
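
// Extend draft with up to n_draft speculative tokens following inp. For each position,
// drafting is attempted first from the context cache (lax thresholds), then from the
// dynamic cache (strict thresholds), and finally from the static cache alone; the loop
// stops as soon as no cache yields a candidate.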
void llama_ngram_cache_draft(
    std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
    llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static
) {
    GGML_ASSERT(draft.size() == 1);
    const int inp_size = inp.size();

    if (inp_size < LLAMA_NGRAM_STATIC) {
        return;
    }

    while ((int) draft.size()-1 < n_draft) {
        llama_token drafted_token = -1;

        // Build the static-size ngram from the end of the combined sequence:
        const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
        llama_ngram ngram_static;
        for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) {
            ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j);
        }
        llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
        llama_ngram_cache_part part_static;
        if (part_static_it != nc_static.end()) {
            part_static = part_static_it->second;
        }

        // cd = context + dynamic
        std::vector<llama_ngram> ngrams_cd;
        for (int ngram_size_cd = ngram_min; ngram_size_cd <= ngram_max; ++ngram_size_cd) {
            const int ngram_start_cd = inp_size-ngram_size_cd + draft.size()-1;
            llama_ngram ngram_cd;
            for (int j = ngram_start_cd; j < ngram_start_cd + ngram_size_cd; ++j) {
                ngram_cd.tokens[j-ngram_start_cd] = get_token(inp, draft, j);
            }
            ngrams_cd.push_back(ngram_cd);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
        }
        if (drafted_token == -1) {
            drafted_token = try_draft(nc_static, ngram_static);
        }

        if (drafted_token == -1) {
            break;
        }

        LOG(" - draft candidate: token=%d\n", drafted_token);
        draft.push_back(drafted_token);
    }
}
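
// Minimal usage sketch (hypothetical caller, not part of this file): fill the context
// cache from the tokens decoded so far, then request a speculative continuation.
//
//   std::vector<llama_token> inp = ...; // tokens accepted so far
//   llama_ngram_cache nc_context, nc_dynamic, nc_static;
//   llama_ngram_cache_update(nc_context, /*ngram_min =*/ 1, /*ngram_max =*/ 4, inp, inp.size(), false);
//
//   std::vector<llama_token> draft = {inp.back()}; // seed token, never read back (see get_token)
//   llama_ngram_cache_draft(inp, draft, /*n_draft =*/ 8, /*ngram_min =*/ 1, /*ngram_max =*/ 4,
//                           nc_context, nc_dynamic, nc_static);
//   // draft now holds the seed plus up to 8 drafted tokens.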
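
// Serialize ngram_cache to filename as a flat sequence of binary records, one per ngram:
//   [llama_ngram][int32_t ntokens]([llama_token][int32_t count]) * ntokens
// The structs are written raw, so the file is only portable between platforms with the
// same endianness and struct layout.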
void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename) {
    std::ofstream file_out(filename, std::ios::binary);
    for (std::pair<llama_ngram, llama_ngram_cache_part> item : ngram_cache) {
        const llama_ngram      ngram        = item.first;
        llama_ngram_cache_part token_counts = item.second;
        GGML_ASSERT(!token_counts.empty());
        const int32_t ntokens = token_counts.size();
        GGML_ASSERT(ntokens > 0);

        file_out.write(reinterpret_cast<const char *>(&ngram),   sizeof(llama_ngram));
        file_out.write(reinterpret_cast<const char *>(&ntokens), sizeof(int32_t));
        for (std::pair<llama_token, int32_t> item2 : token_counts) {
            const llama_token token = item2.first;
            const int32_t     count = item2.second;
            GGML_ASSERT(count > 0);

            file_out.write(reinterpret_cast<const char *>(&token), sizeof(llama_token));
            file_out.write(reinterpret_cast<const char *>(&count), sizeof(int32_t));
        }
    }
}
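
// Load an ngram cache previously written by llama_ngram_cache_save.
// Throws std::ifstream::failure if the file cannot be opened.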
llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
    std::ifstream hashmap_file(filename, std::ios::binary);
    if (!hashmap_file) {
        throw std::ifstream::failure("Unable to open file " + filename);
    }
    llama_ngram_cache ngram_cache;

    llama_ngram ngram;
    int32_t     ntokens;
    llama_token token;
    int32_t     count;

    char * ngramc   = reinterpret_cast<char*>(&ngram);
    char * ntokensc = reinterpret_cast<char*>(&ntokens);
    char * tokenc   = reinterpret_cast<char*>(&token);
    char * countc   = reinterpret_cast<char*>(&count);
    // Read records until EOF; each started record must be complete:
    while (hashmap_file.read(ngramc, sizeof(llama_ngram))) {
        GGML_ASSERT(!hashmap_file.eof());
        GGML_ASSERT(hashmap_file.read(ntokensc, sizeof(int32_t)));
        GGML_ASSERT(ntokens > 0);
        llama_ngram_cache_part token_counts;

        for (int i = 0; i < ntokens; ++i) {
            GGML_ASSERT(!hashmap_file.eof());
            GGML_ASSERT(hashmap_file.read(tokenc, sizeof(llama_token)));
            GGML_ASSERT(!hashmap_file.eof());
            GGML_ASSERT(hashmap_file.read(countc, sizeof(int32_t)));
            GGML_ASSERT(count > 0);
            token_counts.emplace(token, count);
        }

        ngram_cache.emplace(ngram, token_counts);
    }
    GGML_ASSERT(hashmap_file.eof());

    return ngram_cache;
}
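
// Merge ngram_cache_add into ngram_cache_target, summing the counts of (ngram, token)
// pairs that are present in both caches.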
void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add) {
    for (std::pair<llama_ngram, llama_ngram_cache_part> ngram_part : ngram_cache_add) {
        const llama_ngram      ngram = ngram_part.first;
        llama_ngram_cache_part  part = ngram_part.second;

        llama_ngram_cache::iterator part_merged_it = ngram_cache_target.find(ngram);
        if (part_merged_it == ngram_cache_target.end()) {
            // Ngram not yet present in target: copy the whole part over.
            ngram_cache_target.emplace(ngram, part);
            continue;
        }

        for (std::pair<llama_token, int32_t> token_count : part) {
            const llama_token token = token_count.first;
            const int32_t     count = token_count.second;
            GGML_ASSERT(count > 0);

            llama_ngram_cache_part::iterator token_count_merged_it = part_merged_it->second.find(token);
            if (token_count_merged_it == part_merged_it->second.end()) {
                part_merged_it->second.emplace(token, count);
                continue;
            }

            token_count_merged_it->second += count;
        }
    }
}