// src/bench/bench_bitcoin.cpp
  1  // Copyright (c) 2015-2022 The Bitcoin Core developers
  2  // Distributed under the MIT software license, see the accompanying
  3  // file COPYING or http://www.opensource.org/licenses/mit-license.php.
  4  
  5  #include <bench/bench.h>
  6  #include <common/args.h>
  7  #include <crypto/sha256.h>
  8  #include <tinyformat.h>
  9  #include <util/fs.h>
 10  #include <util/string.h>
 11  #include <test/util/setup_common.h>
 12  
 13  #include <chrono>
 14  #include <cstdint>
 15  #include <cstdlib>
 16  #include <exception>
 17  #include <iostream>
 18  #include <sstream>
 19  #include <vector>
 20  
 21  using util::SplitString;
 22  
// Default benchmark name filter: a regex matching every benchmark.
static const char* DEFAULT_BENCH_FILTER = ".*";
// Default minimum runtime per benchmark, in milliseconds.
static constexpr int64_t DEFAULT_MIN_TIME_MS{10};
/** Priority level default value, run "all" priority levels */
static const std::string DEFAULT_PRIORITY{"all"};
 27  
 28  static void SetupBenchArgs(ArgsManager& argsman)
 29  {
 30      SetupHelpOptions(argsman);
 31      SetupCommonTestArgs(argsman);
 32  
 33      argsman.AddArg("-asymptote=<n1,n2,n3,...>", "Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 34      argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 35      argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 36      argsman.AddArg("-min-time=<milliseconds>", strprintf("Minimum runtime per benchmark, in milliseconds (default: %d)", DEFAULT_MIN_TIME_MS), ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_NEGATION, OptionsCategory::OPTIONS);
 37      argsman.AddArg("-output-csv=<output.csv>", "Generate CSV file with the most important benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 38      argsman.AddArg("-output-json=<output.json>", "Generate JSON file with all benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 39      argsman.AddArg("-sanity-check", "Run benchmarks for only one iteration with no output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 40      argsman.AddArg("-priority-level=<l1,l2,l3>", strprintf("Run benchmarks of one or multiple priority level(s) (%s), default: '%s'",
 41                                                             benchmark::ListPriorities(), DEFAULT_PRIORITY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 42  }
 43  
 44  // parses a comma separated list like "10,20,30,50"
 45  static std::vector<double> parseAsymptote(const std::string& str) {
 46      std::stringstream ss(str);
 47      std::vector<double> numbers;
 48      double d;
 49      char c;
 50      while (ss >> d) {
 51          numbers.push_back(d);
 52          ss >> c;
 53      }
 54      return numbers;
 55  }
 56  
 57  static uint8_t parsePriorityLevel(const std::string& str) {
 58      uint8_t levels{0};
 59      for (const auto& level: SplitString(str, ',')) {
 60          levels |= benchmark::StringToPriority(level);
 61      }
 62      return levels;
 63  }
 64  
 65  static std::vector<std::string> parseTestSetupArgs(const ArgsManager& argsman)
 66  {
 67      // Parses unit test framework arguments supported by the benchmark framework.
 68      std::vector<std::string> args;
 69      static std::vector<std::string> AVAILABLE_ARGS = {"-testdatadir"};
 70      for (const std::string& arg_name : AVAILABLE_ARGS) {
 71          auto op_arg = argsman.GetArg(arg_name);
 72          if (op_arg) args.emplace_back(strprintf("%s=%s", arg_name, *op_arg));
 73      }
 74      return args;
 75  }
 76  
// Entry point: parse options, print help if requested, then run the
// selected benchmarks via benchmark::BenchRunner.
int main(int argc, char** argv)
{
    ArgsManager argsman;
    SetupBenchArgs(argsman);
    // Select the best available SHA256 implementation so benchmark numbers
    // reflect what production code would use.
    SHA256AutoDetect();
    std::string error;
    if (!argsman.ParseParameters(argc, argv, error)) {
        tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
        return EXIT_FAILURE;
    }

    // -h / -help / -?: print usage plus benchmarking tips, then exit.
    if (HelpRequested(argsman)) {
        std::cout << "Usage:  bench_bitcoin [options]\n"
                     "\n"
                  << argsman.GetHelpMessage()
                  << "Description:\n"
                     "\n"
                     "  bench_bitcoin executes microbenchmarks. The quality of the benchmark results\n"
                     "  highly depend on the stability of the machine. It can sometimes be difficult\n"
                     "  to get stable, repeatable results, so here are a few tips:\n"
                     "\n"
                     "  * Use pyperf [1] to disable frequency scaling, turbo boost etc. For best\n"
                     "    results, use CPU pinning and CPU isolation (see [2]).\n"
                     "\n"
                     "  * Each call of run() should do exactly the same work. E.g. inserting into\n"
                     "    a std::vector doesn't do that as it will reallocate on certain calls. Make\n"
                     "    sure each run has exactly the same preconditions.\n"
                     "\n"
                     "  * If results are still not reliable, increase runtime with e.g.\n"
                     "    -min-time=5000 to let a benchmark run for at least 5 seconds.\n"
                     "\n"
                     "  * bench_bitcoin uses nanobench [3] for which there is extensive\n"
                     "    documentation available online.\n"
                     "\n"
                     "Environment Variables:\n"
                     "\n"
                     "  To attach a profiler you can run a benchmark in endless mode. This can be\n"
                     "  done with the environment variable NANOBENCH_ENDLESS. E.g. like so:\n"
                     "\n"
                     "    NANOBENCH_ENDLESS=MuHash ./bench_bitcoin -filter=MuHash\n"
                     "\n"
                     "  In rare cases it can be useful to suppress stability warnings. This can be\n"
                     "  done with the environment variable NANOBENCH_SUPPRESS_WARNINGS, e.g:\n"
                     "\n"
                     "    NANOBENCH_SUPPRESS_WARNINGS=1 ./bench_bitcoin\n"
                     "\n"
                     "Notes:\n"
                     "\n"
                     "  1. pyperf\n"
                     "     https://github.com/psf/pyperf\n"
                     "\n"
                     "  2. CPU pinning & isolation\n"
                     "     https://pyperf.readthedocs.io/en/latest/system.html\n"
                     "\n"
                     "  3. nanobench\n"
                     "     https://github.com/martinus/nanobench\n"
                     "\n";

        return EXIT_SUCCESS;
    }

    try {
        // Translate command line options into the benchmark framework's
        // Args struct, applying the documented defaults.
        benchmark::Args args;
        args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", ""));
        args.is_list_only = argsman.GetBoolArg("-list", false);
        args.min_time = std::chrono::milliseconds(argsman.GetIntArg("-min-time", DEFAULT_MIN_TIME_MS));
        args.output_csv = argsman.GetPathArg("-output-csv");
        args.output_json = argsman.GetPathArg("-output-json");
        args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
        args.sanity_check = argsman.GetBoolArg("-sanity-check", false);
        args.priority = parsePriorityLevel(argsman.GetArg("-priority-level", DEFAULT_PRIORITY));
        args.setup_args = parseTestSetupArgs(argsman);

        benchmark::BenchRunner::RunAll(args);

        return EXIT_SUCCESS;
    } catch (const std::exception& e) {
        // Benchmarks signal fatal errors via exceptions; report and fail.
        tfm::format(std::cerr, "Error: %s\n", e.what());
        return EXIT_FAILURE;
    }
}