// thirdparty/hyperfine/src/cli.rs
use std::ffi::OsString;

use clap::{
    builder::NonEmptyStringValueParser, crate_version, Arg, ArgAction, ArgMatches, Command,
    ValueHint,
};
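
/// Parse the given command-line arguments into clap's `ArgMatches`. As with
/// `Command::get_matches_from`, the first item in `args` is taken as the binary name.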
pub fn get_cli_arguments<'a, I, T>(args: I) -> ArgMatches
where
    I: IntoIterator<Item = T>,
    T: Into<OsString> + Clone + 'a,
{
    let command = build_command();
    command.get_matches_from(args)
}

/// Build the clap command for parsing command-line arguments
fn build_command() -> Command {
    Command::new("hyperfine")
        .version(crate_version!())
        .next_line_help(true)
        .hide_possible_values(true)
        .about("A command-line benchmarking tool.")
        .help_expected(true)
        .max_term_width(80)
        .arg(
            Arg::new("command")
                .help("The command to benchmark. This can be the name of an executable, a command \
                       line like \"grep -i todo\" or a shell command like \"sleep 0.5 && echo test\". \
                       The latter is only available if the shell is not explicitly disabled via \
                       '--shell=none'. If multiple commands are given, hyperfine will show a \
                       comparison of the respective runtimes.")
                .required(true)
                .action(ArgAction::Append)
                .value_hint(ValueHint::CommandString)
                .value_parser(NonEmptyStringValueParser::new()),
        )
        .arg(
            Arg::new("warmup")
                .long("warmup")
                .short('w')
                .value_name("NUM")
                .action(ArgAction::Set)
                .help(
                    "Perform NUM warmup runs before the actual benchmark. This can be used \
                     to fill (disk) caches for I/O-heavy programs.",
                ),
        )
        .arg(
            Arg::new("min-runs")
                .long("min-runs")
                .short('m')
                .action(ArgAction::Set)
                .value_name("NUM")
                .help("Perform at least NUM runs for each command (default: 10)."),
        )
        .arg(
            Arg::new("max-runs")
                .long("max-runs")
                .short('M')
                .action(ArgAction::Set)
                .value_name("NUM")
                .help("Perform at most NUM runs for each command. By default, there is no limit."),
        )
        .arg(
            Arg::new("runs")
                .long("runs")
                .conflicts_with_all(["max-runs", "min-runs"])
                .short('r')
                .action(ArgAction::Set)
                .value_name("NUM")
                .help("Perform exactly NUM runs for each command. If this option is not specified, \
                       hyperfine automatically determines the number of runs."),
        )
        .arg(
            Arg::new("setup")
                .long("setup")
                .short('s')
                .action(ArgAction::Set)
                .value_name("CMD")
                .value_hint(ValueHint::CommandString)
                .help(
                    "Execute CMD before each set of timing runs. This is useful for \
                     compiling your software with the provided parameters, or to do any \
                     other work that should happen once before a series of benchmark runs, \
                     not every time as would happen with the --prepare option."
                ),
        )
        .arg(
            Arg::new("reference")
                .long("reference")
                .action(ArgAction::Set)
                .value_name("CMD")
                .help(
                    "The reference command for the relative comparison of results. \
                     If this is unset, results are compared with the fastest command as reference."
                )
        )
        .arg(
            Arg::new("reference-name")
                .long("reference-name")
                .action(ArgAction::Set)
                .value_name("NAME")
                .help("Give a meaningful name to the reference command.")
                .requires("reference")
        )
        .arg(
            Arg::new("prepare")
                .long("prepare")
                .short('p')
                .action(ArgAction::Append)
                .num_args(1)
                .value_name("CMD")
                .value_hint(ValueHint::CommandString)
                .help(
                    "Execute CMD before each timing run. This is useful for \
                     clearing disk caches, for example.\nThe --prepare option can \
                     be specified once for all commands or multiple times, once for \
                     each command. In the latter case, each preparation command will \
                     be run prior to the corresponding benchmark command.",
                ),
        )
        .arg(
            Arg::new("conclude")
                .long("conclude")
                .short('C')
                .action(ArgAction::Append)
                .num_args(1)
                .value_name("CMD")
                .value_hint(ValueHint::CommandString)
                .help(
                    "Execute CMD after each timing run. This is useful, for example, for \
                     killing long-running processes started in --prepare (e.g. a web \
                     server).\nThe --conclude option can be specified once for all \
                     commands or multiple times, once for each command. In the latter case, \
                     each conclude command will be run after the corresponding benchmark \
                     command.",
                ),
        )
        .arg(
            Arg::new("cleanup")
                .long("cleanup")
                .short('c')
                .action(ArgAction::Set)
                .value_name("CMD")
                .value_hint(ValueHint::CommandString)
                .help(
                    "Execute CMD after the completion of all benchmarking \
                     runs for each individual command to be benchmarked. \
                     This is useful if the commands to be benchmarked produce \
                     artifacts that need to be cleaned up."
                ),
        )
        .arg(
            Arg::new("parameter-scan")
                .long("parameter-scan")
                .short('P')
                .action(ArgAction::Set)
                .allow_hyphen_values(true)
                .value_names(["VAR", "MIN", "MAX"])
                .help(
                    "Perform benchmark runs for each value in the range MIN..MAX. Replaces the \
                     string '{VAR}' in each command by the current parameter value.\n\n  \
                     Example:  hyperfine -P threads 1 8 'make -j {threads}'\n\n\
                     This performs benchmarks for 'make -j 1', 'make -j 2', …, 'make -j 8'.\n\n\
                     To have the value increase following a different pattern, use shell arithmetic.\n\n  \
                     Example: hyperfine -P size 0 3 'sleep $((2**{size}))'\n\n\
                     This performs benchmarks with power of 2 increases: 'sleep 1', 'sleep 2', 'sleep 4', …\n\
                     The exact syntax may vary depending on your shell and OS."
                ),
        )
        .arg(
            Arg::new("parameter-step-size")
                .long("parameter-step-size")
                .short('D')
                .action(ArgAction::Set)
                .value_names(["DELTA"])
                .requires("parameter-scan")
                .help(
                    "This argument requires --parameter-scan to be specified as well. \
                     Traverse the range MIN..MAX in steps of DELTA.\n\n  \
                     Example:  hyperfine -P delay 0.3 0.7 -D 0.2 'sleep {delay}'\n\n\
                     This performs benchmarks for 'sleep 0.3', 'sleep 0.5' and 'sleep 0.7'.",
                ),
        )
        .arg(
            Arg::new("parameter-list")
                .long("parameter-list")
                .short('L')
                .action(ArgAction::Append)
                .allow_hyphen_values(true)
                .value_names(["VAR", "VALUES"])
                .conflicts_with_all(["parameter-scan", "parameter-step-size"])
                .help(
                    "Perform benchmark runs for each value in the comma-separated list VALUES. \
                     Replaces the string '{VAR}' in each command by the current parameter \
                     value.\n\nExample:  hyperfine -L compiler gcc,clang '{compiler} -O2 main.cpp'\n\n\
                     This performs benchmarks for 'gcc -O2 main.cpp' and 'clang -O2 main.cpp'.\n\n\
                     The option can be specified multiple times to run benchmarks for all \
                     possible parameter combinations.\n"
                ),
        )
        .arg(
            Arg::new("shell")
                .long("shell")
                .short('S')
                .action(ArgAction::Set)
                .value_name("SHELL")
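                // Self-override: '--shell' may be given multiple times; the last value wins.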
                .overrides_with("shell")
                .value_hint(ValueHint::CommandString)
                .help("Set the shell to use for executing benchmarked commands. This can be the \
                       name or the path to the shell executable, or a full command line \
                       like \"bash --norc\". It can also be set to \"default\" to explicitly select \
                       the default shell on this platform. Finally, this can also be set to \
                       \"none\" to disable the shell. In this case, commands will be executed \
                       directly. They can still have arguments, but more complex things like \
                       \"sleep 0.1; sleep 0.2\" are not possible without a shell.")
        )
        .arg(
            Arg::new("no-shell")
                .short('N')
                .action(ArgAction::SetTrue)
                .conflicts_with_all(["shell", "debug-mode"])
                .help("An alias for '--shell=none'.")
        )
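        // With `num_args(0..=1)` and `require_equals(true)` below, an explicit MODE
        // must be attached with '=' (e.g. '-i=1,2'); a bare '-i' falls back to
        // `default_missing_value` instead of consuming the next positional argument.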
        .arg(
            Arg::new("ignore-failure")
                .long("ignore-failure")
                .action(ArgAction::Set)
                .value_name("MODE")
                .num_args(0..=1)
                .default_missing_value("all-non-zero")
                .require_equals(true)
                .short('i')
                .help("Ignore failures of the benchmarked programs. Without a value or with \
                       'all-non-zero', all non-zero exit codes are ignored. You can also provide \
                       a comma-separated list of exit codes to ignore (e.g., --ignore-failure=1,2)."),
        )
        .arg(
            Arg::new("style")
                .long("style")
                .action(ArgAction::Set)
                .value_name("TYPE")
                .value_parser(["auto", "basic", "full", "nocolor", "color", "none"])
                .help(
                    "Set output style type (default: auto). Set this to 'basic' to disable output \
                     coloring and interactive elements. Set it to 'full' to enable all effects \
                     even if no interactive terminal was detected. Set this to 'nocolor' to \
                     keep the interactive output without any colors. Set this to 'color' to keep \
                     the colors without any interactive output. Set this to 'none' to disable all \
                     the output of the tool.",
                ),
        )
        .arg(
            Arg::new("sort")
                .long("sort")
                .action(ArgAction::Set)
                .value_name("METHOD")
                .value_parser(["auto", "command", "mean-time"])
                .default_value("auto")
                .hide_default_value(true)
                .help(
                    "Specify the sort order of the speed comparison summary and the exported tables for \
                     markup formats (Markdown, AsciiDoc, org-mode):\n  \
                       * 'auto' (default): the speed comparison will be ordered by time and\n    \
                         the markup tables will be ordered by command (input order).\n  \
                       * 'command': order benchmarks in the way they were specified\n  \
                       * 'mean-time': order benchmarks by mean runtime\n"
                ),
        )
        .arg(
            Arg::new("time-unit")
                .long("time-unit")
                .short('u')
                .action(ArgAction::Set)
                .value_name("UNIT")
                .value_parser(["microsecond", "millisecond", "second"])
                .help("Set the time unit to be used. Possible values: microsecond, millisecond, second. \
                       If the option is not given, the time unit is determined automatically. \
                       This option affects the standard output as well as all export formats except for CSV and JSON."),
        )
        .arg(
            Arg::new("export-asciidoc")
                .long("export-asciidoc")
                .action(ArgAction::Set)
                .value_name("FILE")
                .value_hint(ValueHint::FilePath)
                .help("Export the timing summary statistics as an AsciiDoc table to the given FILE. \
                       The output time unit can be changed using the --time-unit option."),
        )
        .arg(
            Arg::new("export-csv")
                .long("export-csv")
                .action(ArgAction::Set)
                .value_name("FILE")
                .value_hint(ValueHint::FilePath)
                .help("Export the timing summary statistics as CSV to the given FILE. If you need \
                       the timing results for each individual run, use the JSON export format. \
                       The output time unit is always seconds."),
        )
        .arg(
            Arg::new("export-json")
                .long("export-json")
                .action(ArgAction::Set)
                .value_name("FILE")
                .value_hint(ValueHint::FilePath)
                .help("Export the timing summary statistics and timings of individual runs as JSON to the given FILE. \
                       The output time unit is always seconds."),
        )
        .arg(
            Arg::new("export-markdown")
                .long("export-markdown")
                .action(ArgAction::Set)
                .value_name("FILE")
                .value_hint(ValueHint::FilePath)
                .help("Export the timing summary statistics as a Markdown table to the given FILE. \
                       The output time unit can be changed using the --time-unit option."),
        )
        .arg(
            Arg::new("export-orgmode")
                .long("export-orgmode")
                .action(ArgAction::Set)
                .value_name("FILE")
                .value_hint(ValueHint::FilePath)
                .help("Export the timing summary statistics as an Emacs org-mode table to the given FILE. \
                       The output time unit can be changed using the --time-unit option."),
        )
        .arg(
            Arg::new("show-output")
                .long("show-output")
                .action(ArgAction::SetTrue)
                .conflicts_with("style")
                .help(
                    "Print the stdout and stderr of the benchmark instead of suppressing it. \
                     This will increase the time it takes for benchmarks to run, \
                     so it should only be used for debugging purposes or \
                     when trying to benchmark output speed.",
                ),
        )
        .arg(
            Arg::new("output")
                .long("output")
                .conflicts_with("show-output")
                .action(ArgAction::Append)
                .value_name("WHERE")
                .help(
                    "Control where the output of the benchmark is redirected. Note \
                     that some programs like 'grep' detect when standard output is \
                     /dev/null and apply certain optimizations. To avoid that, consider \
                     using '--output=pipe'.\n\
                     \n\
                     <WHERE> can be:\n\
                     \n  \
                       null:     Redirect output to /dev/null (the default).\n\
                     \n  \
                       pipe:     Feed the output through a pipe before discarding it.\n\
                     \n  \
                       inherit:  Don't redirect the output at all (same as '--show-output').\n\
                     \n  \
                       <FILE>:   Write the output to the given file.\n\n\
                     This option can be specified once for all commands or multiple times, once for \
                     each command. Note: If you want to log the output of each and every iteration, \
                     you can use a shell redirection and the '$HYPERFINE_ITERATION' environment variable:\n    \
                     hyperfine 'my-command > output-${HYPERFINE_ITERATION}.log'\n\n",
                ),
        )
        .arg(
            Arg::new("input")
                .long("input")
                .action(ArgAction::Set)
                .num_args(1)
                .value_name("WHERE")
                .help("Control where the input of the benchmark comes from.\n\
                       \n\
                       <WHERE> can be:\n\
                       \n  \
                         null:     Read from /dev/null (the default).\n\
                       \n  \
                         <FILE>:   Read the input from the given file."),
        )
        .arg(
            Arg::new("command-name")
                .long("command-name")
                .short('n')
                .action(ArgAction::Append)
                .num_args(1)
                .value_name("NAME")
                .help("Give a meaningful name to a command. This can be specified multiple times \
                       if several commands are benchmarked."),
        )
        // This option is hidden for now, as it is not yet clear if we want to 'stabilize' it;
        // see the discussion in https://github.com/sharkdp/hyperfine/issues/527
        .arg(
            Arg::new("min-benchmarking-time")
                .long("min-benchmarking-time")
                .action(ArgAction::Set)
                .hide(true)
                .help("Set the minimum time (in seconds) to run benchmarks. Note that the number of \
                       benchmark runs is additionally influenced by the `--min-runs`, `--max-runs`, and \
                       `--runs` options.")
        )
        .arg(
            Arg::new("debug-mode")
                .long("debug-mode")
                .action(ArgAction::SetTrue)
                .hide(true)
                .help("Enable debug mode which does not actually run commands, but returns fake times when the command is 'sleep <time>'.")
        )
}
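
// `debug_assert` runs clap's internal consistency checks on the command
// definition; it panics in debug builds if the configuration is invalid.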
#[test]
fn verify_app() {
    build_command().debug_assert();
}
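
// A minimal sketch of how the parser can be exercised end to end. This test is
// illustrative (not part of the original suite); it relies only on the
// '--warmup' flag and the positional 'command' argument defined above.
#[test]
fn parses_warmup_and_command() {
    let matches = get_cli_arguments(["hyperfine", "--warmup", "3", "sleep 0.5"]);
    // '--warmup' uses the default String value parser.
    assert_eq!(
        matches.get_one::<String>("warmup").map(String::as_str),
        Some("3")
    );
    // The positional 'command' argument collects the remaining values.
    let commands: Vec<&str> = matches
        .get_many::<String>("command")
        .unwrap()
        .map(String::as_str)
        .collect();
    assert_eq!(commands, ["sleep 0.5"]);
}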