parser.py
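"""Command-line argument parsing for lcb_runner generation and evaluation runs."""
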
import os
import torch
import argparse

from lcb_runner.utils.scenarios import Scenario


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        type=str,
        default="gpt-3.5-turbo-0301",
        help="Name of the model to use matching `lm_styles.py`",
    )
    parser.add_argument(
        "--model_path",
        type=str,
        default=None,
        help="Path to load the model from (overrides the default for `--model`)",
    )
    parser.add_argument(
        "--output_name",
        type=str,
        default=None,
        help="Custom name used for the output files",
    )
    # argparse passes the raw CLI string through Scenario(...), resolving it
    # to the matching enum member by value
    parser.add_argument(
        "--scenario",
        type=Scenario,
        default=Scenario.codegeneration,
        help="Type of scenario to run",
    )
    parser.add_argument(
        "--n", type=int, default=10, help="Number of samples to generate"
    )
    parser.add_argument(
        "--temperature", type=float, default=0.2, help="Temperature for sampling"
    )
    parser.add_argument("--top_p", type=float, default=0.95, help="Top p for sampling")
    parser.add_argument(
        "--max_tokens", type=int, default=1200, help="Max tokens for sampling"
    )
    parser.add_argument(
        "--multiprocess",
        default=0,
        type=int,
        help="Number of processes to use for generation; -1 uses all CPU cores (vllm runs do not use this)",
    )
    parser.add_argument(
        "--stop",
        default="###",
        type=str,
        help="Stop token (use `,` to separate multiple tokens)",
    )
    parser.add_argument(
        "--continue_existing",
        action="store_true",
        help="Continue a previous run, reusing already generated outputs",
    )
    parser.add_argument(
        "--use_cache", action="store_true", help="Use cache for generation"
    )
    parser.add_argument("--debug", action="store_true", help="Debug mode")
    parser.add_argument("--evaluate", action="store_true", help="Evaluate the results")
    parser.add_argument(
        "--num_process_evaluate",
        type=int,
        default=12,
        help="Number of processes to use for evaluation",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help="Directory to store the outputs",
    )

    parser.add_argument("--timeout", type=int, default=18, help="Timeout for evaluation")
    parser.add_argument(
        "--tensor_parallel_size",
        type=int,
        default=-1,
        help="Tensor parallel size for vllm; -1 uses all available GPUs",
    )
    parser.add_argument("--dtype", type=str, default="bfloat16", help="Dtype for vllm")

    args = parser.parse_args()

    # Allow multiple stop tokens via a comma-separated list
    args.stop = args.stop.split(",")

    # Resolve -1 sentinels: use every available GPU / CPU core
    if args.tensor_parallel_size == -1:
        args.tensor_parallel_size = torch.cuda.device_count()

    if args.multiprocess == -1:
        args.multiprocess = os.cpu_count()

    return args


def test():
    args = get_args()
    print(args)


if __name__ == "__main__":
    test()
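
# Example invocation (values are illustrative; all flags are defined above):
#   python parser.py --model gpt-3.5-turbo-0301 --scenario codegeneration \
#       --n 10 --temperature 0.2 --evaluate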