# examples/Qwen2.5-Coder-fim.py
from transformers import AutoTokenizer, AutoModelForCausalLM

# load model and tokenizer
device = "cuda"  # the device to move the model inputs onto (model placement is handled by device_map="auto")

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-32B", device_map="auto").eval()
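
# FIM prompt layout: <|fim_prefix|>{code before the gap}<|fim_suffix|>{code after the gap}<|fim_middle|>
# The model generates the missing middle (here, the `left` partition line of quicksort).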
input_text = """<|fim_prefix|>def quicksort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    <|fim_suffix|>
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quicksort(left) + middle + quicksort(right)<|fim_middle|>"""

model_inputs = tokenizer([input_text], return_tensors="pt").to(device)
# Stop generation at any special token: the FIM tokens <|fim_prefix|> (151659),
# <|fim_middle|> (151660), <|fim_suffix|> (151661), <|fim_pad|> (151662),
# <|repo_name|> (151663), <|file_sep|> (151664), plus <|im_end|> (151645)
# and <|endoftext|> (151643).
eos_token_ids = [151659, 151660, 151661, 151662, 151663, 151664, 151645, 151643]
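
# A less brittle alternative (a sketch, not part of the original example) is to
# resolve the ids from the tokenizer itself via the standard Hugging Face
# convert_tokens_to_ids method:
# eos_token_ids = tokenizer.convert_tokens_to_ids(
#     ["<|fim_prefix|>", "<|fim_middle|>", "<|fim_suffix|>", "<|fim_pad|>",
#      "<|repo_name|>", "<|file_sep|>", "<|im_end|>", "<|endoftext|>"]
# )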

# Use `max_new_tokens` to control the maximum output length.
generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512, do_sample=False, eos_token_id=eos_token_ids)[0]
# generated_ids includes the prompt ids; decode only the tokens generated after them.
output_text = tokenizer.decode(generated_ids[len(model_inputs.input_ids[0]):], skip_special_tokens=True)

print(f"Prompt: {input_text}\n\nGenerated text: {output_text}")
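
# To see the completed function, splice the generated middle back between the
# prefix and the suffix. A minimal sketch, assuming the prompt keeps the
# <|fim_prefix|>/<|fim_suffix|>/<|fim_middle|> layout used above:
prefix = input_text.split("<|fim_prefix|>", 1)[1].split("<|fim_suffix|>", 1)[0]
suffix = input_text.split("<|fim_suffix|>", 1)[1].split("<|fim_middle|>", 1)[0]
print(f"Completed function:\n{prefix}{output_text}{suffix}")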