from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "THUDM/GLM-Z1-Rumination-32B-0414"

# Load the tokenizer and model; device_map="auto" places the weights across the
# available GPU(s), and trust_remote_code=True allows custom tokenizer code
# shipped in the model repo to run.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, cache_dir="./cache", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, cache_dir="./cache", device_map="auto")
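# Note (assumption about your hardware): a 32B model loaded in the default
# float32 precision needs a large amount of GPU memory; if your GPUs support
# it, passing torch_dtype=torch.bfloat16 (after `import torch`) to
# from_pretrained roughly halves the footprint.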

# A single-turn conversation in the chat format expected by the template.
message = [{"role": "user", "content": "Let a, b be positive real numbers such that ab = a + b + 3. Determine the range of possible values for a + b."}]

# Apply the model's chat template and move the resulting tensors to the model's
# device; add_generation_prompt=True appends the assistant-turn marker so the
# model starts generating a reply rather than continuing the user message.
inputs = tokenizer.apply_chat_template(
    message,
    return_tensors="pt",
    add_generation_prompt=True,
    return_dict=True,
).to(model.device)

# Sampling settings. max_new_tokens is required here: without it, generate()
# falls back to a very short default length and would truncate the long
# reasoning trace this model produces.
generate_kwargs = {
    "input_ids": inputs["input_ids"],
    "attention_mask": inputs["attention_mask"],
    "max_new_tokens": 8192,  # assumed budget for the reasoning trace; adjust to taste
    "temperature": 0.95,
    "top_p": 0.7,
    "do_sample": True,
}
out = model.generate(**generate_kwargs)
# Decode only the newly generated tokens, slicing off the echoed prompt.
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
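
# Optional: rumination-style models can generate for a long time, so streaming
# tokens as they are produced is often more practical than waiting for
# generate() to return. A minimal sketch using transformers' TextStreamer,
# reusing generate_kwargs from above (skip_prompt hides the echoed input):
from transformers import TextStreamer

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(**generate_kwargs, streamer=streamer)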
