# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "transformers",
#     "torch",
#     "huggingface_hub",
# ]
# ///
try:
    # Load the model and tokenizer directly from the Hugging Face Hub.
    from transformers import AutoTokenizer, AutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
    model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")

    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Decode only the newly generated tokens, skipping the prompt.
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

    # Record success in the report file.
    with open('LGAI-EXAONE_EXAONE-4.0-32B_1.txt', 'w') as f:
        f.write('Everything was good in LGAI-EXAONE_EXAONE-4.0-32B_1.txt')
except Exception:
    # On failure, write the full traceback to the report file instead.
    import traceback
    with open('LGAI-EXAONE_EXAONE-4.0-32B_1.txt', 'w') as f:
        traceback.print_exc(file=f)
finally:
    # Upload the report file regardless of success or failure.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='LGAI-EXAONE_EXAONE-4.0-32B_1.txt',
        repo_id='model-metadata/custom_code_execution_files',
        path_in_repo='LGAI-EXAONE_EXAONE-4.0-32B_1.txt',
        repo_type='dataset',
    )