ariG23498 (HF Staff) committed
Commit 6ac98bb · verified · 1 Parent(s): 9daf9c2

Upload HuggingFaceTB_SmolLM3-3B_0.py with huggingface_hub
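The script was pushed with the huggingface_hub client. A minimal sketch of what such an upload call can look like, assuming the module-level upload_file helper; the repo_id below is a hypothetical placeholder, since the target repo is not shown on this page:

    from huggingface_hub import upload_file

    # Push the local script into a Hub repo; requires a valid HF token (e.g. via `huggingface-cli login`).
    upload_file(
        path_or_fileobj="HuggingFaceTB_SmolLM3-3B_0.py",  # local file to upload
        path_in_repo="HuggingFaceTB_SmolLM3-3B_0.py",     # destination path in the repo
        repo_id="<namespace>/<repo>",                     # placeholder: not taken from this commit page
        commit_message="Upload HuggingFaceTB_SmolLM3-3B_0.py with huggingface_hub",
    )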

Files changed (1):
  1. HuggingFaceTB_SmolLM3-3B_0.py +89 -0
HuggingFaceTB_SmolLM3-3B_0.py CHANGED
@@ -21,6 +21,95 @@ try:
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    pipe = pipeline("text-generation", model=model_id, tokenizer=tokenizer)
+
+    messages = [
+        {"role": "user", "content": "Give me a brief explanation of gravity in simple terms."},
+    ]
+    pipe(messages)
+
+    messages = [
+        {"role": "system", "content": "/no_think"},
+        {"role": "user", "content": "Give me a brief explanation of gravity in simple terms."},
+    ]
+    pipe(messages)
+
+    from transformers import AutoModelForCausalLM, AutoTokenizer
+
+    model_name = "HuggingFaceTB/SmolLM3-3B"
+    device = "cuda"  # for GPU usage or "cpu" for CPU usage
+
+    # load the tokenizer and the model
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_name,
+    ).to(device)
+
+    # prepare the model input
+    prompt = "Give me a brief explanation of gravity in simple terms."
+    messages_think = [
+        {"role": "user", "content": prompt}
+    ]
+
+    text = tokenizer.apply_chat_template(
+        messages_think,
+        tokenize=False,
+        add_generation_prompt=True,
+    )
+    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+    # Generate the output
+    generated_ids = model.generate(**model_inputs, max_new_tokens=32768)
+
+    # Get and decode the output
+    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):]
+    print(tokenizer.decode(output_ids, skip_special_tokens=True))
+
+    prompt = "Give me a brief explanation of gravity in simple terms."
+    messages = [
+        {"role": "system", "content": "/no_think"},
+        {"role": "user", "content": prompt}
+    ]
+
+    text = tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True,
+    )
+
+    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+    # Generate the output
+    generated_ids = model.generate(**model_inputs, max_new_tokens=32768)
+
+    # Get and decode the output
+    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):]
+    print(tokenizer.decode(output_ids, skip_special_tokens=True))
+
+    tools = [
+        {
+            "name": "get_weather",
+            "description": "Get the weather in a city",
+            "parameters": {"type": "object", "properties": {"city": {"type": "string", "description": "The city to get the weather for"}}},
+        }
+    ]
+
+    messages = [
+        {
+            "role": "user",
+            "content": "Hello! How is the weather today in Copenhagen?"
+        }
+    ]
+
+    inputs = tokenizer.apply_chat_template(
+        messages,
+        enable_thinking=False,  # True works as well, your choice!
+        xml_tools=tools,
+        add_generation_prompt=True,
+        tokenize=True,
+        return_tensors="pt"
+    ).to(model.device)
+
+    outputs = model.generate(inputs)
+    print(tokenizer.decode(outputs[0]))
    with open('HuggingFaceTB_SmolLM3-3B_0.txt', 'w') as f:
        f.write('Everything was good in HuggingFaceTB_SmolLM3-3B_0.txt')
except Exception as e: