z-dickson committed on
Commit
7a4929a
·
verified ·
1 Parent(s): d6aea7c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +8 -22
README.md CHANGED
@@ -168,33 +168,18 @@ A sequence-to-sequence model fine-tuned to extract structured event summaries fr
168
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
169
  import torch, json
170
 
171
- model_id = "your-namespace/your-model-repo"
172
  tok = AutoTokenizer.from_pretrained(model_id)
173
  model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to("cuda" if torch.cuda.is_available() else "cpu")
174
- model.eval()
175
 
176
- text = "Following the devastating floods in Slovenia, our party calls for stronger climate resilience measures."
177
- enc = tok(text, return_tensors="pt", truncation=True, max_length=1024).to(model.device)
178
- gen = model.generate(**enc, max_new_tokens=128, num_beams=4, do_sample=False)
179
- out = tok.decode(gen[0], skip_special_tokens=True)
180
-
181
- # Safe parse
182
- def parse_json(s):
183
- try: return json.loads(s)
184
- except Exception:
185
- if "{" in s and "}" in s:
186
- s2 = s[s.find("{"): s.rfind("}")+1]
187
- try: return json.loads(s2)
188
- except Exception: pass
189
- return None
190
-
191
- print(out)
192
- print(parse_json(out))
193
- ```
194
 
195
- ## Expected output example
 
 
 
 
 
196
 
197
- ```{json}
198
  {
199
  "response_to_event": "Yes",
200
  "event_name": "Floods in Slovenia",
@@ -207,3 +192,4 @@ print(parse_json(out))
207
 
208
 
209
 
 
 
168
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
169
  import torch, json
170
 
171
+ model_id = "z-dickson/BART_political_event_detection"
172
  tok = AutoTokenizer.from_pretrained(model_id)
173
  model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to("cuda" if torch.cuda.is_available() else "cpu")
 
174
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
 
176
+ text = "Following the devastating floods in Slovenia, our party calls for stronger climate resilience measures."
177
+ inputs = tok(text, return_tensors="pt").to(model.device)
178
+ outputs = model.generate(**inputs, max_new_tokens=128)
179
+ response = tok.decode(outputs[0], skip_special_tokens=True)
180
+ response_json = json.loads(response)
181
+ response_json
182
 
 
183
  {
184
  "response_to_event": "Yes",
185
  "event_name": "Floods in Slovenia",
 
192
 
193
 
194
 
195
+