import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

MODEL_ADAPTER_ID = "vivekjada/medical-o1-llm-sft-lora"
BASE_ID = "unsloth/Llama-3.1-8B-Instruct-unsloth-bnb-4bit"

# Load the 4-bit quantized base model and attach the LoRA adapter on top.
bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
base = AutoModelForCausalLM.from_pretrained(BASE_ID, quantization_config=bnb, device_map="auto")
model = PeftModel.from_pretrained(base, MODEL_ADAPTER_ID)
tok = AutoTokenizer.from_pretrained(BASE_ID)

SYSTEM = (
    "You are a careful medical assistant. You provide educational information—not medical advice. "
    "Reason step-by-step and end with a concise final answer."
)


def respond(question, max_new_tokens, temperature, top_p):
    # NOTE: these tags are not the native Llama 3.1 chat template; they must
    # match the prompt format the adapter was fine-tuned with.
    prompt = (
        f"<|system|>\n{SYSTEM}\n<|end|>\n"
        f"<|user|>\n{question}\n<|end|>\n"
        f"<|assistant|>\n"
    )
    inputs = tok(prompt, return_tensors="pt").to(model.device)

    # Clamp temperature: a value of exactly 0 is invalid when do_sample=True.
    temperature = max(float(temperature), 1e-3)

    with torch.no_grad():
        out = model.generate(
            **inputs,
            max_new_tokens=int(max_new_tokens),
            temperature=temperature,
            top_p=float(top_p),
            do_sample=True,
            eos_token_id=tok.eos_token_id,
        )

    # The decoded text still contains the prompt; keep only what follows the
    # assistant tag (the tag survives decoding because it is not a special token
    # in the base tokenizer).
    text = tok.decode(out[0], skip_special_tokens=True)
    reply = text.split("<|assistant|>")[-1].strip()
    return (
        "⚠️ **Disclaimer:** This demo is for educational purposes only and is **not** medical advice.\n\n"
        + reply
    )


demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(
            label="Enter a medical question",
            lines=6,
            placeholder="e.g., How to interpret borderline TSH in a 1st-trimester patient?",
        ),
        gr.Slider(64, 1024, value=384, step=32, label="Max new tokens"),
        gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="Temperature"),
        gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p"),
    ],
    outputs=gr.Markdown(label="Model response"),
    title="Medical o1 Reasoning (SFT, LoRA)",
    description="Llama-3.1-8B (Unsloth 4-bit) fine-tuned on medical o1 reasoning. Educational only.",
)

if __name__ == "__main__":
    demo.launch()