import torch
import gradio as gr
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "skt/A.X-4.0-VL-Light"

# Load the vision-language model and its processor (custom code shipped with the checkpoint).
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).to("cuda")
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)


def ask_image_question(image, text):
    # Build a single-turn conversation: one image placeholder followed by the user's question.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": text},
            ],
        }
    ]
    inputs = processor(
        images=[image],
        conversations=[messages],
        return_tensors="pt",
    ).to("cuda")

    generation_kwargs = {
        "max_new_tokens": 256,
        "top_p": 0.8,
        "temperature": 0.5,
        "top_k": 20,
        "repetition_penalty": 1.05,
        "do_sample": True,
    }
    generated_ids = model.generate(**inputs, **generation_kwargs)
    # generate() returns prompt + completion for causal LMs, so drop the prompt tokens
    # and decode only the newly generated answer.
    trimmed_ids = generated_ids[:, inputs["input_ids"].shape[1]:]
    output = processor.batch_decode(trimmed_ids, skip_special_tokens=True)[0]
    return output


gr.Interface(
    fn=ask_image_question,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Question")],
    outputs="text",
).launch()
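
# Optional sanity check (a minimal sketch; "sample.jpg" and the question text are illustrative
# placeholders, not part of the demo). Uncomment to run the handler once without the Gradio UI,
# and comment out the .launch() call above first, since it blocks.
#
# test_image = Image.open("sample.jpg").convert("RGB")
# print(ask_image_question(test_image, "What objects are visible in this image?"))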