import gradio as gr
from transformers import CLIPProcessor, CLIPModel
from PIL import Image

# Load FashionCLIP (patrickjohncyh's version)
model = CLIPModel.from_pretrained("patrickjohncyh/fashion-clip")
processor = CLIPProcessor.from_pretrained("patrickjohncyh/fashion-clip")

def analyze_fashion(image):
    # Preprocess the image and compute its CLIP image embedding
    inputs = processor(images=image, return_tensors="pt")
    outputs = model.get_image_features(**inputs)
    # Convert tensor to list for JSON response
    return {"features": outputs.detach().numpy().tolist()}

demo = gr.Interface(fn=analyze_fashion, inputs=gr.Image(type="pil"), outputs="json")
demo.launch()
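
# Optional extension (a minimal sketch, not part of the original demo): instead of returning
# raw embeddings, FashionCLIP can score an image against candidate text labels using the
# standard Hugging Face CLIP API (logits_per_image + softmax). The label list below is a
# hypothetical example taxonomy; if used, define this before the gr.Interface call and pass
# fn=classify_fashion (e.g. with outputs="label").
import torch

candidate_labels = ["a red dress", "blue denim jeans", "a black leather jacket", "white sneakers"]

def classify_fashion(image):
    # Encode the image together with all candidate labels in one forward pass
    inputs = processor(text=candidate_labels, images=image, return_tensors="pt", padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # logits_per_image holds image-text similarity scores; softmax turns them into probabilities
    probs = outputs.logits_per_image.softmax(dim=1).squeeze(0)
    return {label: float(p) for label, p in zip(candidate_labels, probs)}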