from huggingface_hub import from_pretrained_keras
from keras_cv import models
import gradio as gr
from tensorflow import keras
from diffusers import StableDiffusionPipeline

keras.mixed_precision.set_global_policy("mixed_float16")

# prepare model
resolution = 512

# checkpoint of the converted Stable Diffusion from KerasCV
model_ckpt = "nielsgl/dreambooth-bored-ape"
pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt)
pipeline.to("cuda")

unique_id = "drawbayc"
class_label = "monkey"
prompt = f"A drawing of {unique_id} {class_label} as a cowboy"
image = pipeline(prompt, num_inference_steps=50).images[0]
# generate images
def infer(prompt, negative_prompt, guidance_scale=10, num_inference_steps=50):
    neg = negative_prompt if negative_prompt else None
    imgs = []
    # keep sampling batches of 5 images until two of them pass the NSFW filter
    while len(imgs) != 2:
        result = pipeline(
            prompt,
            negative_prompt=neg,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            num_images_per_prompt=5,
        )
        for img, is_nsfw in zip(result.images, result.nsfw_content_detected):
            if not is_nsfw:
                imgs.append(img)
            if len(imgs) == 2:
                break
    return imgs
# customize interface
title = "KerasCV Stable Diffusion Demo on images of Bored Apes"
description = "This is a DreamBooth model fine-tuned on images from the NFT collection of the Bored Ape Yacht Club. To try it, include the concept `drawbayc ape` in your prompt."
examples = [
    ["A drawing of a drawbayc ape as a cowboy", "", 12, 50],
    ["A drawing of a drawbayc ape as a clown", "", 12, 50],
    ["A drawing of a drawbayc ape as a turtle", "", 12, 50],
]

base_14 = "https://huggingface.co/nielsgl/dreambooth-bored-ape/resolve/main/"
model_card_1 = f"""
# KerasCV Stable Diffusion in Diffusers 🧨🤗

DreamBooth model for the `drawbayc ape` concept trained by nielsgl on the `nielsgl/bayc-tiny` dataset, with images from this [Kaggle dataset](https://www.kaggle.com/datasets/stanleyjzheng/bored-apes-yacht-club).

It can be used by modifying the `instance_prompt`: **a drawing of drawbayc ape**

## Description

The Stable Diffusion V2 pipeline contained in the corresponding repository (`nielsgl/dreambooth-bored-ape`) was created using a modified version of [this Space](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) for Stable Diffusion V2 from KerasCV. The purpose is to convert the KerasCV Stable Diffusion weights into a format compatible with [Diffusers](https://github.com/huggingface/diffusers). This allows users to fine-tune with KerasCV and then use the fine-tuned weights in Diffusers, taking advantage of its nifty features (like [schedulers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers), [fast attention](https://huggingface.co/docs/diffusers/optimization/fp16), etc.).
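For example, here is a minimal sketch (not part of the original Space, and assuming the converted checkpoint loads like any other Diffusers `StableDiffusionPipeline`) of pairing the weights with a faster scheduler in half precision:

```python
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

# load the converted DreamBooth checkpoint in half precision
pipe = StableDiffusionPipeline.from_pretrained(
    "nielsgl/dreambooth-bored-ape", torch_dtype=torch.float16
).to("cuda")

# swap in a faster multistep scheduler (a Diffusers feature, not available in KerasCV)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

image = pipe("a drawing of drawbayc ape as an astronaut", num_inference_steps=25).images[0]
```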
This model was created as part of the Keras DreamBooth Sprint 🔥. Visit the [organisation page](https://huggingface.co/keras-dreambooth) for instructions on how to take part!

## Demo
"""
model_card_2 = f"""
## Examples

### Stable Diffusion V2.1

> A drawing of drawbayc monkey dressed as an astronaut



> A drawing of drawbayc monkey dressed as the pope


## Usage with Stable Diffusion V2.1

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained('nielsgl/dreambooth-bored-ape')
image = pipeline("a drawing of drawbayc ape as an astronaut").images[0]
image
```
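
As a variation (a sketch, not part of the original card), the negative prompt and guidance scale used by the demo above can be passed straight to the pipeline:

```python
# mirrors the demo defaults shown in the Space UI
image = pipeline(
    "a drawing of drawbayc ape as a clown",
    negative_prompt="bad anatomy, blurry",
    guidance_scale=12,
    num_inference_steps=50,
).images[0]
```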
| """ | |
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown(model_card_1)
    with gr.Row():
        with gr.Column():
            prompt_pos = gr.Textbox(label="Positive Prompt", value="a drawing of drawbayc ape as an astronaut")
            prompt_neg = gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry")
            prompt_gs = gr.Number(label="Guidance scale", value=12)
            prompt_steps = gr.Slider(label="Inference Steps", value=50)
            prompt_btn = gr.Button("Generate")
        with gr.Column():
            output = gr.Gallery(label="Outputs").style(grid=(1, 2))
        prompt_btn.click(infer, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=[output])
    with gr.Row():
        gr.Examples(examples, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=output, fn=infer, cache_examples=True)
    with gr.Row():
        with gr.Column():
            gr.Markdown(model_card_2)
        with gr.Column():
            gr.Markdown(" ")

demo.queue().launch()