Ryukijano committed (verified)
Commit 1a6a2a3 · 1 Parent(s): 89e7803

Update app.py

Files changed (1): app.py (+5 -5)
app.py CHANGED
@@ -1,7 +1,7 @@
 # app.py for Hugging Face Space: Connecting Meta Llama 3.2 Vision, Segment Anything 2, and Diffusion Model
 import gradio as gr
 import spaces  # Import the spaces module to use GPU-specific decorators
-from transformers import MllamaForConditionalGeneration, AutoProcessor
+from transformers import MllamaForConditionalGeneration, AutoProcessor, pipeline
 from diffusers import StableDiffusionPipeline
 import torch
 import os
@@ -16,9 +16,9 @@ vision_model = MllamaForConditionalGeneration.from_pretrained(
     llama_vision_model_id,
     torch_dtype=torch.bfloat16,
     device_map="auto",
-    use_auth_token=hf_token
+    token=hf_token  # Updated to use 'token' instead of 'use_auth_token'
 )
-processor = AutoProcessor.from_pretrained(llama_vision_model_id, use_auth_token=hf_token)
+processor = AutoProcessor.from_pretrained(llama_vision_model_id, token=hf_token)
 
 # Set up Meta Segment Anything 2 model (using private model with token)
 segment_model_id = "meta/segment-anything-2"
@@ -26,13 +26,13 @@ segment_pipe = pipeline(
     "image-segmentation",
     model=segment_model_id,
     device=0,  # Force usage of GPU
-    use_auth_token=hf_token,  # Use Hugging Face token for authentication
+    token=hf_token  # Updated to use 'token' instead of 'use_auth_token'
 )
 
 # Set up Stable Diffusion Lite model
 stable_diffusion_model_id = "runwayml/stable-diffusion-v1-5"
 diffusion_pipe = StableDiffusionPipeline.from_pretrained(
-    stable_diffusion_model_id, torch_dtype=torch.float16, use_auth_token=hf_token
+    stable_diffusion_model_id, torch_dtype=torch.float16, token=hf_token  # Updated to use 'token'
 )
 diffusion_pipe = diffusion_pipe.to("cuda")  # Force usage of GPU
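
For reference, a minimal sketch of the authentication pattern this commit switches to, assuming the Space exposes its access token through an HF_TOKEN secret (that secret name and the public checkpoint below are illustrative assumptions, not part of this repo):

import os
from transformers import AutoProcessor

# Assumption: the Space stores its Hugging Face access token in an HF_TOKEN secret/env var.
hf_token = os.environ.get("HF_TOKEN")

# Recent transformers and diffusers releases accept `token=`; `use_auth_token=` is deprecated.
# A public checkpoint is used here purely for illustration; gated or private repos need a valid token.
processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32", token=hf_token)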