Ryukijano committed on
Commit 5ecdeb3 · verified · 1 Parent(s): 25ac4af

Update app.py

Files changed (1)
  1. app.py +8 -5
app.py CHANGED
@@ -20,10 +20,14 @@ vision_model = MllamaForConditionalGeneration.from_pretrained(
 )
 processor = AutoProcessor.from_pretrained(llama_vision_model_id, token=hf_token)
 
-# Set up segmentation model using Segment Anything 2 (sam2_hiera_small.pt)
+# Set up segmentation model using Segment Anything 2 from Hugging Face Hub
 segment_model_id = "camenduru/segment-anything-2"
-segment_model_path = "sam2_hiera_small.pt"
-segment_pipe = torch.load(segment_model_path, map_location="cuda")  # Load segmentation model on GPU
+segment_pipe = pipeline(
+    "image-segmentation",
+    model=segment_model_id,
+    device=0,  # Force usage of GPU
+    token=hf_token  # Updated to use 'token'
+)
 
 # Set up Stable Diffusion Lite model
 stable_diffusion_model_id = "runwayml/stable-diffusion-v1-5"
@@ -42,8 +46,7 @@ def process_image(image):
     caption = processor.decode(output[0], skip_special_tokens=True)
 
     # Step 2: Segment important parts of the image using Segment Anything 2
-    # Use the loaded segment model to perform segmentation
-    segmented_result = segment_pipe(image=image)  # Assuming a callable model or appropriate method
+    segmented_result = segment_pipe(image=image)
     segments = segmented_result
 
     # Step 3: Modify segmented image using Diffusion model
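
For reference, a minimal standalone sketch of the pipeline-based segmentation path this commit switches to. It assumes the camenduru/segment-anything-2 repository can be loaded through the transformers "image-segmentation" pipeline (any mask-producing segmentation checkpoint can be substituted if it cannot); the token value and input image path are placeholders.

from PIL import Image
from transformers import pipeline

# Build the segmentation pipeline on GPU 0 (assumption: the model repo ships a
# transformers-compatible image-segmentation config).
segment_pipe = pipeline(
    "image-segmentation",
    model="camenduru/segment-anything-2",
    device=0,          # GPU index; use -1 to fall back to CPU
    token="hf_xxx",    # placeholder Hugging Face access token
)

image = Image.open("example.jpg")  # placeholder input image
segments = segment_pipe(image)     # list of dicts with "label", "score", and a PIL "mask"

for seg in segments:
    print(seg["label"], seg["mask"].size)

The old path loaded a raw sam2_hiera_small.pt checkpoint with torch.load, which returns whatever object was pickled and is not guaranteed to be callable; routing through pipeline() gives a uniform callable interface and handles device placement and authentication in one place.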