mostlycached committed on
Commit
0f221db
·
verified ·
1 Parent(s): 7bb0e7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -10
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import gradio as gr
2
  import numpy as np
3
- import io
4
  import PIL.Image as Image
5
  from rembg import remove
6
  from PIL import ImageDraw, ImageFont
@@ -27,16 +26,30 @@ def text_behind_image(input_image, text, text_color, font_size, text_opacity):
27
  # Get dimensions
28
  width, height = img.size
29
 
30
- # First, remove background from the original image to get the person silhouette
31
  try:
32
- person_img = remove(img, model_name="u2net_human_seg")
 
33
  except:
34
  # Fallback to default model if human_seg not available
35
- person_img = remove(img)
 
 
 
 
 
 
 
 
 
36
 
37
- # Create a new image with white background for the text
38
- text_bg = Image.new('RGBA', (width, height), (255, 255, 255, 255))
39
- draw = ImageDraw.Draw(text_bg)
 
 
 
 
40
 
41
  # Prepare the text
42
  text = text.strip().upper()
@@ -113,9 +126,13 @@ def text_behind_image(input_image, text, text_color, font_size, text_opacity):
113
  # Draw the text with opacity
114
  draw.text((x, y), line, font=font, fill=text_color_rgb + (int(text_opacity * 255),))
115
 
116
- # Now combine the text background with the person
117
- # The text goes behind the person, so we composite person on top of text
118
- final_image = Image.alpha_composite(text_bg, person_img)
 
 
 
 
119
 
120
  # Convert to RGB for display
121
  return np.array(final_image.convert('RGB'))
 
1
  import gradio as gr
2
  import numpy as np
 
3
  import PIL.Image as Image
4
  from rembg import remove
5
  from PIL import ImageDraw, ImageFont
 
26
  # Get dimensions
27
  width, height = img.size
28
 
29
+ # Step 1: Extract the person from the image
30
  try:
31
+ # Try to use the human segmentation model first
32
+ person_only = remove(img, model_name="u2net_human_seg")
33
  except:
34
  # Fallback to default model if human_seg not available
35
+ person_only = remove(img)
36
+
37
+ # Step 2: Create a mask from the person cutout
38
+ person_mask = Image.new('RGBA', (width, height), (0, 0, 0, 0))
39
+ person_mask.paste(person_only, (0, 0), person_only)
40
+
41
+ # Step 3: Extract the background (original image without the person)
42
+ # Create inverted mask (where the person is black, background is white)
43
+ inverted_mask = Image.new('L', (width, height), 255)
44
+ inverted_mask.paste(0, (0, 0), person_only)
45
 
46
+ # Extract just the background
47
+ background = img.copy()
48
+ background.putalpha(inverted_mask)
49
+
50
+ # Step 4: Create the text layer
51
+ text_layer = Image.new('RGBA', (width, height), (0, 0, 0, 0))
52
+ draw = ImageDraw.Draw(text_layer)
53
 
54
  # Prepare the text
55
  text = text.strip().upper()
 
126
  # Draw the text with opacity
127
  draw.text((x, y), line, font=font, fill=text_color_rgb + (int(text_opacity * 255),))
128
 
129
+ # Step 5: Composite all layers together
130
+ # First, composite the text on top of the background
131
+ # We use the inverted mask so text only shows where the person isn't
132
+ background_with_text = Image.alpha_composite(background, text_layer)
133
+
134
+ # Then, composite the person on top
135
+ final_image = Image.alpha_composite(background_with_text, person_mask)
136
 
137
  # Convert to RGB for display
138
  return np.array(final_image.convert('RGB'))