sohamnk committed (verified)
Commit e672b92 · Parent: a378a7a

Update app.py

Files changed (1)
  1. app.py +10 -14
app.py CHANGED
@@ -1,9 +1,10 @@
  # -------------------------------------------------------------------------- #
- # UNIFIED AI SERVICE V3.4 (Color-Enhanced Segmentation)
+ # UNIFIED AI SERVICE V3.3 (Added Markings Comparison)
  # -------------------------------------------------------------------------- #
  # This service uses DINOv2 for image embeddings and BGE for text embeddings.
- # - The segmentation prompt now includes colors for better accuracy.
- # - For debugging, segmented images are uploaded to Uploadcare.
+ # - Filtering is handled by the Node.js backend.
+ # - For debugging, segmented images are uploaded to Uploadcare and the URL
+ #   is printed to the console log.
  # --------------------------------------------------------------------------
  import sys
  sys.stdout.reconfigure(line_buffering=True)
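The header above says the service embeds images with DINOv2 and text fields with BGE, but neither call appears in this diff. Below is a minimal sketch of what the two embedding paths typically look like with Hugging Face libraries; the checkpoint names, the CLS-token pooling, and the function bodies are assumptions for illustration, not code from this repository.

```python
# Sketch only: checkpoints and pooling strategy are assumptions, not from app.py.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModel
from sentence_transformers import SentenceTransformer

image_processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base")
image_model = AutoModel.from_pretrained("facebook/dinov2-base")
text_model = SentenceTransformer("BAAI/bge-base-en-v1.5")

def get_image_embedding(image: Image.Image) -> list:
    # DINOv2: take the CLS token of the last hidden state as a global descriptor.
    inputs = image_processor(images=image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        outputs = image_model(**inputs)
    return outputs.last_hidden_state[0, 0].tolist()

def get_text_embedding(text: str):
    # BGE: skip empty fields so missing metadata yields no vector instead of noise.
    if not text or not text.strip():
        return None
    return text_model.encode(text.strip(), normalize_embeddings=True).tolist()
```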
@@ -29,6 +30,7 @@ from transformers import AutoProcessor as AutoGndProcessor, AutoModelForZeroShot
 
  app = Flask(__name__)
 
+ # --- UPDATED: Added "markings" to the list of fields to compare ---
  TEXT_FIELDS_TO_EMBED = ["brand", "material", "markings"]
  SCORE_WEIGHTS = {
  "text_score": 0.4,
@@ -111,15 +113,9 @@ def jaccard_similarity(set1, set2):
  return 1.0 if not intersection else 0.0
  return len(intersection) / len(union)
 
- def segment_guided_object(image: Image.Image, object_label: str, colors: list = []) -> Image.Image:
- # --- UPDATED: Create a more descriptive prompt using colors ---
- color_str = " ".join(c.lower() for c in colors if c)
- if color_str:
- prompt = f"a {color_str} {object_label}."
- else:
- prompt = f"a {object_label}."
-
- print(f" [Segment] Using prompt: '{prompt}'")
+ def segment_guided_object(image: Image.Image, object_label: str) -> Image.Image:
+ prompt = f"a {object_label}."
+ print(f" [Segment] Using simple prompt: '{prompt}'")
  image_rgb = image.convert("RGB")
  image_np = np.array(image_rgb)
  h, w = image_np.shape[:2]
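The second hunk header shows that app.py imports a zero-shot detection model from transformers (the import name is truncated in this view), which is what consumes the prompt built in segment_guided_object. Below is a sketch of how a lowercase, period-terminated prompt such as f"a {object_label}." is typically fed to a grounded detector; the checkpoint and the helper are assumptions, not the repository's actual code.

```python
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

# Assumed checkpoint; the diff does not reveal which grounded detector app.py loads.
CHECKPOINT = "IDEA-Research/grounding-dino-tiny"
gnd_processor = AutoProcessor.from_pretrained(CHECKPOINT)
gnd_model = AutoModelForZeroShotObjectDetection.from_pretrained(CHECKPOINT)

def detect_with_prompt(image: Image.Image, prompt: str):
    # Grounding-DINO-style prompts are lowercase phrases ending in a period,
    # matching the f"a {object_label}." string built in the hunk above.
    inputs = gnd_processor(images=image.convert("RGB"), text=prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = gnd_model(**inputs)
    results = gnd_processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        target_sizes=[image.size[::-1]],  # PIL size is (w, h); the API expects (h, w)
    )
    return results[0]["boxes"], results[0]["scores"]
```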
@@ -183,6 +179,7 @@ def process_item():
  data = request.json
  print(f"\n[PROCESS] Received request for: {data.get('objectName')}")
 
+ # --- UPDATED: Added markings_embedding ---
  response = {
  "canonicalLabel": data.get('objectName', '').lower().strip(),
  "brand_embedding": get_text_embedding(data.get('brand')),
@@ -199,8 +196,7 @@
  img_response.raise_for_status()
  image = Image.open(BytesIO(img_response.content))
 
- # --- UPDATED: Pass colors to the segmentation function ---
- segmented_image = segment_guided_object(image, data['objectName'], data.get('colors', []))
+ segmented_image = segment_guided_object(image, data['objectName'])
  debug_url = upload_to_uploadcare(segmented_image)
  print(f" - 🐞 DEBUG URL: {debug_url}")
 
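After this change, process_item() no longer reads a colors list when building the segmentation prompt, while markings is embedded alongside brand and material. Below is a hedged example of calling the endpoint; the route path, port, and the image-URL field name are guesses, since only objectName, brand, material, markings, and colors can be inferred from the diff.

```python
import requests

# Hypothetical route and imageUrl key: neither is visible in this diff.
payload = {
    "objectName": "water bottle",
    "brand": "Hydro Flask",
    "material": "stainless steel",
    "markings": "initials S.K. engraved on the cap",
    "colors": ["blue"],  # no longer used for the segmentation prompt after this commit
    "imageUrl": "https://example.com/found-item.jpg",
}

resp = requests.post("http://localhost:7860/process", json=payload, timeout=60)
resp.raise_for_status()
result = resp.json()
print(result["canonicalLabel"])                   # e.g. "water bottle"
print(len(result.get("brand_embedding") or []))   # embedding length, or 0 if brand was empty
```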