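"""Flask request handlers for the /process and /compare endpoints of the matching pipeline."""
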
import traceback

import numpy as np
import cv2
from flask import request, jsonify

# Import app, models, and logic functions
from pipeline import app, models, logic


# Route decorator assumed (POST endpoint inferred from the JSON body handling and the /process log below).
@app.route("/process", methods=["POST"])
def process_item():
| print("\n" + "="*50) | |
| print("β‘ [Request] Received new request to /process") | |
| try: | |
| data = request.get_json() | |
| if not data: return jsonify({"error": "Invalid JSON payload"}), 400 | |
| object_name = data.get('objectName') | |
| description = data.get('objectDescription') | |
| image_url = data.get('objectImage') | |
| if not all([object_name, description]): | |
| return jsonify({"error": "objectName and objectDescription are required."}), 400 | |
        canonical_label = logic.get_canonical_label(object_name)
        text_embedding = logic.get_text_embedding(description, models)

        response_data = {
            "canonicalLabel": canonical_label,
            "text_embedding": text_embedding,
        }
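
        # Visual features are optional: they are only computed when an image URL accompanies the item.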
        if image_url:
            print("--- Image URL provided, processing visual features... ---")
            image = logic.download_image_from_url(image_url)
            object_crop = logic.detect_and_crop(image, canonical_label, models)
            visual_features = logic.extract_features(object_crop)
            response_data.update(visual_features)
        else:
            print("--- No image URL provided, skipping visual feature extraction. ---")

        print("✅ Successfully processed item.")
        print("="*50)
        return jsonify(response_data), 200
    except Exception as e:
        print(f"❌ Error in /process: {e}")
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500


# Route decorator assumed (POST endpoint inferred from the JSON body handling and the /compare log below).
@app.route("/compare", methods=["POST"])
def compare_items():
| print("\n" + "="*50) | |
| print("β‘ [Request] Received new request to /compare") | |
| try: | |
| data = request.get_json() | |
| if not data: return jsonify({"error": "Invalid JSON payload"}), 400 | |
| query_item = data.get('queryItem') | |
| search_list = data.get('searchList') | |
| if not all([query_item, search_list]): | |
| return jsonify({"error": "queryItem and searchList are required."}), 400 | |
        query_text_emb = np.array(query_item['text_embedding'])
        results = []
        print(f"--- Comparing 1 query item against {len(search_list)} items ---")
        for item in search_list:
            item_id = item.get('_id')
            print(f"\n [Checking] Item ID: {item_id}")
            try:
                text_emb_found = np.array(item['text_embedding'])
                text_score = logic.cosine_similarity(query_text_emb, text_emb_found)
                print(f" - Text Score: {text_score:.4f}")
                has_query_image = 'shape_features' in query_item and query_item['shape_features']
                has_item_image = 'shape_features' in item and item['shape_features']
                if has_query_image and has_item_image:
                    print(" - Both items have images. Performing visual comparison.")
                    from pipeline import FEATURE_WEIGHTS  # Import constant
                    query_shape = np.array(query_item['shape_features'])
                    query_color = np.array(query_item['color_features']).astype("float32")
                    query_texture = np.array(query_item['texture_features']).astype("float32")
                    found_shape = np.array(item['shape_features'])
                    found_color = np.array(item['color_features']).astype("float32")
                    found_texture = np.array(item['texture_features']).astype("float32")
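
                    # matchShapes returns a distance (lower is better), so it is inverted into a
                    # similarity in (0, 1]; the two histogram comparisons already return correlations.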
                    shape_dist = cv2.matchShapes(query_shape, found_shape, cv2.CONTOURS_MATCH_I1, 0.0)
                    shape_score = 1.0 / (1.0 + shape_dist)
                    color_score = cv2.compareHist(query_color, found_color, cv2.HISTCMP_CORREL)
                    texture_score = cv2.compareHist(query_texture, found_texture, cv2.HISTCMP_CORREL)
                    raw_image_score = (FEATURE_WEIGHTS["shape"] * shape_score +
                                       FEATURE_WEIGHTS["color"] * color_score +
                                       FEATURE_WEIGHTS["texture"] * texture_score)
                    print(f"Raw Image Score: {raw_image_score:.4f}")
                    image_score = logic.stretch_image_score(raw_image_score)
                    final_score = 0.4 * image_score + 0.6 * text_score
                    print(f" - Image Score: {image_score:.4f} | Final Score: {final_score:.4f}")
                else:
                    print(" - One or both items missing image. Using text score only.")
                    final_score = text_score
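
                # Only candidates at or above the global threshold are kept as matches.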
                from pipeline import FINAL_SCORE_THRESHOLD  # Import constant
                if final_score >= FINAL_SCORE_THRESHOLD:
                    print(f" - ✅ ACCEPTED (Score >= {FINAL_SCORE_THRESHOLD})")
                    results.append({
                        "_id": item_id,
                        "score": round(final_score, 4),
                        "objectName": item.get("objectName"),
                        "objectDescription": item.get("objectDescription"),
                        "objectImage": item.get("objectImage"),
                    })
                else:
                    print(f" - ❌ REJECTED (Score < {FINAL_SCORE_THRESHOLD})")
            except Exception as e:
                print(f" [Skipping] Item {item_id} due to processing error: {e}")
                continue
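
        # Return the highest-scoring matches first.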
        results.sort(key=lambda x: x["score"], reverse=True)
        print(f"\n✅ Search complete. Found {len(results)} potential matches.")
        print("="*50)
        return jsonify({"matches": results}), 200
    except Exception as e:
        print(f"❌ Error in /compare: {e}")
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500