"""
Image generation functionality handler for AI-Inferoxy AI Hub.
Handles text-to-image generation with multiple providers.
"""

import os
import gradio as gr
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FutureTimeoutError
from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from requests.exceptions import ConnectionError
from hf_token_utils import get_proxy_token, report_token_status
from utils import (
    IMAGE_CONFIG,
    validate_proxy_key,
    format_error_message,
    format_success_message,
)

# Timeout configuration for image generation
IMAGE_GENERATION_TIMEOUT = 300  # 5 minutes max for image generation


def validate_dimensions(width, height):
    """Validate that dimensions are divisible by 8 (required by most diffusion models)"""
    if width % 8 != 0 or height % 8 != 0:
        return False, "Width and height must be divisible by 8"
    return True, ""


def generate_image(
    prompt: str,
    model_name: str,
    provider: str,
    negative_prompt: str = "",
    width: int = IMAGE_CONFIG["width"],
    height: int = IMAGE_CONFIG["height"],
    num_inference_steps: int = IMAGE_CONFIG["num_inference_steps"],
    guidance_scale: float = IMAGE_CONFIG["guidance_scale"],
    seed: int = IMAGE_CONFIG["seed"],
    client_name: str | None = None,
):
    """
    Generate an image using the specified model and provider through AI-Inferoxy.
    """
    # Validate proxy API key
    is_valid, error_msg = validate_proxy_key()
    if not is_valid:
        return None, error_msg
    
    proxy_api_key = os.getenv("PROXY_KEY")
    
    token_id = None
    try:
        # Get a managed token from the AI-Inferoxy proxy server
        print("πŸ”‘ Image: Requesting token from proxy...")
        token, token_id = get_proxy_token(api_key=proxy_api_key)
        print(f"βœ… Image: Got token: {token_id}")
        
        print(f"🎨 Image: Using model='{model_name}', provider='{provider}'")
        
        # Create client with specified provider
        client = InferenceClient(
            provider=provider,
            api_key=token
        )
        
        print(f"πŸš€ Image: Client created, preparing generation params...")
        
        # Prepare generation parameters
        generation_params = {
            "model": model_name,
            "prompt": prompt,
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
        }
        
        # Add optional parameters if provided
        if negative_prompt:
            generation_params["negative_prompt"] = negative_prompt
        if seed != -1:
            generation_params["seed"] = seed
        
        print(f"πŸ“ Image: Dimensions: {width}x{height}, steps: {num_inference_steps}, guidance: {guidance_scale}")
        print(f"πŸ“‘ Image: Making generation request with {IMAGE_GENERATION_TIMEOUT}s timeout...")
        
        # Create generation function for timeout handling
        def generate_image_task():
            return client.text_to_image(**generation_params)
        
        # Execute with a timeout. A `with ThreadPoolExecutor(...)` block would
        # call shutdown(wait=True) on exit and keep blocking until the task
        # finishes even after the timeout fires, so shutdown is managed manually.
        executor = ThreadPoolExecutor(max_workers=1)
        future = executor.submit(generate_image_task)
        try:
            image = future.result(timeout=IMAGE_GENERATION_TIMEOUT)
        except FutureTimeoutError:
            raise TimeoutError(f"Image generation timed out after {IMAGE_GENERATION_TIMEOUT} seconds")
        finally:
            # A running task cannot be forcibly cancelled; release the worker
            # thread in the background so the caller is not kept waiting.
            executor.shutdown(wait=False)
        
        print(f"πŸ–ΌοΈ Image: Generation completed! Image type: {type(image)}")
        
        # Report successful token usage
        if token_id:
            report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
        
        return image, format_success_message("Image generated", f"using {model_name} on {provider}")
        
    except ConnectionError as e:
        # Handle proxy connection errors
        error_msg = f"Cannot connect to AI-Inferoxy server: {str(e)}"
        print(f"πŸ”Œ Image connection error: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
        return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
        
    except TimeoutError as e:
        # Handle timeout errors
        error_msg = f"Image generation timed out: {str(e)}"
        print(f"⏰ Image timeout: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
        return None, format_error_message("Timeout Error", f"Image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing image size or steps.")
        
    except HfHubHTTPError as e:
        # Handle HuggingFace API errors
        error_msg = str(e)
        print(f"πŸ€— Image HF error: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
        
        # Provide more user-friendly error messages
        if "401" in error_msg:
            return None, format_error_message("Authentication Error", "Invalid or expired API token. The proxy will provide a new token on retry.")
        elif "402" in error_msg:
            return None, format_error_message("Quota Exceeded", "API quota exceeded. The proxy will try alternative providers.")
        elif "429" in error_msg:
            return None, format_error_message("Rate Limited", "Too many requests. Please wait a moment and try again.")
        elif "content policy" in error_msg.lower() or "safety" in error_msg.lower():
            return None, format_error_message("Content Policy", "Image prompt was rejected by content policy. Please try a different prompt.")
        else:
            return None, format_error_message("HuggingFace API Error", error_msg)
        
    except Exception as e:
        # Handle all other errors
        error_msg = str(e)
        print(f"❌ Image unexpected error: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
        return None, format_error_message("Unexpected Error", f"An unexpected error occurred: {error_msg}")


def generate_image_to_image(
    input_image,
    prompt: str,
    model_name: str,
    provider: str,
    negative_prompt: str = "",
    num_inference_steps: int = IMAGE_CONFIG["num_inference_steps"],
    guidance_scale: float = IMAGE_CONFIG["guidance_scale"],
    seed: int = IMAGE_CONFIG["seed"],
    client_name: str | None = None,
):
    """
    Generate an image using image-to-image generation with the specified model and provider through AI-Inferoxy.
    """
    # Validate proxy API key
    is_valid, error_msg = validate_proxy_key()
    if not is_valid:
        return None, error_msg
    
    proxy_api_key = os.getenv("PROXY_KEY")
    
    token_id = None
    try:
        # Get a managed token from the AI-Inferoxy proxy server
        print("πŸ”‘ Image-to-Image: Requesting token from proxy...")
        token, token_id = get_proxy_token(api_key=proxy_api_key)
        print(f"βœ… Image-to-Image: Got token: {token_id}")
        
        print(f"🎨 Image-to-Image: Using model='{model_name}', provider='{provider}'")
        
        # Create client with specified provider
        client = InferenceClient(
            provider=provider,
            api_key=token
        )
        
        print(f"πŸš€ Image-to-Image: Client created, preparing generation params...")
        
        # Prepare generation parameters
        generation_params = {
            "image": input_image,
            "prompt": prompt,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
        }
        
        # Add optional parameters if provided
        if negative_prompt:
            generation_params["negative_prompt"] = negative_prompt
        if seed != -1:
            generation_params["seed"] = seed
        
        print(f"πŸ“‘ Image-to-Image: Making generation request with {IMAGE_GENERATION_TIMEOUT}s timeout...")
        
        # Create generation function for timeout handling
        def generate_image_task():
            return client.image_to_image(
                model=model_name,
                **generation_params
            )
        
        # Execute with a timeout. A `with ThreadPoolExecutor(...)` block would
        # call shutdown(wait=True) on exit and keep blocking until the task
        # finishes even after the timeout fires, so shutdown is managed manually.
        executor = ThreadPoolExecutor(max_workers=1)
        future = executor.submit(generate_image_task)
        try:
            image = future.result(timeout=IMAGE_GENERATION_TIMEOUT)
        except FutureTimeoutError:
            raise TimeoutError(f"Image-to-image generation timed out after {IMAGE_GENERATION_TIMEOUT} seconds")
        finally:
            # A running task cannot be forcibly cancelled; release the worker
            # thread in the background so the caller is not kept waiting.
            executor.shutdown(wait=False)
        
        print(f"πŸ–ΌοΈ Image-to-Image: Generation completed! Image type: {type(image)}")
        
        # Report successful token usage
        if token_id:
            report_token_status(token_id, "success", api_key=proxy_api_key, client_name=client_name)
        
        return image, format_success_message("Image-to-image generated", f"using {model_name} on {provider}")
        
    except ConnectionError as e:
        # Handle proxy connection errors
        error_msg = f"Cannot connect to AI-Inferoxy server: {str(e)}"
        print(f"πŸ”Œ Image-to-Image connection error: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
        return None, format_error_message("Connection Error", "Unable to connect to the proxy server. Please check if it's running.")
        
    except TimeoutError as e:
        # Handle timeout errors
        error_msg = f"Image-to-image generation timed out: {str(e)}"
        print(f"⏰ Image-to-Image timeout: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
        return None, format_error_message("Timeout Error", f"Image-to-image generation took too long (>{IMAGE_GENERATION_TIMEOUT//60} minutes). Try reducing steps.")
        
    except HfHubHTTPError as e:
        # Handle HuggingFace API errors
        error_msg = str(e)
        print(f"πŸ€— Image-to-Image HF error: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key, client_name=client_name)
        
        # Provide more user-friendly error messages
        if "401" in error_msg:
            return None, format_error_message("Authentication Error", "Invalid or expired API token. The proxy will provide a new token on retry.")
        elif "402" in error_msg:
            return None, format_error_message("Quota Exceeded", "API quota exceeded. The proxy will try alternative providers.")
        elif "429" in error_msg:
            return None, format_error_message("Rate Limited", "Too many requests. Please wait a moment and try again.")
        elif "content policy" in error_msg.lower() or "safety" in error_msg.lower():
            return None, format_error_message("Content Policy", "Image prompt was rejected by content policy. Please try a different prompt.")
        else:
            return None, format_error_message("HuggingFace API Error", error_msg)
        
    except Exception as e:
        # Handle all other errors
        error_msg = str(e)
        print(f"❌ Image-to-Image unexpected error: {error_msg}")
        if token_id:
            report_token_status(token_id, "error", error_msg, api_key=proxy_api_key)
        return None, format_error_message("Unexpected Error", f"An unexpected error occurred: {error_msg}")


def handle_image_to_image_generation(
    input_image_val,
    prompt_val,
    model_val,
    provider_val,
    negative_prompt_val,
    steps_val,
    guidance_val,
    seed_val,
    hf_token: gr.OAuthToken = None,
    hf_profile: gr.OAuthProfile = None,
):
    """
    Handle image-to-image generation request with validation.
    """
    # Validate input image
    if input_image_val is None:
        return None, format_error_message("Validation Error", "Please upload an input image")
    
    # Require sign-in via HF OAuth token
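    # (the OAuth token is only checked for presence as an access gate;
    # the actual inference call authenticates with proxy-issued tokens)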
    access_token = getattr(hf_token, "token", None) if hf_token is not None else None
    username = getattr(hf_profile, "username", None) if hf_profile is not None else None
    if not access_token:
        return None, format_error_message("Access Required", "Please sign in with Hugging Face (sidebar Login button).")
    
    # Generate image-to-image
    return generate_image_to_image(
        input_image=input_image_val,
        prompt=prompt_val,
        model_name=model_val,
        provider=provider_val,
        negative_prompt=negative_prompt_val,
        num_inference_steps=steps_val,
        guidance_scale=guidance_val,
        seed=seed_val,
        client_name=username
    )


def handle_image_generation(
    prompt_val,
    model_val,
    provider_val,
    negative_prompt_val,
    width_val,
    height_val,
    steps_val,
    guidance_val,
    seed_val,
    hf_token: gr.OAuthToken = None,
    hf_profile: gr.OAuthProfile = None,
):
    """
    Handle image generation request with validation.
    """
    # Validate dimensions
    is_valid, error_msg = validate_dimensions(width_val, height_val)
    if not is_valid:
        return None, format_error_message("Validation Error", error_msg)
    
    # Require sign-in via HF OAuth token
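    # (the OAuth token is only checked for presence as an access gate;
    # the actual inference call authenticates with proxy-issued tokens)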
    access_token = getattr(hf_token, "token", None) if hf_token is not None else None
    username = getattr(hf_profile, "username", None) if hf_profile is not None else None
    if not access_token:
        return None, format_error_message("Access Required", "Please sign in with Hugging Face (sidebar Login button).")
    
    # Generate image
    return generate_image(
        prompt=prompt_val,
        model_name=model_val,
        provider=provider_val,
        negative_prompt=negative_prompt_val,
        width=width_val,
        height=height_val,
        num_inference_steps=steps_val,
        guidance_scale=guidance_val,
        seed=seed_val,
        client_name=username
    )
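

# A minimal local smoke test (a sketch, not wired into the app's UI): it
# assumes the PROXY_KEY environment variable is set, that an AI-Inferoxy
# instance is reachable, and that the model/provider pair below is one the
# proxy can serve; both values are placeholders, adjust to taste.
if __name__ == "__main__":
    image, status = generate_image(
        prompt="A watercolor lighthouse at dawn",
        model_name="stabilityai/stable-diffusion-xl-base-1.0",
        provider="hf-inference",
        width=512,
        height=512,
    )
    print(status)
    if image is not None:
        # text_to_image returns a PIL image, which can be saved directly
        image.save("smoke_test_output.png")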