AiCoderv2 committed
Commit 4f42f3b · verified · 1 Parent(s): f9f1fd0

Deploy Gradio app with multiple files

Files changed (2)
  1. app.py +330 -0
  2. requirements.txt +38 -0
app.py ADDED
@@ -0,0 +1,330 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import json
+ from datetime import datetime, timedelta
+ import os
+
+ # List of 20 well-known large language models
+ MODELS = [
+     "meta-llama/Llama-3.3-70B-Instruct",
+     "meta-llama/Llama-3.1-405B-Instruct",
+     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     "mistralai/Mistral-7B-Instruct-v0.3",
+     "google/gemma-2-27b-it",
+     "google/gemma-2-9b-it",
+     "Qwen/Qwen2.5-72B-Instruct",
+     "Qwen/Qwen2.5-Coder-32B-Instruct",
+     "microsoft/Phi-3.5-mini-instruct",
+     "tiiuae/falcon-180B-chat",
+     "HuggingFaceH4/zephyr-7b-beta",
+     "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+     "01-ai/Yi-34B-Chat",
+     "databricks/dbrx-instruct",
+     "openchat/openchat-3.5-0106",
+     "teknium/OpenHermes-2.5-Mistral-7B",
+     "cognitivecomputations/dolphin-2.6-mixtral-8x7b",
+     "Nexusflow/Starling-LM-7B-beta",
+     "EleutherAI/llemma_34b",
+     "upstage/SOLAR-10.7B-Instruct-v1.0"
+ ]
+
+ def get_usage_data(request: gr.Request):
+     """Get usage data from browser storage (placeholder; persistence is handled in JavaScript)."""
+     try:
+         return {"chats_used": 0, "reset_time": None}
+     except Exception:
+         return {"chats_used": 0, "reset_time": None}
+
+ def check_usage_limit(chats_used, reset_time):
+     """Return the current usage, resetting the counter if the reset time has passed."""
+     if reset_time:
+         reset_dt = datetime.fromisoformat(reset_time)
+         if datetime.now() > reset_dt:
+             return 0, None  # Reset the counter
+
+     return chats_used, reset_time
+
+ def chat_with_model(history, model_name, hf_token, chats_used, reset_time):
+     """Stream a reply to the latest user message from the selected model."""
+
+     # Check usage limit
+     current_chats, current_reset = check_usage_limit(chats_used, reset_time)
+
+     # Nothing to do if the last turn is not an unanswered user message
+     if not history or history[-1]["role"] != "user":
+         yield history, current_chats, current_reset, ""
+         return
+
+     if current_chats >= 2:
+         if current_reset:
+             reset_dt = datetime.fromisoformat(current_reset)
+             yield history, current_chats, current_reset, f"⚠️ You've used your 2 free chats this month. Next reset: {reset_dt.strftime('%Y-%m-%d %H:%M')}"
+         else:
+             yield history, current_chats, current_reset, "⚠️ You've used your 2 free chats this month."
+         return
+
+     if not hf_token:
+         yield history, current_chats, current_reset, "⚠️ Please log in with your Hugging Face token first."
+         return
+
+     try:
+         # Initialize client with the user's token
+         client = InferenceClient(token=hf_token)
+
+         # Prepare messages for the API (history is already in messages format)
+         messages = [{"role": m["role"], "content": m["content"]} for m in history]
+
+         # Stream the response into a new assistant turn
+         response_text = ""
+         history = history + [{"role": "assistant", "content": ""}]
+
+         for chunk in client.chat_completion(
+             model=model_name,
+             messages=messages,
+             max_tokens=2000,
+             stream=True,
+         ):
+             delta = chunk.choices[0].delta.content
+             if delta:
+                 response_text += delta
+                 history[-1]["content"] = response_text
+                 yield history, current_chats, current_reset, ""
+
+         # Increment usage counter
+         new_chats_used = current_chats + 1
+         new_reset_time = current_reset
+
+         if new_reset_time is None:
+             # Set reset time to 30 days from now
+             new_reset_time = (datetime.now() + timedelta(days=30)).isoformat()
+
+         status_msg = f"✅ Chat successful! Chats used: {new_chats_used}/2"
+         if new_chats_used >= 2:
+             reset_dt = datetime.fromisoformat(new_reset_time)
+             status_msg += f" | Next reset: {reset_dt.strftime('%Y-%m-%d %H:%M')}"
+
+         yield history, new_chats_used, new_reset_time, status_msg
+
+     except Exception as e:
+         error_msg = f"❌ Error: {str(e)}"
+         if "429" in str(e):
+             error_msg = "❌ Rate limit exceeded. Please try again later."
+         elif "401" in str(e) or "403" in str(e):
+             error_msg = "❌ Invalid Hugging Face token. Please check your token."
+
+         yield history, current_chats, current_reset, error_msg
+
+ # Custom CSS
+ css = """
+ #header {
+     text-align: center;
+     padding: 20px;
+     background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
+     color: white;
+     border-radius: 10px;
+     margin-bottom: 20px;
+ }
+ #header a {
+     color: #FFD700;
+     text-decoration: none;
+     font-weight: bold;
+     font-size: 0.9em;
+ }
+ #header a:hover {
+     text-decoration: underline;
+ }
+ #chatbot {
+     height: 500px;
+ }
+ .usage-info {
+     padding: 10px;
+     border-radius: 5px;
+     margin: 10px 0;
+ }
+ """
+
+ # JavaScript for localStorage management (same logic as the inline js passed to demo.load below)
+ js_code = """
+ function() {
+     // Load usage data from localStorage
+     const usageData = localStorage.getItem('hf_chat_usage');
+     let chatsUsed = 0;
+     let resetTime = null;
+
+     if (usageData) {
+         const data = JSON.parse(usageData);
+         chatsUsed = data.chats_used || 0;
+         resetTime = data.reset_time || null;
+
+         // Check if reset time has passed
+         if (resetTime && new Date() > new Date(resetTime)) {
+             chatsUsed = 0;
+             resetTime = null;
+             localStorage.setItem('hf_chat_usage', JSON.stringify({chats_used: 0, reset_time: null}));
+         }
+     }
+
+     return [chatsUsed, resetTime];
+ }
+ """
+
+ # Build the Gradio interface
+ with gr.Blocks(css=css, theme=gr.themes.Soft(), title="HF Model Chat - 2 Free Chats/Month") as demo:
+
+     # Header with attribution
+     gr.HTML("""
+     <div id="header">
+         <h1>🤗 Hugging Face Model Chatbot</h1>
+         <p>Chat with 20 Large Language Models | 2 Free Chats per Month</p>
+         <p><a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank">Built with anycoder</a></p>
+     </div>
+     """)
+
+     # State variables
+     chats_used_state = gr.State(0)
+     reset_time_state = gr.State(None)
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             gr.Markdown("### 🔐 Login & Settings")
+
+             hf_token = gr.Textbox(
+                 label="Hugging Face Token",
+                 placeholder="hf_...",
+                 type="password",
+                 info="Enter your HF token to use the models"
+             )
+
+             gr.Markdown("""
+             <small>Get your token from
+             <a href="https://huggingface.co/settings/tokens" target="_blank">
+             Hugging Face Settings</a></small>
+             """)
+
+             model_dropdown = gr.Dropdown(
+                 choices=MODELS,
+                 value=MODELS[0],
+                 label="Select Model",
+                 info="Choose from 20 large language models"
+             )
+
+             usage_display = gr.Markdown("### 📊 Usage: 0/2 chats used")
+             status_box = gr.Textbox(
+                 label="Status",
+                 interactive=False,
+                 visible=True
+             )
+
+         with gr.Column(scale=2):
+             gr.Markdown("### 💬 Chat")
+
+             chatbot = gr.Chatbot(
+                 elem_id="chatbot",
+                 type="messages",
+                 height=500,
+                 show_copy_button=True
+             )
+
+             with gr.Row():
+                 msg_input = gr.Textbox(
+                     placeholder="Type your message here...",
+                     show_label=False,
+                     scale=4
+                 )
+                 send_btn = gr.Button("Send", variant="primary", scale=1)
+
+             clear_btn = gr.Button("Clear Chat")
+
+     # Usage info display
+     def update_usage_display(chats_used, reset_time):
+         msg = f"### 📊 Usage: {chats_used}/2 chats used"
+         if chats_used >= 2 and reset_time:
+             reset_dt = datetime.fromisoformat(reset_time)
+             msg += f"\n**Next reset:** {reset_dt.strftime('%Y-%m-%d %H:%M')}"
+         return msg
+
+     # Event handlers: first append the user's turn, then stream the model's reply
+     def user_submit(message, history):
+         if not message.strip():
+             return "", history
+         return "", history + [{"role": "user", "content": message}]
+
+     submit_event = msg_input.submit(
+         user_submit,
+         [msg_input, chatbot],
+         [msg_input, chatbot],
+         queue=False
+     ).then(
+         chat_with_model,
+         [chatbot, model_dropdown, hf_token, chats_used_state, reset_time_state],
+         [chatbot, chats_used_state, reset_time_state, status_box]
+     )
+
+     send_btn.click(
+         user_submit,
+         [msg_input, chatbot],
+         [msg_input, chatbot],
+         queue=False
+     ).then(
+         chat_with_model,
+         [chatbot, model_dropdown, hf_token, chats_used_state, reset_time_state],
+         [chatbot, chats_used_state, reset_time_state, status_box]
+     )
+
+     clear_btn.click(
+         lambda: ([], ""),
+         None,
+         [chatbot, status_box],
+         queue=False
+     )
+
+     # Update usage display when state changes
+     chats_used_state.change(
+         update_usage_display,
+         [chats_used_state, reset_time_state],
+         usage_display
+     )
+
+     # Load usage data from localStorage when the page opens
+     demo.load(
+         None,
+         None,
+         [chats_used_state, reset_time_state],
+         js="""
+         function() {
+             const usageData = localStorage.getItem('hf_chat_usage');
+             let chatsUsed = 0;
+             let resetTime = null;
+
+             if (usageData) {
+                 const data = JSON.parse(usageData);
+                 chatsUsed = data.chats_used || 0;
+                 resetTime = data.reset_time || null;
+
+                 if (resetTime && new Date() > new Date(resetTime)) {
+                     chatsUsed = 0;
+                     resetTime = null;
+                     localStorage.setItem('hf_chat_usage', JSON.stringify({chats_used: 0, reset_time: null}));
+                 }
+             }
+
+             return [chatsUsed, resetTime];
+         }
+         """
+     )
+
+     # Save changes to localStorage
+     chats_used_state.change(
+         None,
+         [chats_used_state, reset_time_state],
+         None,
+         js="""
+         function(chats, reset) {
+             localStorage.setItem('hf_chat_usage', JSON.stringify({
+                 chats_used: chats,
+                 reset_time: reset
+             }));
+         }
+         """
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
requirements.txt ADDED
@@ -0,0 +1,38 @@
+ gradio
+ huggingface_hub
+
+ I've created a fully functional chatbot application with the following features:
+
+ ## Key Features:
+
+ 1. **20 Large Language Models** - Select from models like Llama 3.3 70B, Mixtral, Gemma, Qwen, and more
+
+ 2. **Hugging Face Login** - Users enter their HF token to authenticate (token-based rather than OAuth, which is more practical here)
+
+ 3. **2 Chats per Month Limit** - Enforced usage limit with a counter
+
+ 4. **Local Storage Auto-Save** - Automatically tracks:
+    - Number of chats used
+    - Reset time (30 days from the first chat)
+    - Persists across browser sessions
+
+ 5. **Usage Display** - Shows:
+    - Current usage (X/2 chats)
+    - Next reset date/time when the limit is reached
+
+ 6. **Beautiful UI** with:
+    - Gradient header
+    - Status messages
+    - Real-time streaming responses
+    - "Built with anycoder" attribution link
+
+ ## How it works:
+
+ 1. The user enters their Hugging Face token
+ 2. Selects a model from the dropdown
+ 3. Starts chatting (responses stream in real time)
+ 4. After each successful chat, the counter increments
+ 5. After 2 chats, users see when they can chat again
+ 6. After 30 days, the limit automatically resets
+
+ The localStorage JavaScript integration ensures the usage data persists even when the page is refreshed.
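For reference, a minimal sketch of the single streaming request that app.py issues per chat, run outside Gradio. It assumes a valid token is available in an `HF_TOKEN` environment variable; the model id is taken from the `MODELS` list above, and the prompt is illustrative only.

```python
# Minimal sketch (illustrative): the same streaming chat_completion call app.py makes.
# Assumes a valid Hugging Face token in the HF_TOKEN environment variable.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ["HF_TOKEN"])

for chunk in client.chat_completion(
    model="meta-llama/Llama-3.3-70B-Instruct",  # any entry from MODELS in app.py
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=100,
    stream=True,
):
    delta = chunk.choices[0].delta.content
    if delta:
        # Print each streamed token fragment as it arrives
        print(delta, end="", flush=True)
print()
```

Running this once is a quick way to confirm the token and model are usable before launching the full Space locally with `python app.py`.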