jon-fernandes committed on
Commit a7083ce · verified · 1 Parent(s): dd7bdcb

Create app.py

Files changed (1)
1. app.py +272 -0
app.py ADDED
@@ -0,0 +1,272 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+ import os
+ import numpy as np
+ from urllib3.exceptions import HTTPError
+ os.system('pip install dashscope modelscope -U')
+ os.system('pip install gradio==3.*')
+
+ # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
+ from argparse import ArgumentParser
+ from pathlib import Path
+
+ import copy
+ import gradio as gr
+ import re
+ import secrets
+ import tempfile
+ import requests
+ from http import HTTPStatus
+ from dashscope import MultiModalConversation
+ import dashscope
+ import dotenv
+ from dotenv import load_dotenv
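+ # The DashScope key is read from the environment; load_dotenv() lets a local .env
+ # file (or a hosted-Space secret exposed as API_KEY) supply it.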
+ load_dotenv()
+ API_KEY = os.getenv('API_KEY')
+ dashscope.api_key = API_KEY
+
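+ # Constants carried over from the local Qwen-VL-Chat demo. Inference below goes
+ # through the DashScope API, so the checkpoint path/revision (and the matching
+ # CLI flags) are parsed but never used.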
+ DEFAULT_CKPT_PATH = 'qwen/Qwen-VL-Chat'
+ REVISION = 'v1.0.4'
+ BOX_TAG_PATTERN = r"<box>([\s\S]*?)</box>"
+ PUNCTUATION = "！？。＂＃＄％＆＇（）＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
+
+
+ def _get_args():
+     parser = ArgumentParser()
+     parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
+                         help="Checkpoint name or path, default to %(default)r")
+     parser.add_argument("--revision", type=str, default=REVISION)
+     parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
+
+     parser.add_argument("--share", action="store_true", default=False,
+                         help="Create a publicly shareable link for the interface.")
+     parser.add_argument("--inbrowser", action="store_true", default=False,
+                         help="Automatically launch the interface in a new tab on the default browser.")
+     parser.add_argument("--server-port", type=int, default=7860,
+                         help="Demo server port.")
+     parser.add_argument("--server-name", type=str, default="127.0.0.1",
+                         help="Demo server name.")
+
+     args = parser.parse_args()
+     return args
+
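+ # Convert one chat message into HTML for gr.Chatbot: ``` fences become
+ # <pre><code> blocks, characters inside a fence are HTML/Markdown-escaped so the
+ # model output renders literally, and line breaks become <br> tags.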
+ def _parse_text(text):
+     lines = text.split("\n")
+     lines = [line for line in lines if line != ""]
+     count = 0
+     for i, line in enumerate(lines):
+         if "```" in line:
+             count += 1
+             items = line.split("`")
+             if count % 2 == 1:
+                 lines[i] = f'<pre><code class="language-{items[-1]}">'
+             else:
+                 lines[i] = f"<br></code></pre>"
+         else:
+             if i > 0:
+                 if count % 2 == 1:
+                     line = line.replace("`", r"\`")
+                     line = line.replace("<", "&lt;")
+                     line = line.replace(">", "&gt;")
+                     line = line.replace(" ", "&nbsp;")
+                     line = line.replace("*", "&ast;")
+                     line = line.replace("_", "&lowbar;")
+                     line = line.replace("-", "&#45;")
+                     line = line.replace(".", "&#46;")
+                     line = line.replace("!", "&#33;")
+                     line = line.replace("(", "&#40;")
+                     line = line.replace(")", "&#41;")
+                     line = line.replace("$", "&#36;")
+                 lines[i] = "<br>" + line
+     text = "".join(lines)
+     return text
+
+
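+ # The string literal below is a captured example of task_history contents: text
+ # turns are (query, answer) pairs (the Chinese exchange asks "Who is this?" and
+ # the model describes Albert Einstein), image turns are one-element tuples holding
+ # a local file path, and box-annotated results come back as image tuples as well.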
+ """
+ ('/tmp/gradio/1837abb0176495ff182050801ebff1fa9b18fc4a/aiyinsitan.jpg',),
+ None],
+ ['这是谁?',
+ '图中是爱因斯坦,阿尔伯特·爱因斯坦(Albert '
+ 'Einstein),是出生于德国、拥有瑞士和美国国籍的犹太裔理论物理学家,他创立了现代物理学的两大支柱的相对论及量子力学。'],
+ ['框处里面的人', '图中框内是爱因斯坦的半身照,照片中爱因斯坦穿着一件西装,留着标志性的胡子和蜷曲的头发。'],
+ ['框出里面的人',
+ ('/tmp/gradio/71cf5c2551009fd9a00e0d80bc7ab7fb8de211b5/tmp115aba5d70.jpg',)],
+ [None, '里面的人'],
+ ('介绍一下',
+ '阿尔伯特·爱因斯坦(Albert '
+ 'Einstein),是出生于德国、拥有瑞士和美国国籍的犹太裔理论物理学家,他创立了现代物理学的两大支柱的相对论及量子力学。他的贡献包括他提出的相对论(尤其是狭义相对论和广义相对论)、量子力学的开创性贡献以及他对于 '
+ 'gravity 的贡献。爱因斯坦也是诺贝尔奖得主以及美国公民。')]
+ """
+
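+ # Strip Qwen-VL grounding markup (<ref>...</ref> labels and <box>...</box>
+ # coordinates) so partially streamed text renders cleanly in the chat window.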
+ def _remove_image_special(text):
+     text = text.replace('<ref>', '').replace('</ref>', '')
+     return re.sub(r'<box>.*?(</box>|$)', '', text)
+
+ def _launch_demo(args):
+     uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
+         Path(tempfile.gettempdir()) / "gradio"
+     )
+
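+     # Generator driving the Submit/Regenerate handlers: rebuild the DashScope
+     # message list from task_history, stream qwen-vl-max output into the last
+     # chatbot row, then record the final reply back into task_history.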
+     def predict(_chatbot, task_history):
+         chat_query = _chatbot[-1][0]
+         query = task_history[-1][0]
+         if len(chat_query) == 0:
+             _chatbot.pop()
+             task_history.pop()
+             return _chatbot
+         print("User: " + _parse_text(query))
+         history_cp = copy.deepcopy(task_history)
+         full_response = ""
+         messages = []
+         content = []
+         for q, a in history_cp:
+             if isinstance(q, (tuple, list)):
+                 # Uploaded images accumulate until the next text query closes the turn.
+                 content.append({'image': f'file://{q[0]}'})
+             else:
+                 content.append({'text': q})
+                 messages.append({'role': 'user', 'content': content})
+                 messages.append({'role': 'assistant', 'content': [{'text': a}]})
+                 content = []
+         # Drop the placeholder assistant turn for the query being answered now.
+         messages.pop()
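+         # Call the DashScope multimodal chat endpoint with streaming enabled; each
+         # chunk carries the response accumulated so far, so the loop below simply
+         # overwrites the last chatbot row on every iteration.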
+         responses = MultiModalConversation.call(
+             model='qwen-vl-max', messages=messages,
+             seed=np.random.randint(0, np.iinfo(np.int64).max),
+             top_p=0.001,
+             stream=True,
+         )
+         for response in responses:
+             if not response.status_code == HTTPStatus.OK:
+                 raise HTTPError(f'response.code: {response.code}\nresponse.message: {response.message}')
+             response = response.output.choices[0].message.content
+             response_text = []
+             for ele in response:
+                 if 'text' in ele:
+                     response_text.append(ele['text'])
+                 elif 'box' in ele:
+                     response_text.append(ele['box'])
+             response_text = ''.join(response_text)
+             _chatbot[-1] = (_parse_text(chat_query), _remove_image_special(response_text))
+             yield _chatbot
+
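+         # After the stream ends, `response` holds the final content list. Grounding
+         # queries return a rendered image as the last element ('result_image');
+         # download it into the Gradio temp dir and append it as an image bubble.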
+         if len(response) > 1:
+             result_image = response[-1]['result_image']
+             resp = requests.get(result_image)
+             os.makedirs(uploaded_file_dir, exist_ok=True)
+             name = f"tmp{secrets.token_hex(20)}.jpg"
+             filename = os.path.join(uploaded_file_dir, name)
+             with open(filename, 'wb') as f:
+                 f.write(resp.content)
+             response = ''.join(r['box'] if 'box' in r else r['text'] for r in response[:-1])
+             _chatbot.append((None, (filename,)))
+         else:
+             response = response[0]['text']
+             _chatbot[-1] = (_parse_text(chat_query), response)
+         full_response = _parse_text(response)
+
+         task_history[-1] = (query, full_response)
+         print("Qwen-VL-Chat: " + _parse_text(full_response))
+         # task_history = task_history[-10:]
+         yield _chatbot
+
+
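+     # Re-run the last turn: clear its stored answer, rewind the chatbot rows for
+     # that turn (including any appended result image), then run predict again.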
+     def regenerate(_chatbot, task_history):
+         if not task_history:
+             return _chatbot
+         item = task_history[-1]
+         if item[1] is None:
+             return _chatbot
+         task_history[-1] = (item[0], None)
+         chatbot_item = _chatbot.pop(-1)
+         if chatbot_item[0] is None:
+             _chatbot[-1] = (_chatbot[-1][0], None)
+         else:
+             _chatbot.append((chatbot_item[0], None))
+         return predict(_chatbot, task_history)
+
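+     # Two parallel histories are kept: the chatbot value holds HTML-ready pairs for
+     # display, while task_history keeps raw text and image-path tuples for building
+     # API requests. add_text and add_file append the new turn to both.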
+     def add_text(history, task_history, text):
+         task_text = text
+         history = history if history is not None else []
+         task_history = task_history if task_history is not None else []
+         history = history + [(_parse_text(text), None)]
+         task_history = task_history + [(task_text, None)]
+         return history, task_history, ""
+
+     def add_file(history, task_history, file):
+         history = history if history is not None else []
+         task_history = task_history if task_history is not None else []
+         history = history + [((file.name,), None)]
+         task_history = task_history + [((file.name,), None)]
+         return history, task_history
+
+     def reset_user_input():
+         return gr.update(value="")
+
+     def reset_state(task_history):
+         task_history.clear()
+         return []
+
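+     # Assemble the Gradio Blocks UI: header links, the chat window, a text input,
+     # and a row of upload / submit / regenerate / clear controls.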
+     with gr.Blocks() as demo:
+         gr.Markdown("""<center><font size=8>Qwen-VL-Max</center>""")
+         gr.Markdown(
+             """\
+ <center><font size=3>This WebUI is based on Qwen-VL-Max, the upgraded version of Qwen-VL, developed by Alibaba Cloud.</center>""")
+         gr.Markdown("""<center><font size=3>本WebUI基于Qwen-VL-Max,这是Qwen-VL的升级版。</center>""")
+         gr.Markdown("""\
+ <center><font size=4> \
+ <a href="https://github.com/QwenLM/Qwen-VL#qwen-vl-plus">Github</a>&nbsp | &nbsp
+ Qwen-VL <a href="https://modelscope.cn/models/qwen/Qwen-VL/summary">🤖 </a>
+ | <a href="https://huggingface.co/Qwen/Qwen-VL">🤗</a>&nbsp |
+ Qwen-VL-Chat <a href="https://modelscope.cn/models/qwen/Qwen-VL-Chat/summary">🤖 </a> |
+ <a href="https://huggingface.co/Qwen/Qwen-VL-Chat">🤗</a>&nbsp |
+ Qwen-VL-Plus
+ <a href="https://huggingface.co/spaces/Qwen/Qwen-VL-Plus">🤗</a>&nbsp
+ <a href="https://modelscope.cn/studios/qwen/Qwen-VL-Chat-Demo/summary">🤖 </a>&nbsp |
+ Qwen-VL-Max
+ <a href="https://huggingface.co/spaces/Qwen/Qwen-VL-Max">🤗</a>&nbsp
+ <a href="https://modelscope.cn/studios/qwen/Qwen-VL-Max/summary">🤖 </a>&nbsp |
+ <a href="https://qianwen.aliyun.com">Web</a> |
+ <a href="https://help.aliyun.com/zh/dashscope/developer-reference/vl-plus-quick-start/">API</a></center>""")
+
+         chatbot = gr.Chatbot(label='Qwen-VL-Max', elem_classes="control-height", height=500)
+         query = gr.Textbox(lines=2, label='Input')
+         task_history = gr.State([])
+
+         with gr.Row():
+             addfile_btn = gr.UploadButton("📁 Upload (上传文件)", file_types=["image"])
+             submit_btn = gr.Button("🚀 Submit (发送)")
+             regen_btn = gr.Button("🤔️ Regenerate (重试)")
+             empty_bin = gr.Button("🧹 Clear History (清除历史)")
+
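+         # Event wiring: Submit appends the user turn (add_text) and then streams
+         # predict into the chatbot; a second click handler clears the textbox in
+         # parallel. Upload, Regenerate and Clear map to the helpers defined above.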
+         submit_btn.click(add_text, [chatbot, task_history, query], [chatbot, task_history]).then(
+             predict, [chatbot, task_history], [chatbot], show_progress=True
+         )
+         submit_btn.click(reset_user_input, [], [query])
+         empty_bin.click(reset_state, [task_history], [chatbot], show_progress=True)
+         regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
+         addfile_btn.upload(add_file, [chatbot, task_history, addfile_btn], [chatbot, task_history], show_progress=True)
+
+         gr.Markdown("""\
+ <font size=2>Note: This demo is governed by the original license of Qwen-VL. \
+ We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content, \
+ including hate speech, violence, pornography, deception, etc. \
+ (注:本演示受Qwen-VL的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,\
+ 包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)""")
+
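+     # queue() is required for the streaming (generator) handlers; only --share is
+     # honored here, the remaining server options stay commented out.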
+     demo.queue().launch(
+         share=args.share,
+         # inbrowser=args.inbrowser,
+         # server_port=args.server_port,
+         # server_name=args.server_name,
+     )
+
+
+ def main():
+     args = _get_args()
+     _launch_demo(args)
+
+
+ if __name__ == '__main__':
+     main()