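# Minimal embedding service for Hugging Face Spaces: a Gradio UI mounted onto a
# FastAPI app. The SentenceTransformer model and the OpenAI-style
# /v1/embeddings endpoint are currently commented out.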
import gradio as gr
from fastapi import FastAPI, Request
import uvicorn
# from sentence_transformers import SentenceTransformer
# from sentence_transformers.util import cos_sim
# from sentence_transformers.quantization import quantize_embeddings
import spaces
app = FastAPI()
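
# @spaces.GPU asks the Spaces ZeroGPU runtime to attach a GPU for the duration
# of each call to the decorated function.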
@spaces.GPU
def embed(text):
    # Embedder is assigned at module level below; encode() returns a numpy array.
    query_embedding = Embedder.encode(text)
    return query_embedding.tolist()

# Disabled OpenAI-compatible embeddings endpoint.
#@app.post("/v1/embeddings")
#async def openai_embeddings(request: Request):
#    body = await request.json()
#    print(body)
#
#    model = body['model']
#    text = body['input']
#    embeddings = embed(text)
#    return {
#        'object': "list",
#        'data': [{
#            'object': "embedding",
#            'embedding': embeddings,
#            'index': 0
#        }],
#        'model': model,
#        'usage': {
#            'prompt_tokens': 0,
#            'total_tokens': 0
#        }
#    }

def fn(text):
    # Return the embedding so Gradio can show it in the output textbox.
    return embed(text)

with gr.Blocks(fill_height=True) as demo:
    text = gr.Textbox()
    embeddings = gr.Textbox()
    text.submit(fn, [text], [embeddings])
print("Loading embedding model");
Embedder = None #SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

# demo.run_startup_events()
# Mount the Gradio UI at the root path of the FastAPI app.
GradioApp = gr.mount_gradio_app(app, demo, path="/", ssr_mode=False)

#demo.launch(
#    share=False,
#    debug=False,
#    server_port=7860,
#    server_name="0.0.0.0",
#    allowed_paths=[]
#)
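
# Note: demo.launch() starts Gradio's own server and blocks the script here, so
# the uvicorn path below is only reached if this call is removed.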
print("Demo run...");
demo.launch();
print("Running uviconr...");
if __name__ == '__main__':
    uvicorn.run(GradioApp)