import argparse
import asyncio
import functools
import json
import os
from io import BytesIO
import uvicorn
from fastapi import FastAPI, Body, Request
# from fastapi.responses import StreamingResponse
# from starlette.staticfiles import StaticFiles
# from starlette.templating import Jinja2Templates
from utils.utils import add_arguments, print_arguments
from sentence_transformers import SentenceTransformer, models
from gensim.models import Word2Vec
from gensim.utils import simple_preprocess
import numpy as np
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg("host", type=str, default="0.0.0.0", help="")
add_arg("port", type=int, default=5000, help="")
add_arg("model_path", type=str, default="BAAI/bge-small-en-v1.5", help="")
add_arg("use_gpu", type=bool, default=False, help="")
add_arg("num_workers", type=int, default=2, help="")
args = parser.parse_args()
print_arguments(args)
# Similarity score: encode both texts and take the dot product of the
# normalized embeddings, which equals their cosine similarity.
def similarity_score(model, textA, textB):
    embeddings = model.encode(
        [textA, textB],
        normalize_embeddings=True
    )
    return embeddings[0] @ embeddings[1].T
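# Usage sketch (the texts below are hypothetical examples, not from the original code):
#   score = similarity_score(bge_model, "a cat sits on the mat", "a kitten rests on a rug")
# Because the embeddings are L2-normalized, the dot product above is the same as
# cosine_similarity(u, v) = (u . v) / (||u|| * ||v||).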
# BGE embedding model
if args.use_gpu:
    bge_model = SentenceTransformer(args.model_path, device="cuda", cache_folder=".")
    bge_model.half()  # run the model in float16 on GPU
else:
    bge_model = SentenceTransformer(args.model_path, device="cpu", cache_folder=".")
# TSDAE embedding model: Transformer encoder with CLS pooling
model_name = 'sam2ai/sbert-tsdae'
word_embedding_model = models.Transformer(model_name)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), 'cls')
tsdae_model = SentenceTransformer(
    modules=[word_embedding_model, pooling_model],
    device="cuda" if args.use_gpu else "cpu",
    cache_folder="."
)
if args.use_gpu:
    tsdae_model.half()  # run the model in float16 on GPU
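# Note: TSDAE (Transformer-based Sequential Denoising Auto-Encoder) checkpoints are
# typically used with CLS pooling, which is why the Pooling module above is built with
# the 'cls' strategy rather than mean pooling.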
# Word2Vec embedding
def calculate_similarity(sentence1, sentence2):
    # Tokenize the sentences
    tokens1 = simple_preprocess(sentence1)
    tokens2 = simple_preprocess(sentence2)
    # Train a small Word2Vec model on just these two sentences
    # (for demonstration purposes only; a pre-trained model would give more meaningful vectors)
    sentences = [tokens1, tokens2]
    model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, sg=0)
    # Average the word vectors to get one vector per sentence
    vector1 = np.mean([model.wv[token] for token in tokens1], axis=0)
    vector2 = np.mean([model.wv[token] for token in tokens2], axis=0)
    # Cosine similarity between the two sentence vectors
    similarity = np.dot(vector1, vector2) / (np.linalg.norm(vector1) * np.linalg.norm(vector2))
    return similarity
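# Usage note: because the Word2Vec model above is trained only on the two input sentences,
# its vectors carry little semantic signal; the function mainly demonstrates the
# tokenize -> embed -> mean-pool -> cosine pipeline rather than producing a meaningful
# similarity score. Loading a pre-trained Word2Vec model would change that.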
app = FastAPI(title="Embedding Inference")
@app.get("/")
async def index(request: Request):
    return {"detail": "API is Active !!"}
@app.post("/bge_embed")
async def api_bge_embed(
text1: str = Body("text1", description="", embed=True),
text2: str = Body("text2", description="", embed=True),
):
scores = similarity_score(bge_model, text1, text2)
print(scores)
scores = scores.tolist()
ret = {"similarity score": scores, "status_code": 200}
return ret
@app.post("/tsdae_embed")
async def api_tsdae_embed(
text1: str = Body("text1", description="", embed=True),
text2: str = Body("text2", description="", embed=True),
):
scores = similarity_score(tsdae_model, text1, text2)
print(scores)
scores = scores.tolist()
ret = {"similarity score": scores, "status_code": 200}
return ret
@app.post("/w2v_embed")
async def api_w2v_embed(
text1: str = Body("text1", description="", embed=True),
text2: str = Body("text2", description="", embed=True),
):
scores = calculate_similarity(text1, text2)
print(scores)
scores = scores.tolist()
ret = {"similarity score": scores, "status_code": 200}
return ret
if __name__ == '__main__':
    uvicorn.run(app, host=args.host, port=args.port)
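# Example request (assumes the default host/port above; the texts are illustrative):
#   curl -X POST http://0.0.0.0:5000/bge_embed \
#        -H "Content-Type: application/json" \
#        -d '{"text1": "a cat sits on the mat", "text2": "a kitten rests on a rug"}'
# The response has the shape {"similarity score": <float>, "status_code": 200}; the
# /tsdae_embed and /w2v_embed endpoints accept the same request body.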