mohamedalix546 commited on
Commit
5705d09
·
verified ·
1 Parent(s): efa23c7

Upload 3 files

Browse files
Files changed (3) hide show
  1. Dockerfile +11 -0
  2. app.py +23 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

WORKDIR /app

# Install dependencies in their own layer: copying only requirements.txt
# first means this expensive step is cached unless the dependency list
# changes, instead of re-running on every source edit.
COPY requirements.txt .
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential git && \
    pip install --no-cache-dir -r requirements.txt && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Application code last — changes here don't invalidate the pip layer.
COPY . /app

# Port 7860 is the conventional Hugging Face Spaces port.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = FastAPI()

# Load tokenizer and model once at import/startup time; every request
# handler reuses these module-level objects.
model_name = "microsoft/phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Inference-only service: switch off dropout/training-mode layers so
# generate() output quality isn't degraded by active dropout.
model.eval()
11
+
12
class MessageRequest(BaseModel):
    """Request body for the /intro endpoint."""

    # Customer's display name; interpolated into the LM prompt.
    username: str
    # The text the customer sent.
    message: str
15
+
16
@app.post("/intro")
def smart_intro(req: MessageRequest):
    """Generate a short Arabic acknowledgement reply for a customer message.

    Builds a prompt from the customer's name and message, samples up to 50
    new tokens from the model, and returns only the newly generated text.

    Returns:
        dict: ``{"reply": <generated text>}``.
    """
    prompt = f"العميل اسمه {req.username}، وكتب: {req.message}\nرد عليه برد ذكي بسيط يحسسه إنك مهتم وهترد عليه بعد لحظات:"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=50,
            do_sample=True,  # without this, temperature is silently ignored
            temperature=0.8,
            # phi-2 ships no pad token; silence the warning and pad with EOS.
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the tokens generated after the prompt. The previous
    # `response.split(":")[-1]` was fragile: the prompt itself contains ":"
    # and the model's reply may too, which could truncate the answer.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    reply = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    return {"reply": reply}
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Web framework and server
fastapi
pydantic  # imported directly in app.py; declare it, don't rely on fastapi's transitive dep
uvicorn

# Model inference
torch
transformers