Update app.py
app.py CHANGED
@@ -1,29 +1,22 @@
-import os
-os.environ["HF_HOME"] = "/tmp/hf"  # Prevents write errors on Hugging Face Spaces
-
 from fastapi import FastAPI
 from pydantic import BaseModel
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 import re
+import os
 
-app = FastAPI(
-…
-…
-…
-)
+# Fix the permission issue by changing HF cache location
+os.environ["HF_HOME"] = "/tmp/hf"
+
+app = FastAPI()
 
-# Load model and tokenizer
 model_id = "misalsathsara/phi1.5-js-codegen"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id)
-
-# Device setup
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 model.eval()
 
-# Prompt Template
 system_prompt = """
 You are a smart javascript assistant that only generates only the best simple javascript functions without any comments like this:
 function transform(row) {
@@ -47,21 +40,19 @@ Don't add any markdown block markers either.
 Every function must end with return row;
 """
 
-# Input schema
 class RequestData(BaseModel):
     instruction: str
 
-
-@app.post("/generate", summary="Generate JavaScript code", tags=["Code Generation"])
+@app.post("/generate")
 def generate_code(data: RequestData):
     instruction = data.instruction
-    full_prompt = f"…
-…
-…
-…
+    full_prompt = system_prompt + f"\n### Instruction:\n{instruction}\n\n### Response:\n"
+
+    input_ids = tokenizer(full_prompt, return_tensors="pt").input_ids.to(device)
+
     with torch.no_grad():
-…
-…
+        output_ids = model.generate(
+            input_ids,
             max_new_tokens=200,
             temperature=0.3,
             top_k=50,
@@ -70,6 +61,10 @@ def generate_code(data: RequestData):
             pad_token_id=tokenizer.eos_token_id
         )
 
-…
-…
-    return …
+    generated_text = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
+
+    # Only return JavaScript function — no extra text
+    match = re.search(r"function\s+transform\(.*?\)\s*{.*?return row;\s*}", generated_text, re.DOTALL)
+    clean_output = match.group(0).strip() if match else generated_text.strip()
+
+    return clean_output

(Removed lines shown as … could not be recovered from the rendered diff.)
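One caveat with the new ordering: huggingface_hub resolves HF_HOME once, when it is first imported, and importing transformers pulls it in, so setting the variable after the transformers import may not actually relocate the cache. The previous revision set it before any imports, which is the placement that reliably takes effect. A minimal sketch of that ordering:

# Set the cache location before any Hugging Face import, so the value is
# picked up when huggingface_hub reads HF_HOME at import time.
import os
os.environ["HF_HOME"] = "/tmp/hf"  # avoids write errors on read-only Spaces storage

from transformers import AutoTokenizer, AutoModelForCausalLM  # imported after HF_HOME is set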
app.py after this commit (unchanged lines that the diff view elides are shown as …):

from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import re
import os

# Fix the permission issue by changing HF cache location
os.environ["HF_HOME"] = "/tmp/hf"

app = FastAPI()

model_id = "misalsathsara/phi1.5-js-codegen"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

system_prompt = """
You are a smart javascript assistant that only generates only the best simple javascript functions without any comments like this:
function transform(row) {
…
Every function must end with return row;
"""

class RequestData(BaseModel):
    instruction: str

@app.post("/generate")
def generate_code(data: RequestData):
    instruction = data.instruction
    full_prompt = system_prompt + f"\n### Instruction:\n{instruction}\n\n### Response:\n"

    input_ids = tokenizer(full_prompt, return_tensors="pt").input_ids.to(device)

    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_new_tokens=200,
            temperature=0.3,
            top_k=50,
            …
            pad_token_id=tokenizer.eos_token_id
        )

    generated_text = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)

    # Only return JavaScript function — no extra text
    match = re.search(r"function\s+transform\(.*?\)\s*{.*?return row;\s*}", generated_text, re.DOTALL)
    clean_output = match.group(0).strip() if match else generated_text.strip()

    return clean_output
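For reference, a sketch of how a client might call the endpoint once the Space is running. The base URL below is a placeholder (Spaces serve FastAPI apps on port 7860 by default), and the instruction text is only an example:

import requests

# Placeholder base URL; substitute the deployed Space's actual endpoint.
BASE_URL = "http://localhost:7860"

resp = requests.post(
    f"{BASE_URL}/generate",
    json={"instruction": "add a column full_name combining first_name and last_name"},
)
resp.raise_for_status()
print(resp.json())  # the extracted `function transform(row) { ... }` string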