Update README.md
Browse files
README.md
CHANGED
|
@@ -228,14 +228,14 @@ llm_cpp = llama_cpp.Llama(
|
|
| 228 |
|
| 229 |
prompt = """
|
| 230 |
### Instruction:
|
| 231 |
-
|
| 232 |
|
| 233 |
### Response:
|
| 234 |
"""
|
| 235 |
|
| 236 |
response = llm_cpp(
|
| 237 |
prompt=prompt,
|
| 238 |
-
max_tokens=
|
| 239 |
temperature=0.5,
|
| 240 |
top_k=1,
|
| 241 |
repeat_penalty=1.1,
|
|
@@ -245,6 +245,30 @@ response = llm_cpp(
|
|
| 245 |
print(response)
|
| 246 |
```
|
| 247 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 248 |
## How to use with LangChain
|
| 249 |
|
| 250 |
Here are guides on using llama-cpp-python or ctransformers with LangChain:
|
|
|
|
| 228 |
|
| 229 |
prompt = """
|
| 230 |
### Instruction:
|
| 231 |
+
สวัสดีครับ ผมชื่อเอก
|
| 232 |
|
| 233 |
### Response:
|
| 234 |
"""
|
| 235 |
|
| 236 |
response = llm_cpp(
|
| 237 |
prompt=prompt,
|
| 238 |
+
max_tokens=256,
|
| 239 |
temperature=0.5,
|
| 240 |
top_k=1,
|
| 241 |
repeat_penalty=1.1,
|
|
|
|
| 245 |
print(response)
|
| 246 |
```
|
| 247 |
|
| 248 |
+
#### Output:
|
| 249 |
+
|
| 250 |
+
```python
|
| 251 |
+
{
|
| 252 |
+
"id": "cmpl-a8d5746d-25fb-43b6-8b04-b562db72df2b",
|
| 253 |
+
"object": "text_completion",
|
| 254 |
+
"created": 1714460999,
|
| 255 |
+
"model": "tc-instruct-dpo.Q4_K_M.gguf",
|
| 256 |
+
"choices": [
|
| 257 |
+
{
|
| 258 |
+
"text": "\n### Instruction:\nสวัสดีครับ ผมชื่อเอก\n\n### Response:\nสวัสดีครับ\n ",
|
| 259 |
+
"index": 0,
|
| 260 |
+
"logprobs": None,
|
| 261 |
+
"finish_reason": "stop"
|
| 262 |
+
}
|
| 263 |
+
],
|
| 264 |
+
"usage": {
|
| 265 |
+
"prompt_tokens": 21,
|
| 266 |
+
"completion_tokens": 7,
|
| 267 |
+
"total_tokens": 28
|
| 268 |
+
}
|
| 269 |
+
}
|
| 270 |
+
```
|
| 271 |
+
|
| 272 |
## How to use with LangChain
|
| 273 |
|
| 274 |
Here are guides on using llama-cpp-python or ctransformers with LangChain:
|