import os

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# Resolve the model directory relative to this script
base_dir = os.path.dirname(os.path.abspath(__file__))
# Path to the folder that holds the fine-tuned model files
model_path = os.path.join(base_dir, "chatbot")
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
model = GPT2LMHeadModel.from_pretrained(model_path)
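# (from_pretrained() on a local folder expects the files written by
#  save_pretrained(): config.json, the model weights, vocab.json and merges.txt)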

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# Create the text generation pipeline
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1
)
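# (device=0 targets the first GPU and -1 the CPU; the pipeline moves the model
#  to that device itself, so the explicit .to(device) above is redundant but harmless)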

# Chat function: keeps a running "A:/B:" transcript as the prompt
def chat(temp=0.5):
    print(f"\n🤖 Chatbot is ready! (temperature={temp}) - type 'exit' to quit.")
    context = ""
    while True:
        user_input = input("You: ")
        if user_input.lower() == "exit":
            break
        # Append the user's turn and prompt the model for speaker B's line
        context += f"A: {user_input}\nB:"
        reply = generator(
            context,
            max_new_tokens=50,                    # cap the reply length instead of recomputing max_length
            return_full_text=False,               # return only the continuation, not the echoed prompt
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token, so reuse EOS
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=temp
        )[0]["generated_text"]
        # Keep only the first line so the bot doesn't speak for both sides
        reply = reply.split("\n")[0].strip()
        print(f"Bot: {reply}")
        context += f" {reply}\n"
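
# Note: the context string grows with every exchange; GPT-2 attends to at most
# 1024 tokens, so long sessions would need the oldest turns trimmed from the prompt.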

# Start chatting
if __name__ == "__main__":
    chat(temp=0.8)
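
# Example session (illustrative only; actual replies depend on the fine-tuned weights):
#   You: hi there
#   Bot: hello! how are you today?
#   You: exit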