import streamlit as st
from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers
import os
from huggingface_hub import hf_hub_download
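# Assumed runtime dependencies: streamlit, langchain, ctransformers, huggingface_hub
# (the ctransformers package backs LangChain's CTransformers wrapper).
# Launch the app with: streamlit run <this_file>.py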


# Function to download the LLaMA model
def download_model():
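    """Return the local path to the LLaMA 2 chat GGML model, downloading it
    from the Hugging Face Hub on first use (the q8_0 file is several GB, so
    the first run can take a while)."""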
    model_path = "models/llama-2-7b-chat.ggmlv3.q8_0.bin"
    if not os.path.exists(model_path):
        model_path = hf_hub_download(repo_id="TheBloke/Llama-2-7B-Chat-GGML",
                                     filename="llama-2-7b-chat.ggmlv3.q8_0.bin",
                                     local_dir="models")
    return model_path


# Function to get LLaMA response
def getLLamaresponse(input_text, no_words, blog_style):
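    """Generate a blog post with LLaMA 2 for the given topic, word count, and target audience."""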
    model_path = download_model()

    # Load the quantized model via CTransformers; the near-zero temperature keeps
    # generations almost deterministic. max_new_tokens scales with the requested
    # word count so longer blogs are not cut off at a fixed 256-token limit.
    llm = CTransformers(model=model_path,
                        model_type='llama',
                        config={'max_new_tokens': max(256, int(no_words) * 2),
                                'temperature': 0.01})

    # Blog prompt template
    template = """

        Write a blog for {blog_style} job profile for a topic {input_text}

        within {no_words} words.

    """

    prompt = PromptTemplate(input_variables=["blog_style", "input_text", "no_words"],
                            template=template)

    # Generate response
    response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
    return response


# --------------- Streamlit UI Design ----------------
st.set_page_config(page_title="AI Blog Generator", page_icon="📝", layout="centered")

# Custom CSS for a clean UI
st.markdown("""

    <style>

        .main {background-color: #f4f4f4;}

        .stTextInput, .stSelectbox, .stNumberInput {width: 100%;}

        .stButton>button {width: 100%; background-color: #4CAF50; color: white; padding: 10px 20px; font-size: 18px; border-radius: 8px;}

        .stButton>button:hover {background-color: #45a049;}

    </style>

""", unsafe_allow_html=True)

# Page Title
st.markdown("<h1 style='text-align: center; color: #333;'>πŸš€ AI Blog Generator</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center; color: #666;'>Generate high-quality blogs instantly with LLaMA 2</h3>",
            unsafe_allow_html=True)
st.write("---")

# Sidebar
with st.sidebar:
    st.markdown("## 🔥 About This App")
    st.write("This AI-powered app generates professional blogs using **LLaMA 2** from Meta.")
    st.markdown("#### ✨ Features")
    st.write("- Generates **high-quality** blog content.")
    st.write("- **Custom word count** & writing style.")
    st.write("- **Powered by LLaMA 2 (7B)** model.")
    st.write("---")
    st.write("πŸ”— [Hugging Face Model](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML)")

# Input Fields
st.markdown("### ✍️ Enter Blog Details")
input_text = st.text_input("📌 Blog Topic", placeholder="e.g. Future of AI in Healthcare")

col1, col2 = st.columns([5, 5])
with col1:
    no_words = st.number_input('📝 Word Count', min_value=50, max_value=1000, value=300, step=50)
with col2:
    blog_style = st.selectbox("✒️ Target Audience", ("Researchers", "Data Scientists", "General Readers"), index=0)

# Generate Button
submit = st.button("🚀 Generate Blog")

if submit:
    if not input_text:
        st.error("⚠️ Please enter a blog topic.")
    else:
        with st.spinner("⏳ Generating your blog..."):
            blog_content = getLLamaresponse(input_text, no_words, blog_style)
        st.success("βœ… Blog Generated Successfully!")

        # Display the blog
        st.markdown("### πŸ“ Your AI-Generated Blog")
        st.write(blog_content)