import streamlit as st
from langchain.prompts import PromptTemplate
from langchain.llms import CTransformers
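# Note: these import paths target the older monolithic `langchain` package;
# on LangChain >= 0.1 the same classes are available from
# `langchain_community.llms` and `langchain.prompts` / `langchain_core.prompts`.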

# Function to get a response from the Llama 2 model

def get_llama_response(input_text, no_words, blog_style):

    ## Load the quantized Llama 2 chat model via CTransformers
    llm = CTransformers(model='TheBloke/Llama-2-7B-Chat-GGML',
                        model_type='llama',
                        config={'max_new_tokens': 256,
                                'temperature': 0.01})


    ## Prompt template
    template = """
    Write a blog for a {blog_style} job profile on the topic {input_text},
    in about {no_words} words.
    """

    prompt = PromptTemplate(input_variables=['blog_style', 'input_text', 'no_words'],
                            template=template)

    ## Generate the response from the Llama 2 model
    response = llm(prompt.format(blog_style=blog_style, input_text=input_text, no_words=no_words))
    print(response)
    return response



st.set_page_config(page_title='Generate Blogs',
                   page_icon='',
                   layout='centered',
                   initial_sidebar_state='collapsed')

st.header('Generate Blogs')

input_text = st.text_input('Enter the blog Topic')

## Create two columns for the two additional input fields

col1, col2 = st.columns([5, 5])

with col1:
    no_words = st.text_input('No. of words')

with col2:
    blog_style = st.selectbox('Writing the blog for',
                              ('Researchers', 'Data Scientist', 'Common People'), index=0)

submit = st.button('Generate')

## Final response

if submit:
    st.write(get_llama_response(input_text, no_words, blog_style))
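
# To try this locally (a minimal sketch; `app.py` is a stand-in for whatever
# this file is actually named):
#   pip install streamlit langchain ctransformers
#   streamlit run app.py
# On first run, ctransformers should fetch the GGML weights for
# 'TheBloke/Llama-2-7B-Chat-GGML' from the Hugging Face Hub, which can take a while.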