import json
import os

import google.generativeai as genai
import streamlit as st
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Configure the Generative AI client and model
if GOOGLE_API_KEY:
    genai.configure(api_key=GOOGLE_API_KEY)
    model = genai.GenerativeModel("gemini-pro")  # You can experiment with other available models
else:
    st.error("Google AI Studio API key not found. Please add it to your .env file.")
    st.stop()
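
# Gemini often wraps JSON answers in Markdown code fences (```json ... ```),
# which makes json.loads() fail on the raw response text. The helper below is
# a minimal sketch that strips those fences before parsing, assuming the
# fence, when present, occupies the first and last lines of the response.
def extract_json_text(text: str) -> str:
    """Strip surrounding Markdown code fences from a model response, if any."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        # Drop the opening fence line (which may carry a language tag like "json").
        cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else ""
        if cleaned.rstrip().endswith("```"):
            cleaned = cleaned.rstrip()[:-3]
    return cleaned.strip()
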
st.title("Prompt Engineering Playground")
st.subheader("Experiment with Fundamental Prompting Techniques")

with st.sidebar:
    st.header("Prompting Concepts")
    st.markdown(
        """
        This app demonstrates fundamental prompt engineering techniques based on
        the Google Generative AI course.
        """
    )
    st.subheader("Key Techniques:")
    st.markdown(
        """
        - **Clear and Specific Instructions**: Providing explicit guidance to the model.
        - **Using Delimiters**: Clearly separating different parts of the input text.
        - **Asking for Structured Output**: Requesting output in a specific format (e.g., JSON).
        - **Checking Assumptions**: Verifying that certain conditions are met.
        - **Providing Examples (Few-Shot Prompting)**: Giving the model a few examples of the desired input-output behavior.
        - **Temperature Control**: Adjusting the randomness of the model's output.
        - **Chain-of-Thought Prompting**: Encouraging the model to show its reasoning process.
        """
    )
    st.subheader("Whitepaper Insights:")
    st.markdown(
        """
        - Understanding LLM capabilities and limitations.
        - Importance of prompt clarity and specificity.
        - Iterative prompt development and refinement.
        - Context window awareness.
        """
    )

# --- Prompting Techniques Section ---
st.header("Experiment with Prompts")

prompt_technique = st.selectbox(
    "Choose a Prompting Technique to Try:",
    [
        "Simple Instruction",
        "Using Delimiters",
        "Requesting JSON Output",
        "Checking Assumptions",
        "Providing Examples (Few-Shot)",
        "Temperature Control",
        "Chain-of-Thought Prompting",
    ],
    index=0,  # Start with "Simple Instruction"
)

prompt_input = st.text_area("Enter your prompt here:", height=150)

# Temperature slider (applies to every technique)
temperature = st.slider(
    "Temperature:",
    min_value=0.0,
    max_value=1.0,
    value=0.7,  # Default temperature
    step=0.01,
    help="Controls the randomness of the output. Lower values are more deterministic; higher values are more creative.",
)

# Technique-specific inputs must be rendered *before* the Generate button:
# Streamlit reruns the script on every interaction, so widgets created inside
# the button's if-block vanish on the next rerun and their values are lost.
if prompt_technique == "Using Delimiters":
    delimiter = st.text_input("Enter your delimiter (e.g., ###, ---):", "###")
elif prompt_technique == "Requesting JSON Output":
    json_format = st.text_input(
        "Describe the desired JSON format (e.g., {'name': str, 'age': int}):",
        "{'key1': type, 'key2': type}",
    )
elif prompt_technique == "Checking Assumptions":
    assumption = st.text_input(
        "State the assumption you want the model to check:",
        "The main subject is a person",
    )
elif prompt_technique == "Providing Examples (Few-Shot)":
    # Recent Streamlit versions enforce a minimum text_area height.
    example1_input = st.text_area("Example 1 Input:", height=80)
    example1_output = st.text_area("Example 1 Output:", height=80)
    example2_input = st.text_area("Example 2 Input (Optional):", height=80)
    example2_output = st.text_area("Example 2 Output (Optional):", height=80)

if st.button("Generate Response"):
    if not prompt_input:
        st.warning("Please enter a prompt.")
    else:
        with st.spinner("Generating..."):
            try:
                generation_config = genai.types.GenerationConfig(temperature=temperature)

                if prompt_technique == "Using Delimiters":
                    processed_prompt = (
                        f"Here is the input, with parts separated by '{delimiter}':\n"
                        f"{prompt_input}\n"
                        "Please process each part separately."
                    )
                    response = model.generate_content(processed_prompt, generation_config=generation_config)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Requesting JSON Output":
                    processed_prompt = (
                        f"Please provide the output in JSON format, following this structure: {json_format}. "
                        f"Here is the information: {prompt_input}"
                    )
                    response = model.generate_content(processed_prompt, generation_config=generation_config)
                    try:
                        json_output = json.loads(extract_json_text(response.text))
                        st.subheader("Generated JSON Output:")
                        st.json(json_output)
                    except json.JSONDecodeError:
                        st.error("Failed to decode JSON. Raw response:")
                        st.text(response.text)

                elif prompt_technique == "Checking Assumptions":
                    processed_prompt = (
                        f"First, check if the following assumption is true: '{assumption}'. "
                        f"Then, answer the prompt: {prompt_input}"
                    )
                    response = model.generate_content(processed_prompt, generation_config=generation_config)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Providing Examples (Few-Shot)":
                    processed_prompt = "Here are some examples:\n"
                    processed_prompt += f"Input: {example1_input}\nOutput: {example1_output}\n"
                    if example2_input and example2_output:
                        processed_prompt += f"Input: {example2_input}\nOutput: {example2_output}\n"
                    processed_prompt += f"\nNow, answer the following:\nInput: {prompt_input}"
                    response = model.generate_content(processed_prompt, generation_config=generation_config)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Temperature Control":
                    # The slider above already sets the temperature; just pass it through.
                    response = model.generate_content(prompt_input, generation_config=generation_config)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

                elif prompt_technique == "Chain-of-Thought Prompting":
                    cot_prompt = f"Let's think step by step. {prompt_input}"
                    response = model.generate_content(cot_prompt, generation_config=generation_config)
                    st.subheader("Generated Response (Chain-of-Thought):")
                    st.markdown(response.text)

                else:  # Simple Instruction
                    response = model.generate_content(prompt_input, generation_config=generation_config)
                    st.subheader("Generated Response:")
                    st.markdown(response.text)

            except Exception as e:
                st.error(f"An error occurred: {e}")
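
# --- Usage notes ---
# Assumptions: the script is saved as app.py (hypothetical filename); the
# package names below are the standard PyPI distributions for the imports above.
#
# .env file placed next to this script:
#   GOOGLE_API_KEY=your-api-key-here
#
# Install dependencies and run:
#   pip install streamlit google-generativeai python-dotenv
#   streamlit run app.py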