# NOTE: "Spaces: Sleeping" — residue from the Hugging Face Spaces page this
# file was copied from; kept here as a comment so the module stays valid Python.
import os

import streamlit as st
import google.generativeai as genai

# SECURITY(review): a live-looking API key was hard-coded here and is now
# exposed in source control — revoke/rotate it. Prefer the environment
# variable (or st.secrets); the literal remains only as a backward-compatible
# fallback until the key is rotated.
API_KEY = os.environ.get("GOOGLE_API_KEY", "AIzaSyA3AAdrEZ4YwczJZpV8uVMIM7zTJpXtNgg")

# Configure credentials BEFORE creating any model/chat objects. The original
# called configure() after start_chat(), which only works because the SDK
# reads the global credentials lazily at request time — ordering it first is
# the documented, non-fragile usage.
genai.configure(api_key=API_KEY)

model = genai.GenerativeModel("gemini-1.5-flash")

# One module-level chat session, seeded with a short greeting exchange.
# NOTE(review): Streamlit re-executes this script on every interaction, so
# this session (and its accumulated history) is rebuilt per rerun — persist
# it in st.session_state if multi-turn memory is desired; verify with callers.
chat = model.start_chat(
    history=[
        {"role": "user", "parts": "Hello"},
        {"role": "model", "parts": "Great to meet you. What would you like to know?"},
    ]
)
def get_response_from_gemini(prompt):
    """Send *prompt* to the module-level chat session and return the reply text.

    The request is made with ``stream=True``, so the model's answer arrives as
    a sequence of chunks; each chunk is a partial continuation of the text.

    Args:
        prompt: The user's message to send to the model.

    Returns:
        The complete response text as a single string.
    """
    response = chat.send_message(prompt, stream=True)
    # Join with "" rather than " ": streamed chunks can split in the middle
    # of a word, so inserting spaces between them corrupts the output.
    return "".join(chunk.text for chunk in response)
# --- Streamlit UI ---
st.title("Gemini Chat App")
st.write("Type your message below to chat with the Gemini model.")

user_input = st.text_area("Your Message", placeholder="Type your message here...")

if st.button("Send"):
    # Guard clause: reject whitespace-only input before hitting the API.
    if not user_input.strip():
        st.warning("Please type a message before sending.")
    else:
        with st.spinner("Waiting for response..."):
            response = get_response_from_gemini(user_input)
            st.markdown(f"**You:** {user_input}")
            st.markdown(f"**Gemini:** {response}")