"""StudyBuddyAI demo: a Gradio chat UI backed by a text-generation pipeline."""

import gradio as gr
from transformers import pipeline

# Load the StudyBuddyAI model once at startup.
# device=-1 runs on CPU; set device=0 to use the first GPU.
chat = pipeline("text-generation", model="redmint/studybuddy-ai", device=-1)


def respond(message, history):
    """Generate a reply to `message` given the prior chat turns.

    Parameters
    ----------
    message : str
        The latest user question from the textbox.
    history : list[tuple[str, str]]
        Prior (user, bot) exchanges as held by the Chatbot component.

    Returns
    -------
    tuple[str, list[tuple[str, str]]]
        An empty string (clears the input textbox) and the updated history.
    """
    # Rebuild the conversation as a plain Q&A transcript so the model sees
    # earlier turns as context.
    context = "".join(
        f"Question: {user}\nAnswer: {bot}\n" for user, bot in history
    )
    prompt = context + f"Question: {message}\nAnswer:"

    output = chat(
        prompt,
        max_new_tokens=200,
        temperature=0.4,
        top_p=0.9,
        do_sample=True,
    )[0]["generated_text"]

    # The text-generation pipeline returns prompt + completion by default;
    # strip the known prompt prefix so the extraction cannot be confused by
    # "Answer:" occurring inside the generated reply itself. Fall back to
    # the split in case the pipeline was configured without the full text.
    if output.startswith(prompt):
        response = output[len(prompt):]
    elif "Answer:" in output:
        response = output.split("Answer:")[-1]
    else:
        response = output

    # Q&A-format prompts often make the model hallucinate a follow-up
    # "Question: ..." after its answer — keep only the first answer.
    response = response.split("Question:")[0].strip()

    history.append((message, response))
    # First value clears the textbox; second updates the chat display.
    return "", history


# Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask StudyBuddyAI")
    # Outputs map to: [textbox (cleared), chatbot (updated)].
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.launch()