import gradio as gr
import requests
import os
import time

# Model configuration
MODEL_ID = "chendren/deepseek-dnd-lora"
API_URL = f"https://huggingface.co/proxy/api-inference.huggingface.co/models/{MODEL_ID}"

# Use environment variable for the token
# This will be set in the Hugging Face Space settings
headers = {"Authorization": f"Bearer {os.environ.get('HUGGINGFACE_TOKEN', '')}"}
# Example prompts
example_prompts = [
    "Create a D&D character with the following details: Race: Half-Elf, Class: Bard, Background: Entertainer",
    "Design a D&D adventure hook set in a dark forest with a mysterious cult",
    "Create a magical item for D&D 5e that would be suitable for a level 5 rogue",
    "Write a description for a fantasy tavern in a D&D setting called 'The Dragon's Rest'",
    "Create a D&D monster stat block for a new type of undead creature called 'Mist Wraith'",
    "Write a backstory for a Dragonborn Paladin who follows the Oath of Vengeance",
]
def generate_text(prompt, temperature=0.7, max_length=500, top_p=0.9, top_k=50, rep_penalty=1.1):
    """
    Generate text using the Hugging Face Inference API.
    If the API fails, provide instructions for manual testing.
    """
    full_prompt = f"You are a Dungeons & Dragons assistant. {prompt}"
    payload = {
        "inputs": full_prompt,
        "parameters": {
            "max_new_tokens": int(max_length),
            "temperature": float(temperature),
            "top_p": float(top_p),
            "top_k": int(top_k),
            "repetition_penalty": float(rep_penalty),
            "do_sample": True
        }
    }
    try:
        # A timeout keeps the request from hanging indefinitely on a cold model
        response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
        if response.status_code == 200:
            result = response.json()
            if isinstance(result, list) and len(result) > 0:
                if "generated_text" in result[0]:
                    return result[0]["generated_text"]
                else:
                    return str(result[0])
            else:
                return str(result)
        elif response.status_code == 503:
            return (
                "⏳ The model is still loading or being processed by Hugging Face. "
                "This is normal for new or infrequently used models. Please try again in a few minutes.\n\n"
                "If the problem persists, you can also try using the model directly via code: "
                "https://huggingface.co/chendren/deepseek-dnd-lora#using-the-transformers-library"
            )
        else:
            return (
                f"⚠️ Error: API returned status code {response.status_code}\n\n"
                f"Response: {response.text}\n\n"
                "You can try again later or use the model directly with the code provided here: "
                "https://huggingface.co/chendren/deepseek-dnd-lora"
            )
    except Exception as e:
        return (
            f"⚠️ An error occurred: {str(e)}\n\n"
            "Check whether the model is available, or try using the model directly with the code examples at: "
            "https://huggingface.co/chendren/deepseek-dnd-lora"
        )
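# For reference, the error messages above point users at running the model
# directly. A minimal sketch of that path with transformers + peft (assumed
# dependencies; the model card linked above has the authoritative snippet):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from peft import PeftModel
#
#   base = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
#   tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
#   model = PeftModel.from_pretrained(base, "chendren/deepseek-dnd-lora")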
# Create the Gradio interface
with gr.Blocks(title="D&D Content Generator", css="footer {visibility: hidden}") as demo:
    gr.Markdown(
        """
        # 🐉 Dungeons & Dragons Content Generator

        This demo uses the [DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) model fine-tuned on D&D content.
        Enter a prompt related to D&D content creation, or try one of the examples below.

        ### Model Details
        - Base model: [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B)
        - Fine-tuned with LoRA on 500 examples of D&D content
        - Specialized for creating characters, adventures, items, and more for D&D

        [View Model on Hugging Face](https://huggingface.co/chendren/deepseek-dnd-lora)
        """
    )
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Enter your D&D content request...",
                lines=4
            )
            with gr.Row():
                submit_btn = gr.Button("Generate", variant="primary")
                clear_btn = gr.Button("Clear")
            with gr.Accordion("Advanced Parameters", open=False):
                temperature = gr.Slider(
                    minimum=0.1, maximum=1.5, value=0.7, step=0.1,
                    label="Temperature (creativity)",
                    info="Higher values produce more creative but potentially less coherent output"
                )
                max_length = gr.Slider(
                    minimum=100, maximum=1000, value=500, step=50,
                    label="Maximum Length",
                    info="Maximum number of tokens to generate"
                )
                top_p = gr.Slider(
                    minimum=0.1, maximum=1.0, value=0.9, step=0.1,
                    label="Top-p",
                    info="Nucleus sampling probability threshold"
                )
                top_k = gr.Slider(
                    minimum=1, maximum=100, value=50, step=5,
                    label="Top-k",
                    info="Number of highest-probability tokens to consider"
                )
                rep_penalty = gr.Slider(
                    minimum=1.0, maximum=2.0, value=1.1, step=0.1,
                    label="Repetition Penalty",
                    info="Penalizes repeated tokens (higher = less repetition)"
                )
        with gr.Column():
            output = gr.Textbox(
                label="Generated Output",
                placeholder="D&D content will appear here...",
                lines=20
            )
    gr.Examples(
        examples=example_prompts,
        inputs=prompt,
        outputs=output,
        fn=lambda x: generate_text(x),
        cache_examples=False
    )
    gr.Markdown(
        """
        ### Usage Tips
        - Be specific in your prompts, including details like race, class, and level where applicable
        - For character creation, specify race, class, background, and any other details
        - For adventure hooks, specify setting, theme, and target party level
        - For magic items, specify item type, rarity, and intended user

        ### Credits
        - Base model: [DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B)
        - Fine-tuned by: [chendren](https://huggingface.co/chendren)
        """
    )
    # Set up event handlers
    submit_btn.click(
        fn=generate_text,
        inputs=[prompt, temperature, max_length, top_p, top_k, rep_penalty],
        outputs=output
    )
    clear_btn.click(
        fn=lambda: ("", ""),
        inputs=[],
        outputs=[prompt, output]
    )
# Launch the app
if __name__ == "__main__":
    demo.launch()
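# To try the app locally (assuming this file is saved as app.py and the
# gradio and requests packages are installed):
#
#   pip install gradio requests
#   HUGGINGFACE_TOKEN=<your token> python app.py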