feat: update game instructions and enhance chat functionality
- app.py +22 -16
- src/unpredictable_lord/chat.py +29 -72
app.py
CHANGED
@@ -10,7 +10,7 @@ from pathlib import Path
 import gradio as gr
 import spaces
 
-from unpredictable_lord.chat import chat_with_llm_stream, chat_with_mcp_tools
+from unpredictable_lord.chat import chat_with_mcp_tools
 from unpredictable_lord.game_state import PERSONALITY_DESCRIPTIONS
 from unpredictable_lord.mcp_server import (
     execute_turn,
@@ -33,7 +33,12 @@ MCP_GUIDE_PATH = Path(__file__).parent / "docs" / "mcp_guide.md"
 MCP_GUIDE_CONTENT = MCP_GUIDE_PATH.read_text(encoding="utf-8")
 
 # Constants
-NO_GAME_MESSAGE = "_No active game.
+NO_GAME_MESSAGE = """_No active game._
+
+**To start playing:**
+1. Select a Lord Personality above
+2. Click "⚔️ Start New Game"
+"""
 
 
 # Gradio UI
@@ -54,10 +59,11 @@ with gr.Blocks(title="Unpredictable Lord") as demo:
         with gr.Row():
             msg = gr.Textbox(
                 label="Your Advice",
-                placeholder="My Lord, I have a proposal...",
+                placeholder="Start a new game first...",
                 scale=4,
+                interactive=False,
             )
-            submit_btn = gr.Button("Submit", scale=1)
+            submit_btn = gr.Button("Submit", scale=1, interactive=False)
 
         # Right column: Game status panel
         with gr.Column(scale=1):
@@ -146,6 +152,8 @@ _{personality_desc}_"""
            state.get("royal_trust", 50),
            state.get("advisor_trust", 50),
            [],  # Clear chat history
+           gr.update(interactive=True, placeholder="My Lord, I have a proposal..."),
+           gr.update(interactive=True),
        )
 
    def reset_chat_game():
@@ -160,6 +168,8 @@ _{personality_desc}_"""
            50,
            50,
            [],  # Clear chat history
+           gr.update(interactive=False, placeholder="Start a new game first..."),
+           gr.update(interactive=False),
        )
 
    def refresh_game_state(session_id: str | None):
@@ -227,18 +237,10 @@ _{personality_desc}_"""
        user_message = history[-1]["content"]
        history_for_model = history[:-1]
 
-        if session_id:
-            # Use MCP tools version
-            async for updated_history in chat_with_mcp_tools(
-                user_message, history_for_model, session_id, personality
-            ):
-                yield updated_history
-        else:
-            # Fallback to simple chat without tools
-            for updated_history in chat_with_llm_stream(
-                user_message, history_for_model
-            ):
-                yield updated_history
+        async for updated_history in chat_with_mcp_tools(
+            user_message, history_for_model, session_id, personality
+        ):
+            yield updated_history
 
    # Event handlers
    start_game_btn.click(
@@ -254,6 +256,8 @@ _{personality_desc}_"""
            royal_trust_bar,
            advisor_trust_bar,
            chatbot,
+           msg,
+           submit_btn,
        ],
        show_api=False,
    )
@@ -271,6 +275,8 @@ _{personality_desc}_"""
            royal_trust_bar,
            advisor_trust_bar,
            chatbot,
+           msg,
+           submit_btn,
        ],
        show_api=False,
    )
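The new `gr.update(...)` return values are positional: Gradio matches each element of the returned tuple to the component at the same index in the handler's `outputs` list, which is why `msg` and `submit_btn` are appended to both `.click(...)` registrations above. A minimal sketch of the same gating pattern, with illustrative component names rather than the app's full layout:

```python
import gradio as gr

def start_game():
    # Tuple elements map positionally onto outputs=[status, msg, submit]
    return (
        "Game started.",
        gr.update(interactive=True, placeholder="My Lord, I have a proposal..."),
        gr.update(interactive=True),
    )

def reset_game():
    return (
        "_No active game._",
        gr.update(interactive=False, placeholder="Start a new game first..."),
        gr.update(interactive=False),
    )

with gr.Blocks() as demo:
    status = gr.Markdown("_No active game._")
    msg = gr.Textbox(label="Your Advice", interactive=False)
    submit = gr.Button("Submit", interactive=False)
    start = gr.Button("Start New Game")
    reset = gr.Button("Reset")

    start.click(start_game, outputs=[status, msg, submit], show_api=False)
    reset.click(reset_game, outputs=[status, msg, submit], show_api=False)

demo.launch()
```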
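With the fallback branch gone, `respond` is now a plain async generator that forwards every history snapshot from `chat_with_mcp_tools`. Gradio accepts async generators as event handlers and re-renders the output component on each `yield`, which is what produces the streaming effect. A self-contained sketch of that consumption pattern, with a stub generator standing in for the real tool-calling chat:

```python
import asyncio
import gradio as gr

async def fake_chat(user_message, history):
    # Stand-in for chat_with_mcp_tools: yields a growing messages-format history.
    partial = history + [
        {"role": "user", "content": user_message},
        {"role": "assistant", "content": ""},
    ]
    for chunk in ("Hm.", "Hm. Very well,", "Hm. Very well, advisor."):
        await asyncio.sleep(0.1)  # simulate token latency
        partial[-1]["content"] = chunk
        yield partial

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox()

    async def respond(message, history):
        # Async-generator handler: Gradio re-renders the chatbot on every yield.
        async for updated in fake_chat(message, history):
            yield updated

    msg.submit(respond, inputs=[msg, chatbot], outputs=chatbot)

demo.launch()
```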
src/unpredictable_lord/chat.py
CHANGED
@@ -75,6 +75,14 @@ Available advice options:
 - do_nothing: Maintain current state
 
 After calling execute_turn, summarize the results to your advisor in character.
+
+IMPORTANT: After each action or when starting a conversation:
+1. Explain the current situation and any recent changes
+2. Ask your advisor what they suggest next
+3. Be specific about what challenges or opportunities exist
+
+Example: "The treasury has grown, but whispers of discontent spread among the peasants.
+What counsel do you offer, advisor? Shall we address their grievances or press our advantage?"
 """
 
     return oh.Message.from_role_and_content(
@@ -85,20 +93,6 @@ After calling execute_turn, summarize the results to your advisor in character.
     )
 
 
-def _build_simple_system_message() -> oh.Message:
-    """Build simple system message without tools (for non-game chat)."""
-    system_content = (
-        oh.SystemContent.new()
-        .with_model_identity(
-            "You are a lord of a medieval fantasy kingdom. The user is your advisor. "
-            "Listen to your advisor's advice and act for the development of your territory "
-            "and the maintenance of your authority. Speak in an arrogant tone."
-        )
-        .with_reasoning_effort(oh.ReasoningEffort.LOW)
-    )
-    return oh.Message.from_role_and_content(oh.Role.SYSTEM, system_content)
-
-
 def _convert_history_to_messages(chat_history: list[dict]) -> list[oh.Message]:
     """Convert Gradio chat history to openai-harmony messages.
 
@@ -141,6 +135,7 @@ def _stream_llm_response(messages: list[oh.Message], encoding):
     response_text = ""
     token_count = 0
     parser_error = False
+    all_content = ""  # Capture all content regardless of channel
 
     for token in generate_stream(input_tokens):
         if token is None:
@@ -161,66 +156,30 @@
             )
             raise
 
-        # Get content
-        if parser.current_channel == "final":
-            delta = parser.last_content_delta
-            if delta:
-                response_text += delta
-                yield response_text, None
+        # Get content from any channel for fallback
+        delta = parser.last_content_delta
+        if delta:
+            all_content += delta
+
+        # Get content only from final channel for display
+        if parser.current_channel == "final" and delta:
+            response_text += delta
+            yield response_text, None
 
     # Finish parsing and return parsed messages
     if not parser_error:
         parser.process_eos()
-    parsed_messages = parser.messages
+        parsed_messages = parser.messages
+    else:
+        # On parser error, return empty list to stop tool calling loop
+        # Use all_content as fallback if response_text is empty
+        if not response_text and all_content:
+            response_text = all_content
+            logger.info(f"Using fallback content (length: {len(all_content)})")
+        parsed_messages = []
     yield response_text, parsed_messages
 
 
-def chat_with_llm_stream(
-    user_message: str,
-    chat_history: list[dict[str, str]],
-):
-    """
-    Chat with LLM (streaming version) - without tool support.
-
-    This is the simple version for when no game session is active.
-
-    Args:
-        user_message: User's message
-        chat_history: Past chat history (list of dictionaries in Gradio format)
-
-    Yields:
-        updated_chat_history: Updated chat history (Gradio format)
-    """
-    try:
-        # Build messages
-        messages = [_build_simple_system_message()]
-        messages.extend(_convert_history_to_messages(chat_history))
-        messages.append(oh.Message.from_role_and_content(oh.Role.USER, user_message))
-
-        encoding = _get_encoding()
-
-        # Build history for UI
-        partial_history = chat_history + [
-            {"role": "user", "content": user_message},
-            {"role": "assistant", "content": ""},
-        ]
-
-        # Stream response
-        for response_text, _ in _stream_llm_response(messages, encoding):
-            partial_history[-1]["content"] = response_text
-            yield partial_history
-
-    except Exception:
-        logger.exception("Error during chat_with_llm_stream")
-        yield chat_history + [
-            {"role": "user", "content": user_message},
-            {
-                "role": "assistant",
-                "content": "[Error occurred while generating response.]",
-            },
-        ]
-
-
 async def chat_with_mcp_tools(
     user_message: str,
     chat_history: list[dict[str, str]],
@@ -308,13 +267,11 @@ async def chat_with_mcp_tools(
             # Log total message count
             logger.debug(f"Total messages after tool execution: {len(messages)}")
 
-            #
+            # Clear response for next iteration (will accumulate new response)
             full_response = ""
-            partial_history[-1]["content"] = ""
 
-            # Ensure final response is yielded
-
-            yield partial_history
+        # Ensure final response is yielded (even if empty after tool calls)
+        yield partial_history
 
     except Exception:
         logger.exception("Error during chat_with_mcp_tools")
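For context on the `_stream_llm_response` changes: gpt-oss models reply on Harmony channels (`analysis` for reasoning, `commentary` typically for tool calls, `final` for user-facing text), and openai-harmony's `StreamableParser` exposes `current_channel` and `last_content_delta` as each token is processed. A minimal sketch of the channel-splitting idea, assuming the caller supplies the raw completion tokens:

```python
from openai_harmony import (
    HarmonyEncodingName,
    Role,
    StreamableParser,
    load_harmony_encoding,
)

encoding = load_harmony_encoding(HarmonyEncodingName.HARMONY_GPT_OSS)

def split_channels(completion_tokens: list[int]):
    """Separate user-visible 'final' text from everything the model streamed."""
    parser = StreamableParser(encoding, role=Role.ASSISTANT)
    final_text, all_text = "", ""
    for token in completion_tokens:
        parser.process(token)
        delta = parser.last_content_delta
        if delta:
            all_text += delta                      # every channel, kept as fallback
            if parser.current_channel == "final":  # what the advisor should see
                final_text += delta
    parser.process_eos()
    return final_text, all_text, parser.messages   # parsed messages incl. tool calls
```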
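The `parsed_messages = []` fallback also serves as a loop terminator: `chat_with_mcp_tools` keeps calling the model only while the parsed messages contain tool calls, so an empty list ends the round-trip while `all_content` still gives the user something readable. The loop itself is not part of this diff; a hypothetical skeleton of that general shape (with `run_mcp_tool` as a stand-in for the real MCP executor) might be:

```python
async def tool_calling_chat(messages, encoding, max_rounds=5):
    """Hypothetical driver loop; the repo's chat_with_mcp_tools is not shown in full."""
    for _ in range(max_rounds):
        response_text, parsed = "", []
        for text, parsed_messages in _stream_llm_response(messages, encoding):
            response_text = text
            if parsed_messages is not None:
                parsed = parsed_messages
            yield response_text                  # stream partial text to the UI

        # Harmony marks a tool call by setting a recipient, e.g. "functions.execute_turn"
        tool_calls = [m for m in parsed if getattr(m, "recipient", None)]
        if not tool_calls:                       # plain reply or parser error: done
            break
        for call in tool_calls:
            messages.append(call)                # keep the call in model context
            messages.append(await run_mcp_tool(call))  # hypothetical MCP executor
```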