ryomo committed
Commit e20b84b · 1 Parent(s): 0016ceb

fix: add logging for LLM stream calls and remove debug logs in tool execution

src/unpredictable_lord/chat/chat.py CHANGED
@@ -31,11 +31,13 @@ if USE_MODAL:
     _generate_stream = modal.Function.from_name(APP_NAME, "generate_stream")
 
     def generate_stream(input_tokens):
+        logger.info("Calling Modal LLM generate_stream")
         return _generate_stream.remote_gen(input_tokens)
 else:
     from unpredictable_lord.chat.llm_zerogpu import generate_stream as _generate_stream
 
     def generate_stream(input_tokens):
+        logger.info("Calling ZeroGPU LLM generate_stream")
         return _generate_stream(input_tokens)
 
 
@@ -128,8 +130,6 @@ def _stream_llm_response(messages: list[oh.Message], encoding):
     convo = oh.Conversation.from_messages(messages)
     input_tokens = encoding.render_conversation_for_completion(convo, oh.Role.ASSISTANT)
 
-    logger.debug(f"Input token count: {len(input_tokens)}")
-
     parser = oh.StreamableParser(encoding, role=oh.Role.ASSISTANT)
 
     response_text = ""
@@ -250,29 +250,14 @@ async def chat_with_mcp_tools(
 
         logger.info(f"Found {len(tool_calls)} tool call(s)")
 
-        # Log parsed messages for debugging
-        for i, pm in enumerate(parsed_messages):
-            logger.debug(
-                f"Parsed message {i}: role={pm.author.role if pm.author else 'None'}, recipient={pm.recipient}, channel={pm.channel}"
-            )
-
         # Add parsed messages to conversation
         messages.extend(parsed_messages)
 
         # Execute tools via MCP
         tool_result_messages = await execute_tool_calls(tool_calls)
 
-        # Log tool result messages for debugging
-        for i, trm in enumerate(tool_result_messages):
-            logger.debug(
-                f"Tool result message {i}: role={trm.author.role if trm.author else 'None'}, name={trm.author.name if trm.author else 'None'}, channel={trm.channel}"
-            )
-
         messages.extend(tool_result_messages)
 
-        # Log total message count
-        logger.debug(f"Total messages after tool execution: {len(messages)}")
-
         # Ensure final response is yielded (even if empty after tool calls)
         yield partial_history
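For orientation, a minimal sketch of how the generate_stream dispatch in chat.py reads after this commit. Only the two wrapper functions and the new logger.info lines come from the diff; the logger setup, the import placement, and the placeholder values for USE_MODAL and APP_NAME are assumptions added for illustration.

import logging
import os

logger = logging.getLogger(__name__)

# Assumed placeholders: the real module defines these elsewhere (e.g. config/env).
USE_MODAL = os.environ.get("USE_MODAL", "false").lower() == "true"
APP_NAME = "unpredictable-lord"  # hypothetical app name

if USE_MODAL:
    import modal

    # Look up the deployed Modal function by app and function name.
    _generate_stream = modal.Function.from_name(APP_NAME, "generate_stream")

    def generate_stream(input_tokens):
        # New in this commit: record which backend serves the stream.
        logger.info("Calling Modal LLM generate_stream")
        # remote_gen() streams results back from the remote generator function.
        return _generate_stream.remote_gen(input_tokens)
else:
    from unpredictable_lord.chat.llm_zerogpu import generate_stream as _generate_stream

    def generate_stream(input_tokens):
        logger.info("Calling ZeroGPU LLM generate_stream")
        return _generate_stream(input_tokens)

Logging at INFO level keeps a per-request record of which backend is in use without the per-message DEBUG noise that the rest of this commit removes.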
 
 
src/unpredictable_lord/chat/chat_tools.py CHANGED
@@ -138,9 +138,6 @@ async def _execute_tool(tool_name: str, params: dict) -> str:
     mcp_client = MCPClient()
     try:
         result = await mcp_client.call_tool(tool_name, params)
-        logger.debug(
-            f"Raw MCP result for {tool_name}: {result[:200] if result else 'None'}"
-        )
 
         # MCP returns text - try to parse it
         # First, try as JSON
@@ -189,9 +186,6 @@ async def execute_tool_calls(tool_calls: list[oh.Message]) -> list[oh.Message]:
         # Filter parameters to only include those defined in the tool schema
         params = _filter_tool_params(tool_name, raw_params)
 
-        if raw_params != params:
-            logger.debug(f"Filtered params for {tool_name}: {raw_params} -> {params}")
-
         logger.info(f"Executing MCP tool: {tool_name} with params: {params}")
 
         # Execute tool via MCP (returns JSON string)
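For the tool side, a minimal sketch of the per-call path that remains in chat_tools.py once the DEBUG statements are gone. The helper below is hypothetical and only stitches together the lines visible in the diff; MCPClient and _filter_tool_params are the project's own names, while the combined function shape and the JSON fallback are assumptions.

import json
import logging

logger = logging.getLogger(__name__)

async def run_one_tool_call(tool_name: str, raw_params: dict) -> str:
    # Hypothetical helper mirroring execute_tool_calls + _execute_tool after this commit.
    # MCPClient and _filter_tool_params come from the project (see the diff above).

    # Filter parameters to only include those defined in the tool schema.
    params = _filter_tool_params(tool_name, raw_params)

    logger.info(f"Executing MCP tool: {tool_name} with params: {params}")

    # Execute tool via MCP; the client returns text, typically a JSON string.
    mcp_client = MCPClient()
    result = await mcp_client.call_tool(tool_name, params)

    # MCP returns text - try to parse it as JSON first, fall back to the raw string.
    try:
        return json.dumps(json.loads(result))
    except (TypeError, ValueError):
        return result or ""

The surviving logger.info line still ties each call to its tool name and filtered parameters, which is usually enough to trace a failing call without the raw-result and filtered-params dumps.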
 