Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
132 changes: 129 additions & 3 deletions cecli/coders/base_coder.py
Original file line number Diff line number Diff line change
Expand Up @@ -598,6 +598,9 @@ def __init__(
self.io.tool_output("JSON Schema:")
self.io.tool_output(json.dumps(self.functions, indent=4))

# Track partial response state for each request
self._reset_partial_response_flags()

@property
def gpt_prompts(self):
"""Get prompts from the registry based on the coder type."""
Expand Down Expand Up @@ -3013,6 +3016,7 @@ async def send(self, messages, model=None, functions=None, tools=None):
self.partial_response_chunks = []
self.partial_response_tool_calls = []
self.partial_response_function_call = dict()
self._reset_partial_response_flags()

completion = None
self.token_profiler.start()
Expand Down Expand Up @@ -3058,6 +3062,84 @@ async def send(self, messages, model=None, functions=None, tools=None):
if args:
self.io.ai_output(json.dumps(args, indent=4))

def _reset_partial_response_flags(self):
self._partial_response_received_flags = {
"content": False,
"reasoning": False,
"tool_calls": False,
"function_call": False,
}

def _register_partial_response(
self,
*,
content=False,
reasoning=False,
tool_calls=False,
function_call=False,
):
if not hasattr(self, "_partial_response_received_flags"):
self._reset_partial_response_flags()

flags = self._partial_response_received_flags
if content:
flags["content"] = True
if reasoning:
flags["reasoning"] = True
if tool_calls:
flags["tool_calls"] = True
if function_call:
flags["function_call"] = True

def _received_any_partial_response(self, received_content_flag=False):
if not hasattr(self, "_partial_response_received_flags"):
return False

flags = self._partial_response_received_flags
if received_content_flag:
return True
return any(flags.values())

def _get_empty_response_message(self):
"""Generate a descriptive warning for empty responses."""
flags = getattr(self, "_partial_response_received_flags", {})

has_content = flags.get("content", False)
has_reasoning = flags.get("reasoning", False)
has_tool_calls = flags.get("tool_calls", False)
has_function_call = flags.get("function_call", False)

if has_tool_calls and not has_content:
if has_reasoning:
return (
"Empty response received from LLM. "
"Only tool calls and reasoning content were received, but no text response. "
"Check if the model is configured to return text content."
)
return (
"Empty response received from LLM. "
"Only tool calls were received, but no text response. "
"Check if the model is configured to return text content."
)
if has_reasoning and not has_content:
return (
"Empty response received from LLM. "
"Only reasoning content was received, but no text response. "
"Check if the model is configured to return text content."
)
if has_function_call and not has_content:
return (
"Empty response received from LLM. "
"Only function calls were received, but no text response. "
"Check if the model is configured to return text content."
)

return (
"Empty response received from LLM. "
"No content, tool calls, or reasoning was received. "
"Check your provider account, model availability, or network connectivity."
)

def show_send_output(self, completion):
if self.verbose:
print(completion)
Expand All @@ -3072,6 +3154,31 @@ def show_send_output(self, completion):

self.partial_response_chunks.append(completion)

try:
message = None
if completion.choices and completion.choices[0].message:
message = completion.choices[0].message
except (AttributeError, IndexError):
message = None

if message:
if getattr(message, "tool_calls", None):
self._register_partial_response(tool_calls=True)

if getattr(message, "function_call", None):
self._register_partial_response(function_call=True)

reasoning_content = getattr(message, "reasoning_content", None)
if reasoning_content:
self._register_partial_response(reasoning=True)
else:
reasoning_attr = getattr(message, "reasoning", None)
if reasoning_attr:
self._register_partial_response(reasoning=True)

if getattr(message, "content", None):
self._register_partial_response(content=True)

response, func_err, content_err = self.consolidate_chunks()

resp_hash = dict(
Expand All @@ -3096,7 +3203,10 @@ def show_send_output(self, completion):

show_resp = replace_reasoning_tags(show_resp, self.reasoning_tag_name)

self.io.assistant_output(show_resp, pretty=self.show_pretty())
if show_resp:
self.io.assistant_output(show_resp, pretty=self.show_pretty())
elif not self._received_any_partial_response():
self.io.tool_warning(self._get_empty_response_message())

if (
hasattr(completion.choices[0], "finish_reason")
Expand Down Expand Up @@ -3243,6 +3353,8 @@ def consolidate_chunks(self):
for chunk in self.partial_response_chunks:
try:
if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.tool_calls:
if not self.stream:
self._register_partial_response(tool_calls=True)
for tool_call in chunk.choices[0].delta.tool_calls:
if (
hasattr(tool_call, "provider_specific_fields")
Expand Down Expand Up @@ -3271,6 +3383,8 @@ def consolidate_chunks(self):

try:
if response.choices[0].message.tool_calls:
if not self.stream:
self._register_partial_response(tool_calls=True)
for i, tool_call in enumerate(response.choices[0].message.tool_calls):
# Add provider-specific fields if we collected any for this tool
tool_id = tool_call.id
Expand Down Expand Up @@ -3304,6 +3418,8 @@ def consolidate_chunks(self):
self.partial_response_function_call = (
response.choices[0].message.tool_calls[0].function
)
if self.partial_response_function_call and not self.stream:
self._register_partial_response(function_call=True)
except AttributeError as e:
func_err = e

Expand All @@ -3315,7 +3431,12 @@ def consolidate_chunks(self):
except AttributeError:
reasoning_content = None

self.partial_response_reasoning_content = reasoning_content or ""
if reasoning_content:
self.partial_response_reasoning_content = reasoning_content
if not self.stream:
self._register_partial_response(reasoning=True)
else:
self.partial_response_reasoning_content = ""

try:
content = response.choices[0].message.content
Expand All @@ -3331,7 +3452,12 @@ def consolidate_chunks(self):
for block in content
if isinstance(block, dict) and block.get("type") == "text"
)
self.partial_response_content = content or ""
if content:
self.partial_response_content = content
if not self.stream:
self._register_partial_response(content=True)
else:
self.partial_response_content = ""
except AttributeError as e:
content_err = e

Expand Down
5 changes: 4 additions & 1 deletion cecli/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -1484,7 +1484,10 @@ def profile(self, *messages, start=False):

def assistant_output(self, message, pretty=None):
if not message:
self.tool_warning("Empty response received from LLM. Check your provider account?")
self.tool_warning(
"Empty response received from LLM. No text content was returned. "
"Check your provider account, model availability, or network connectivity."
)
return

show_resp = message
Expand Down
5 changes: 4 additions & 1 deletion cecli/tui/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,10 @@ def assistant_output(self, message, pretty=None):
pretty: Whether to use pretty formatting (unused in TUI, kept for compatibility)
"""
if not message:
self.tool_warning("Empty response received from LLM. Check your provider account?")
self.tool_warning(
"Empty response received from LLM. No text content was returned. "
"Check your provider account, model availability, or network connectivity."
)
return

# Use the streaming path so markdown rendering is applied
Expand Down
115 changes: 115 additions & 0 deletions tests/basic/test_coder.py
Original file line number Diff line number Diff line change
Expand Up @@ -1927,3 +1927,118 @@ async def test_execute_tool_calls_blob_content(self, mock_call_openai_tool):
" (application/octet-stream)]"
)
assert result[0]["content"] == expected_content

async def test_reset_partial_response_flags(self):
    """Verify _reset_partial_response_flags starts every flag at False."""
    with GitTemporaryDirectory():
        io = InputOutput(yes=True)
        coder = await Coder.create(self.GPT35, "diff", io=io)

        coder._reset_partial_response_flags()
        flags = coder._partial_response_received_flags
        for name in ("content", "reasoning", "tool_calls", "function_call"):
            assert flags[name] is False

async def test_register_partial_response(self):
    """Verify _register_partial_response sets exactly the requested flags."""
    with GitTemporaryDirectory():
        io = InputOutput(yes=True)
        coder = await Coder.create(self.GPT35, "diff", io=io)

        coder._reset_partial_response_flags()
        coder._register_partial_response(content=True)
        flags = coder._partial_response_received_flags
        assert flags["content"] is True
        assert flags["reasoning"] is False

        # Each remaining channel flips on independently.
        for name in ("reasoning", "tool_calls", "function_call"):
            coder._register_partial_response(**{name: True})
            assert coder._partial_response_received_flags[name] is True

        # Multiple channels can be registered in a single call.
        coder._reset_partial_response_flags()
        coder._register_partial_response(content=True, reasoning=True)
        flags = coder._partial_response_received_flags
        assert flags["content"] is True
        assert flags["reasoning"] is True
        assert flags["tool_calls"] is False

async def test_received_any_partial_response(self):
    """Verify _received_any_partial_response reflects the recorded flags."""
    with GitTemporaryDirectory():
        io = InputOutput(yes=True)
        coder = await Coder.create(self.GPT35, "diff", io=io)

        coder._reset_partial_response_flags()
        assert coder._received_any_partial_response() is False
        assert coder._received_any_partial_response(received_content_flag=True) is True

        # Any single recorded channel is enough for a True result.
        for name in ("content", "reasoning", "tool_calls", "function_call"):
            coder._reset_partial_response_flags()
            coder._register_partial_response(**{name: True})
            assert coder._received_any_partial_response() is True

async def test_get_empty_response_message_variants(self):
    """Verify _get_empty_response_message names the channels that arrived."""
    with GitTemporaryDirectory():
        io = InputOutput(yes=True)
        coder = await Coder.create(self.GPT35, "diff", io=io)

        cases = [
            ({"tool_calls": True}, "Only tool calls"),
            ({"tool_calls": True, "reasoning": True}, "tool calls and reasoning"),
            ({"reasoning": True}, "Only reasoning content"),
            ({"function_call": True}, "Only function calls"),
        ]
        for kwargs, expected in cases:
            coder._reset_partial_response_flags()
            coder._register_partial_response(**kwargs)
            assert expected in coder._get_empty_response_message()

async def test_initialization_in_constructor(self):
    """Verify the coder constructor creates the partial-response flag dict."""
    with GitTemporaryDirectory():
        io = InputOutput(yes=True)
        coder = await Coder.create(self.GPT35, "diff", io=io)

        assert "_partial_response_received_flags" in coder.__dict__
        flags = coder._partial_response_received_flags
        for name in ("content", "reasoning", "tool_calls", "function_call"):
            assert flags[name] is False

async def test_received_content_flag_override(self):
    """Verify received_content_flag forces True regardless of recorded flags."""
    with GitTemporaryDirectory():
        io = InputOutput(yes=True)
        coder = await Coder.create(self.GPT35, "diff", io=io)

        # Even with no channels recorded, the override wins.
        coder._reset_partial_response_flags()
        assert coder._received_any_partial_response(received_content_flag=True) is True

        # And it still reports True once channels are recorded.
        coder._register_partial_response(content=True, reasoning=True)
        assert coder._received_any_partial_response(received_content_flag=True) is True
Loading