2 changes: 1 addition & 1 deletion nemoguardrails/actions/action_dispatcher.py
@@ -26,7 +26,7 @@
from langchain_core.runnables import Runnable

from nemoguardrails import utils
from nemoguardrails.actions.llm.utils import LLMCallException
from nemoguardrails.exceptions import LLMCallException

log = logging.getLogger(__name__)

82 changes: 68 additions & 14 deletions nemoguardrails/actions/llm/utils.py
@@ -15,7 +15,7 @@

import logging
import re
from typing import Any, Dict, List, Optional, Sequence, Union
from typing import Dict, List, Optional, Sequence, Union

from langchain_core.callbacks.base import AsyncCallbackHandler, BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
@@ -30,23 +30,25 @@
reasoning_trace_var,
tool_calls_var,
)
from nemoguardrails.exceptions import LLMCallException
from nemoguardrails.integrations.langchain.message_utils import dicts_to_messages
from nemoguardrails.logging.callbacks import logging_callbacks
from nemoguardrails.logging.explain import LLMCallInfo

logger = logging.getLogger(__name__)


class LLMCallException(Exception):
"""A wrapper around the LLM call invocation exception.

This is used to propagate the exception out of the `generate_async` call (the default behavior is to
catch it and return an "Internal server error." message.
"""

def __init__(self, inner_exception: Any):
super().__init__(f"LLM Call Exception: {str(inner_exception)}")
self.inner_exception = inner_exception
# Since different providers have different attributes for the base URL, we'll use this list
# to attempt to extract the base URL from a `BaseLanguageModel` instance.
BASE_URL_ATTRIBUTES = [
"api_base",
"api_host",
"azure_endpoint",
"base_url",
"endpoint",
"endpoint_url",
"openai_api_base",
"server_url",
]


def _infer_provider_from_module(llm: BaseLanguageModel) -> Optional[str]:
@@ -202,6 +204,58 @@ def _prepare_callbacks(
return logging_callbacks


def _raise_llm_call_exception(
exception: Exception,
llm: Union[BaseLanguageModel, Runnable],
) -> None:
"""Raise an LLMCallException with enriched context about the failed invocation.

Args:
exception: The original exception that occurred
llm: The LLM instance that was being invoked

Raises:
LLMCallException: Wraps the original exception, with a context message including the model name and endpoint when available
"""
# Extract model name from context
llm_call_info = llm_call_info_var.get()
model_name = (
llm_call_info.llm_model_name
if llm_call_info
else _infer_model_name(llm)
if isinstance(llm, BaseLanguageModel)
else ""
)

# Extract endpoint URL from the LLM instance
endpoint_url = None
for attr in BASE_URL_ATTRIBUTES:
if hasattr(llm, attr):
value = getattr(llm, attr, None)
if value:
endpoint_url = str(value)
break

# If we didn't find an endpoint URL, check the nested client object.
if not endpoint_url and hasattr(llm, "client"):
client = getattr(llm, "client", None)
if client and hasattr(client, "base_url"):
endpoint_url = str(client.base_url)

# Build context message with model and endpoint info
context_parts = []
if model_name:
context_parts.append(f"model={model_name}")
if endpoint_url:
context_parts.append(f"endpoint={endpoint_url}")

if context_parts:
context_message = f"Error invoking LLM ({', '.join(context_parts)})"
raise LLMCallException(exception, context_message=context_message)
else:
raise LLMCallException(exception)


async def _invoke_with_string_prompt(
llm: Union[BaseLanguageModel, Runnable],
prompt: str,
@@ -211,7 +265,7 @@ async def _invoke_with_string_prompt(
try:
return await llm.ainvoke(prompt, config=RunnableConfig(callbacks=callbacks))
except Exception as e:
raise LLMCallException(e)
_raise_llm_call_exception(e, llm)


async def _invoke_with_message_list(
@@ -225,7 +279,7 @@ async def _invoke_with_message_list(
try:
return await llm.ainvoke(messages, config=RunnableConfig(callbacks=callbacks))
except Exception as e:
raise LLMCallException(e)
_raise_llm_call_exception(e, llm)


def _convert_messages_to_langchain_format(prompt: List[dict]) -> List:
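As a reviewer aid, a minimal standalone sketch of the endpoint-probing technique used by `_raise_llm_call_exception` above: try a list of known base-URL attribute names first, then fall back to a nested `client.base_url`. The `_FakeClient`, `_FakeChatModel`, and `probe_endpoint` names are hypothetical stand-ins and the attribute list is trimmed; this illustrates the technique rather than reproducing code from the change.

# Illustrative sketch only; mirrors the fallback order in _raise_llm_call_exception.
from typing import Optional

CANDIDATE_BASE_URL_ATTRIBUTES = ["api_base", "azure_endpoint", "base_url", "endpoint_url"]


class _FakeClient:
    base_url = "https://example.invalid/v1"


class _FakeChatModel:
    # No direct base-URL attribute; only a nested client object (hypothetical shape).
    client = _FakeClient()


def probe_endpoint(llm: object) -> Optional[str]:
    """Return the first non-empty base-URL-like attribute, else the nested client's base_url."""
    for attr in CANDIDATE_BASE_URL_ATTRIBUTES:
        value = getattr(llm, attr, None)
        if value:
            return str(value)
    client = getattr(llm, "client", None)
    if client is not None and getattr(client, "base_url", None):
        return str(client.base_url)
    return None


print(probe_endpoint(_FakeChatModel()))  # -> https://example.invalid/v1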
70 changes: 70 additions & 0 deletions nemoguardrails/exceptions.py
@@ -0,0 +1,70 @@
# SPDX-FileCopyrightText: Copyright (c) 2023-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional

__all__ = [
"ConfigurationError",
"InvalidModelConfigurationError",
"InvalidRailsConfigurationError",
"LLMCallException",
]


class ConfigurationError(ValueError):
"""
Base class for Guardrails Configuration validation errors.
"""

pass


class InvalidModelConfigurationError(ConfigurationError):
"""Raised when a guardrail configuration's model is invalid."""

pass


class InvalidRailsConfigurationError(ConfigurationError):
"""Raised when rails configuration is invalid.

Examples:
- Input/output rail references a model that doesn't exist in config
- Rail references a flow that doesn't exist
- Missing required prompt template
- Invalid rail parameters
"""

pass


class LLMCallException(Exception):
"""A wrapper around the LLM call invocation exception.

This is used to propagate the exception out of the `generate_async` call. The default behavior is to
catch it and return an "Internal server error." message.
"""

def __init__(self, inner_exception: Any, context_message: Optional[str] = None):
"""Initialize LLMCallException.

Args:
inner_exception: The original exception that occurred
context_message: Optional context to prepend (for example, the model name or endpoint)
"""
message = f"{context_message or 'LLM Call Exception'}: {str(inner_exception)}"
super().__init__(message)

self.inner_exception = inner_exception
self.context_message = context_message
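
A short usage sketch of the new exception, based only on the constructor shown above; the `TimeoutError` and the model/endpoint values are illustrative, not taken from the codebase.

from nemoguardrails.exceptions import LLMCallException

try:
    raise LLMCallException(
        TimeoutError("request timed out"),
        context_message="Error invoking LLM (model=example-model, endpoint=https://example.invalid/v1)",
    )
except LLMCallException as exc:
    # str(exc) -> "Error invoking LLM (model=example-model, endpoint=https://example.invalid/v1): request timed out"
    print(str(exc))
    print(type(exc.inner_exception).__name__)  # -> TimeoutError
    print(exc.context_message)                 # -> the context string passed above

When no context message is supplied, the message falls back to the previous "LLM Call Exception: ..." format, so the earlier message shape is preserved.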
58 changes: 42 additions & 16 deletions nemoguardrails/rails/llm/config.py
@@ -37,6 +37,10 @@
from nemoguardrails.colang.v1_0.runtime.flows import _normalize_flow_id
from nemoguardrails.colang.v2_x.lang.utils import format_colang_parsing_error_message
from nemoguardrails.colang.v2_x.runtime.errors import ColangParsingError
from nemoguardrails.exceptions import (
InvalidModelConfigurationError,
InvalidRailsConfigurationError,
)

log = logging.getLogger(__name__)

@@ -136,8 +140,8 @@ def set_and_validate_model(cls, data: Any) -> Any:
model_from_params = parameters.get("model_name") or parameters.get("model")

if model_field and model_from_params:
raise ValueError(
"Model name must be specified in exactly one place: either in the 'model' field or in parameters, not both."
raise InvalidModelConfigurationError(
"Model name must be specified in exactly one place: either the `model` field, or in `parameters` (`parameters.model` or `parameters.model_name`).",
)
if not model_field and model_from_params:
data["model"] = model_from_params
@@ -151,8 +155,8 @@
def model_must_be_none_empty(self) -> "Model":
"""Validate that a model name is present either directly or in parameters."""
if not self.model or not self.model.strip():
raise ValueError(
"Model name must be specified either directly in the 'model' field or through 'model_name'/'model' in parameters"
raise InvalidModelConfigurationError(
"Model name must be specified in exactly one place: either the `model` field, or in `parameters` (`parameters.model` or `parameters.model_name`)."
)
return self

@@ -334,10 +338,10 @@ class TaskPrompt(BaseModel):
@root_validator(pre=True, allow_reuse=True)
def check_fields(cls, values):
if not values.get("content") and not values.get("messages"):
raise ValueError("One of `content` or `messages` must be provided.")
raise InvalidRailsConfigurationError("One of `content` or `messages` must be provided.")

if values.get("content") and values.get("messages"):
raise ValueError("Only one of `content` or `messages` must be provided.")
raise InvalidRailsConfigurationError("Only one of `content` or `messages` must be provided.")

return values

@@ -1414,7 +1418,11 @@ def check_model_exists_for_input_rails(cls, values):
if not flow_model:
continue
if flow_model not in model_types:
raise ValueError(f"No `{flow_model}` model provided for input flow `{_normalize_flow_id(flow)}`")
flow_id = _normalize_flow_id(flow)
available_types = ", ".join(f"'{str(t)}'" for t in sorted(model_types)) if model_types else "none"
raise InvalidRailsConfigurationError(
f"Input flow '{flow_id}' references model type '{flow_model}' that is not defined in the configuration. Detected model types: {available_types}."
)
return values

@root_validator(pre=True)
@@ -1436,7 +1444,11 @@ def check_model_exists_for_output_rails(cls, values):
if not flow_model:
continue
if flow_model not in model_types:
raise ValueError(f"No `{flow_model}` model provided for output flow `{_normalize_flow_id(flow)}`")
flow_id = _normalize_flow_id(flow)
available_types = ", ".join(f"'{str(t)}'" for t in sorted(model_types)) if model_types else "none"
raise InvalidRailsConfigurationError(
f"Output flow '{flow_id}' references model type '{flow_model}' that is not defined in the configuration. Detected model types: {available_types}."
)
return values

@root_validator(pre=True)
@@ -1450,9 +1462,13 @@ def check_prompt_exist_for_self_check_rails(cls, values):

# Input moderation prompt verification
if "self check input" in enabled_input_rails and "self_check_input" not in provided_task_prompts:
raise ValueError("You must provide a `self_check_input` prompt template.")
raise InvalidRailsConfigurationError(
"Missing a `self_check_input` prompt template, which is required for the `self check input` rail."
)
if "llama guard check input" in enabled_input_rails and "llama_guard_check_input" not in provided_task_prompts:
raise ValueError("You must provide a `llama_guard_check_input` prompt template.")
raise InvalidRailsConfigurationError(
"Missing a `llama_guard_check_input` prompt template, which is required for the `llama guard check input` rail."
)

# Only content-safety and topic-safety include a $model reference in the rail flow text
# Need to match rails with flow_id (excluding $model reference) and match prompts
@@ -1462,20 +1478,28 @@

# Output moderation prompt verification
if "self check output" in enabled_output_rails and "self_check_output" not in provided_task_prompts:
raise ValueError("You must provide a `self_check_output` prompt template.")
raise InvalidRailsConfigurationError(
"Missing a `self_check_output` prompt template, which is required for the `self check output` rail."
)
if (
"llama guard check output" in enabled_output_rails
and "llama_guard_check_output" not in provided_task_prompts
):
raise ValueError("You must provide a `llama_guard_check_output` prompt template.")
raise InvalidRailsConfigurationError(
"Missing a `llama_guard_check_output` prompt template, which is required for the `llama guard check output` rail."
)
if (
"patronus lynx check output hallucination" in enabled_output_rails
and "patronus_lynx_check_output_hallucination" not in provided_task_prompts
):
raise ValueError("You must provide a `patronus_lynx_check_output_hallucination` prompt template.")
raise InvalidRailsConfigurationError(
"Missing a `patronus_lynx_check_output_hallucination` prompt template, which is required for the `patronus lynx check output hallucination` rail."
)

if "self check facts" in enabled_output_rails and "self_check_facts" not in provided_task_prompts:
raise ValueError("You must provide a `self_check_facts` prompt template.")
raise InvalidRailsConfigurationError(
"Missing a `self_check_facts` prompt template, which is required for the `self check facts` rail."
)

# Only content-safety and topic-safety include a $model reference in the rail flow text
# Need to match rails with flow_id (excluding $model reference) and match prompts
@@ -1528,7 +1552,7 @@ def validate_models_api_key_env_var(cls, models):
api_keys = [m.api_key_env_var for m in models]
for api_key in api_keys:
if api_key and not os.environ.get(api_key):
raise ValueError(f"Model API Key environment variable '{api_key}' not set.")
raise InvalidRailsConfigurationError(f"Model API Key environment variable '{api_key}' not set.")
return models

raw_llm_call_action: Optional[str] = Field(
@@ -1801,4 +1825,6 @@ def _validate_rail_prompts(rails: list[str], prompts: list[Any], validation_rail
prompt_flow_id = flow_id.replace(" ", "_")
expected_prompt = f"{prompt_flow_id} $model={flow_model}"
if expected_prompt not in prompts:
raise ValueError(f"You must provide a `{expected_prompt}` prompt template.")
raise InvalidRailsConfigurationError(
f"Missing a `{expected_prompt}` prompt template, which is required for the `{validation_rail}` rail."
)
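
A quick sketch of the exception hierarchy these validators now rely on; the messages below are abbreviated examples. Because `ConfigurationError` subclasses `ValueError`, pre-existing `except ValueError` handlers around configuration loading still match the new exception types.

from nemoguardrails.exceptions import (
    ConfigurationError,
    InvalidModelConfigurationError,
    InvalidRailsConfigurationError,
)

# Both specific errors are ConfigurationError subclasses, and ConfigurationError
# itself subclasses ValueError, so callers that caught ValueError before this
# change continue to catch these richer errors.
for exc in (
    InvalidModelConfigurationError("Model name must be specified in exactly one place: ..."),
    InvalidRailsConfigurationError("Missing a `self_check_input` prompt template: ..."),
):
    assert isinstance(exc, ConfigurationError)
    assert isinstance(exc, ValueError)
    print(type(exc).__name__, "->", exc)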