diff --git a/README.md b/README.md index b407689..b23c2dc 100644 --- a/README.md +++ b/README.md @@ -209,6 +209,9 @@ openspace # Execute task openspace --model "anthropic/claude-sonnet-4-5" --query "Create a monitoring dashboard for my Docker containers" + +# Use MiniMax (high-performance, 204K context) +openspace --model "minimax/MiniMax-M2.7" --query "Build a REST API with FastAPI" ``` Add your own custom skills: [`openspace/skills/README.md`](openspace/skills/README.md). diff --git a/openspace/.env.example b/openspace/.env.example index d032801..4a81851 100644 --- a/openspace/.env.example +++ b/openspace/.env.example @@ -17,6 +17,9 @@ # OpenRouter (for openrouter/* models, e.g. openrouter/anthropic/claude-sonnet-4.5) OPENROUTER_API_KEY= +# MiniMax (for minimax/* models, e.g. minimax/MiniMax-M2.7) +MINIMAX_API_KEY= + # ── OpenSpace Cloud (optional) ────────────────────────────── # Register at https://open-space.cloud to get your key. # Enables cloud skill search & upload; local features work without it. diff --git a/openspace/config/README.md b/openspace/config/README.md index 74327ff..ca774d3 100644 --- a/openspace/config/README.md +++ b/openspace/config/README.md @@ -113,3 +113,41 @@ Layered system — later files override earlier ones: | `sandbox_enabled` | Enable sandboxing for all operations | `false` | | Per-backend overrides | Shell, MCP, GUI, Web each have independent security policies | Inherit global | +## 6. Supported LLM Providers +
+OpenSpace uses [LiteLLM](https://docs.litellm.ai/docs/providers) for model routing. Set your model via `--model` flag, `OPENSPACE_MODEL` env var, or host agent config. 
+ +| Provider | Model format | API Key env var | +|----------|-------------|-----------------| +| OpenRouter | `openrouter/anthropic/claude-sonnet-4.5` | `OPENROUTER_API_KEY` | +| Anthropic | `anthropic/claude-sonnet-4-5` | `ANTHROPIC_API_KEY` | +| OpenAI | `openai/gpt-4o` | `OPENAI_API_KEY` | +| DeepSeek | `deepseek/deepseek-chat` | `DEEPSEEK_API_KEY` | +| MiniMax | `minimax/MiniMax-M2.7` | `MINIMAX_API_KEY` | + +### MiniMax + +[MiniMax](https://platform.minimax.io) offers high-performance LLMs with 204K context at competitive pricing. + +**Available models:** + +| Model | Context | Description | +|-------|---------|-------------| +| `MiniMax-M2.7` | 204K | Peak performance, ultimate value | +| `MiniMax-M2.7-highspeed` | 204K | Same performance, faster and more agile | + +**Quick setup:** + +```bash +# Set your API key +export MINIMAX_API_KEY=your-key-here + +# Run with MiniMax +openspace --model "minimax/MiniMax-M2.7" --query "your task" +``` + +**API docs:** [OpenAI-compatible API](https://platform.minimax.io/docs/api-reference/text-openai-api) + +> [!NOTE] +> MiniMax temperature is automatically clamped to `(0.0, 1.0]` by OpenSpace. The `response_format` parameter is not supported and is automatically removed. 
+ diff --git a/openspace/host_detection/nanobot.py b/openspace/host_detection/nanobot.py index c06c743..77e753f 100644 --- a/openspace/host_detection/nanobot.py +++ b/openspace/host_detection/nanobot.py @@ -31,7 +31,7 @@ ("zhipu", ("zhipu", "glm", "zai"), ""), ("dashscope", ("qwen", "dashscope"), ""), ("moonshot", ("moonshot", "kimi"), "https://api.moonshot.ai/v1"), - ("minimax", ("minimax",), "https://api.minimax.io/v1"), + ("minimax", ("minimax",), "https://api.minimax.io/v1"), # MiniMax-M2.7, MiniMax-M2.7-highspeed ("groq", ("groq",), ""), ] diff --git a/openspace/host_detection/resolver.py b/openspace/host_detection/resolver.py index 3fb3611..3426c65 100644 --- a/openspace/host_detection/resolver.py +++ b/openspace/host_detection/resolver.py @@ -102,6 +102,18 @@ def build_llm_kwargs(model: str) -> tuple[str, Dict[str, Any]]: if not resolved_model: resolved_model = "openrouter/anthropic/claude-sonnet-4.5" + # --- Tier 3: Provider-native env vars (MiniMax auto-detection) --- + # If the model targets MiniMax but no api_key was set in Tier 1/2, + # auto-detect MINIMAX_API_KEY from the environment. + if "api_key" not in kwargs and resolved_model and "minimax" in resolved_model.lower(): + minimax_key = os.environ.get("MINIMAX_API_KEY") + if minimax_key: + kwargs["api_key"] = minimax_key + if "api_base" not in kwargs: + kwargs["api_base"] = "https://api.minimax.io/v1" + source = "MINIMAX_API_KEY env" + logger.info("Auto-detected MINIMAX_API_KEY for MiniMax model") + if kwargs: safe = { k: (v[:8] + "..." 
if k == "api_key" and isinstance(v, str) and len(v) > 8 else v) diff --git a/openspace/llm/client.py b/openspace/llm/client.py index 19a1664..54c255d 100644 --- a/openspace/llm/client.py +++ b/openspace/llm/client.py @@ -25,6 +25,41 @@ logger = Logger.get_logger(__name__) +def _is_minimax_model(model: str) -> bool: + """Check if the model string refers to a MiniMax model.""" + return "minimax" in model.lower() + + +def _apply_minimax_constraints(completion_kwargs: Dict) -> Dict: + """Apply MiniMax-specific parameter constraints. + + MiniMax API constraints: + - temperature must be in (0.0, 1.0] — cannot be 0 + - response_format is not supported — must be removed + """ + # Clamp temperature + temp = completion_kwargs.get("temperature") + if temp is not None: + original = temp + if temp <= 0: + temp = 0.01 + elif temp > 1.0: + temp = 1.0 + completion_kwargs["temperature"] = temp + if temp != original: + logger.debug( + "MiniMax: clamped temperature %.4f -> %.4f (must be in (0, 1])", + original, temp, + ) + + # Remove unsupported response_format + if "response_format" in completion_kwargs: + completion_kwargs.pop("response_format") + logger.debug("MiniMax: removed unsupported response_format parameter") + + return completion_kwargs + + def _sanitize_schema(params: Dict) -> Dict: """Sanitize tool parameter schema to comply with Claude API requirements. 
@@ -421,14 +456,19 @@ async def _rate_limit(self): async def _call_with_retry(self, **completion_kwargs): """Call LLM with backoff retry on rate limit errors - + Timeout and retry strategy: - Single call timeout: self.timeout (default 120s) - Rate limit retry delays: 60s, 90s, 120s - Total max time: timeout * max_retries + sum(retry_delays) """ + # Apply MiniMax-specific parameter constraints before calling + model = completion_kwargs.get("model", self.model) + if _is_minimax_model(model): + completion_kwargs = _apply_minimax_constraints(completion_kwargs) + last_exception = None - + for attempt in range(self.max_retries): try: # Add timeout to the completion call diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_minimax_integration.py b/tests/test_minimax_integration.py new file mode 100644 index 0000000..264da41 --- /dev/null +++ b/tests/test_minimax_integration.py @@ -0,0 +1,72 @@ +"""Integration tests for MiniMax provider support. + +These tests require MINIMAX_API_KEY to be set in the environment. +They are automatically skipped when the key is not available. +""" + +import asyncio +import os +import unittest + +MINIMAX_API_KEY = os.environ.get("MINIMAX_API_KEY") +SKIP_REASON = "MINIMAX_API_KEY not set" + + +@unittest.skipUnless(MINIMAX_API_KEY, SKIP_REASON) +class TestMiniMaxChatIntegration(unittest.TestCase): + """Integration tests for MiniMax chat completions via litellm.""" + + def test_basic_chat_completion(self): + """MiniMax M2.7 should return a valid chat completion.""" + import litellm + + response = litellm.completion( + model="minimax/MiniMax-M2.7", + messages=[{"role": "user", "content": "What is 2+2? 
Reply with just the number."}], + max_tokens=50, + temperature=0.5, + api_key=MINIMAX_API_KEY, + ) + self.assertTrue(response.choices) + content = response.choices[0].message.content + self.assertIsNotNone(content) + + def test_highspeed_model(self): + """MiniMax M2.7-highspeed should also work.""" + import litellm + + response = litellm.completion( + model="minimax/MiniMax-M2.7-highspeed", + messages=[{"role": "user", "content": "Reply with only the word 'ok'."}], + max_tokens=10, + temperature=0.5, + api_key=MINIMAX_API_KEY, + ) + self.assertTrue(response.choices) + self.assertTrue(response.choices[0].message.content) + + def test_llm_client_with_minimax(self): + """LLMClient should work with MiniMax models (temperature clamping applied).""" + from openspace.llm.client import LLMClient + + client = LLMClient( + model="minimax/MiniMax-M2.7-highspeed", + timeout=30.0, + max_retries=1, + api_key=MINIMAX_API_KEY, + ) + + result = asyncio.get_event_loop().run_until_complete( + client.complete( + messages=[{"role": "user", "content": "Reply with 'integration test passed'."}], + temperature=0.5, + max_tokens=30, + ) + ) + self.assertIn("message", result) + content = result["message"].get("content", "") + self.assertTrue(content) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_minimax_provider.py b/tests/test_minimax_provider.py new file mode 100644 index 0000000..12cdc51 --- /dev/null +++ b/tests/test_minimax_provider.py @@ -0,0 +1,200 @@ +"""Unit tests for MiniMax provider support in OpenSpace.""" + +import os +import unittest +from unittest.mock import patch, AsyncMock, MagicMock + +from openspace.llm.client import ( + _is_minimax_model, + _apply_minimax_constraints, +) +from openspace.host_detection.nanobot import match_provider, PROVIDER_REGISTRY +from openspace.host_detection.resolver import build_llm_kwargs + + +class TestIsMiniMaxModel(unittest.TestCase): + """Tests for MiniMax model detection.""" + + def 
test_minimax_prefixed_model(self): + self.assertTrue(_is_minimax_model("minimax/MiniMax-M2.7")) + + def test_minimax_highspeed_model(self): + self.assertTrue(_is_minimax_model("minimax/MiniMax-M2.7-highspeed")) + + def test_minimax_case_insensitive(self): + self.assertTrue(_is_minimax_model("MiniMax/MiniMax-M2.7")) + + def test_minimax_in_openai_compat(self): + self.assertTrue(_is_minimax_model("openai/MiniMax-M2.7")) + + def test_non_minimax_openai_model(self): + self.assertFalse(_is_minimax_model("openai/gpt-4o")) + + def test_non_minimax_anthropic_model(self): + self.assertFalse(_is_minimax_model("anthropic/claude-sonnet-4-5")) + + def test_non_minimax_openrouter_model(self): + self.assertFalse(_is_minimax_model("openrouter/anthropic/claude-sonnet-4.5")) + + def test_empty_string(self): + self.assertFalse(_is_minimax_model("")) + + +class TestApplyMiniMaxConstraints(unittest.TestCase): + """Tests for MiniMax parameter constraints.""" + + def test_clamp_temperature_zero(self): + """Temperature 0 should be clamped to 0.01.""" + kwargs = {"model": "minimax/MiniMax-M2.7", "temperature": 0} + result = _apply_minimax_constraints(kwargs) + self.assertEqual(result["temperature"], 0.01) + + def test_clamp_temperature_negative(self): + """Negative temperature should be clamped to 0.01.""" + kwargs = {"model": "minimax/MiniMax-M2.7", "temperature": -0.5} + result = _apply_minimax_constraints(kwargs) + self.assertEqual(result["temperature"], 0.01) + + def test_clamp_temperature_above_one(self): + """Temperature > 1.0 should be clamped to 1.0.""" + kwargs = {"model": "minimax/MiniMax-M2.7", "temperature": 1.5} + result = _apply_minimax_constraints(kwargs) + self.assertEqual(result["temperature"], 1.0) + + def test_valid_temperature_unchanged(self): + """Valid temperature (0 < t <= 1.0) should remain unchanged.""" + kwargs = {"model": "minimax/MiniMax-M2.7", "temperature": 0.7} + result = _apply_minimax_constraints(kwargs) + self.assertEqual(result["temperature"], 0.7) + + 
def test_temperature_one_unchanged(self): + """Temperature 1.0 is valid and should remain unchanged.""" + kwargs = {"model": "minimax/MiniMax-M2.7", "temperature": 1.0} + result = _apply_minimax_constraints(kwargs) + self.assertEqual(result["temperature"], 1.0) + + def test_no_temperature_no_change(self): + """When temperature is not set, no clamping should occur.""" + kwargs = {"model": "minimax/MiniMax-M2.7"} + result = _apply_minimax_constraints(kwargs) + self.assertNotIn("temperature", result) + + def test_remove_response_format(self): + """response_format should be removed for MiniMax models.""" + kwargs = { + "model": "minimax/MiniMax-M2.7", + "response_format": {"type": "json_object"}, + } + result = _apply_minimax_constraints(kwargs) + self.assertNotIn("response_format", result) + + def test_other_params_preserved(self): + """Non-constrained parameters should be preserved.""" + kwargs = { + "model": "minimax/MiniMax-M2.7", + "temperature": 0.5, + "max_tokens": 1024, + "messages": [{"role": "user", "content": "Hello"}], + } + result = _apply_minimax_constraints(kwargs) + self.assertEqual(result["max_tokens"], 1024) + self.assertEqual(result["messages"], [{"role": "user", "content": "Hello"}]) + self.assertEqual(result["temperature"], 0.5) + + +class TestProviderRegistry(unittest.TestCase): + """Tests for MiniMax in the nanobot provider registry.""" + + def test_minimax_in_registry(self): + """MiniMax should be in the provider registry.""" + names = [entry[0] for entry in PROVIDER_REGISTRY] + self.assertIn("minimax", names) + + def test_minimax_base_url(self): + """MiniMax should have the correct base URL.""" + for name, _keywords, base_url in PROVIDER_REGISTRY: + if name == "minimax": + self.assertEqual(base_url, "https://api.minimax.io/v1") + break + + def test_minimax_keyword_match(self): + """MiniMax keyword should match 'minimax'.""" + for name, keywords, _base_url in PROVIDER_REGISTRY: + if name == "minimax": + self.assertIn("minimax", keywords) + break 
+ + def test_match_provider_minimax_model(self): + """match_provider should find minimax config for minimax models.""" + providers = { + "minimax": {"apiKey": "test-key-123"}, + } + result = match_provider(providers, "minimax/MiniMax-M2.7") + self.assertIsNotNone(result) + self.assertEqual(result["api_key"], "test-key-123") + self.assertEqual(result["api_base"], "https://api.minimax.io/v1") + + def test_match_provider_minimax_keyword(self): + """match_provider should detect minimax in model name via keyword.""" + providers = { + "minimax": {"apiKey": "test-key-456"}, + } + result = match_provider(providers, "MiniMax-M2.7") + self.assertIsNotNone(result) + self.assertEqual(result["api_key"], "test-key-456") + + def test_match_provider_minimax_forced(self): + """match_provider should use minimax when forced_provider='minimax'.""" + providers = { + "minimax": {"apiKey": "test-key-789"}, + "openai": {"apiKey": "other-key"}, + } + result = match_provider(providers, "some-model", forced_provider="minimax") + self.assertIsNotNone(result) + self.assertEqual(result["api_key"], "test-key-789") + + +class TestResolverMiniMaxDetection(unittest.TestCase): + """Tests for MINIMAX_API_KEY auto-detection in the resolver.""" + + @patch.dict(os.environ, {"MINIMAX_API_KEY": "minimax-test-key"}, clear=False) + @patch("openspace.host_detection.nanobot.try_read_nanobot_config", return_value=None) + def test_auto_detect_minimax_api_key(self, _mock_nanobot): + """MINIMAX_API_KEY should be auto-detected for minimax models.""" + model, kwargs = build_llm_kwargs("minimax/MiniMax-M2.7") + self.assertEqual(model, "minimax/MiniMax-M2.7") + self.assertEqual(kwargs.get("api_key"), "minimax-test-key") + self.assertEqual(kwargs.get("api_base"), "https://api.minimax.io/v1") + + @patch.dict(os.environ, {}, clear=False) + @patch("openspace.host_detection.nanobot.try_read_nanobot_config", return_value=None) + def test_no_minimax_key_no_kwargs(self, _mock_nanobot): + """Without MINIMAX_API_KEY, no 
api_key should be set.""" + # Remove MINIMAX_API_KEY if it exists + os.environ.pop("MINIMAX_API_KEY", None) + model, kwargs = build_llm_kwargs("minimax/MiniMax-M2.7") + self.assertEqual(model, "minimax/MiniMax-M2.7") + self.assertNotIn("api_key", kwargs) + + @patch.dict(os.environ, {"MINIMAX_API_KEY": "minimax-key"}, clear=False) + @patch("openspace.host_detection.nanobot.try_read_nanobot_config", return_value=None) + def test_minimax_not_triggered_for_openai(self, _mock_nanobot): + """MINIMAX_API_KEY should not be used for non-minimax models.""" + model, kwargs = build_llm_kwargs("openai/gpt-4o") + self.assertEqual(model, "openai/gpt-4o") + self.assertNotIn("api_key", kwargs) + + @patch.dict( + os.environ, + {"OPENSPACE_LLM_API_KEY": "explicit-key", "MINIMAX_API_KEY": "minimax-key"}, + clear=False, + ) + @patch("openspace.host_detection.nanobot.try_read_nanobot_config", return_value=None) + def test_explicit_key_overrides_minimax(self, _mock_nanobot): + """OPENSPACE_LLM_API_KEY (Tier 1) should override MINIMAX_API_KEY (Tier 3).""" + model, kwargs = build_llm_kwargs("minimax/MiniMax-M2.7") + self.assertEqual(kwargs["api_key"], "explicit-key") + + +if __name__ == "__main__": + unittest.main()