
Commit 4d41dcc

Deprecate use_default_config and replace all its uses with autotune_effort (#924)
1 parent fe33e3b commit 4d41dcc

33 files changed (+189, -202 lines)

README.md

Lines changed: 2 additions & 2 deletions
@@ -275,8 +275,8 @@ implementations from a single Helion kernel.
 ## Settings for Development and Debugging
 
 When developing kernels with Helion, you might prefer skipping autotuning for faster iteration. To
-do this, set the environment variable `HELION_USE_DEFAULT_CONFIG=1` or use the decorator argument
-`@helion.kernel(use_default_config=True)`. **Warning:** The default configuration is slow and not intended for
+do this, set the environment variable `HELION_AUTOTUNE_EFFORT=none` or use the decorator argument
+`@helion.kernel(autotune_effort="none")`. **Warning:** The default configuration is slow and not intended for
 production or performance testing.
 
 To view the generated Triton code, set the environment variable `HELION_PRINT_OUTPUT_CODE=1` or include

benchmarks/run.py

Lines changed: 2 additions & 2 deletions
@@ -880,9 +880,9 @@ def helion_method(
         attr = getattr(mod, attr_name)
         if isinstance(attr, Kernel):
             attr.reset()
-            # Force autotuning unless HELION_USE_DEFAULT_CONFIG=1 is set
+            # Force autotuning unless HELION_AUTOTUNE_EFFORT=none is set
             # This ensures we run autotuning even if the kernel has pre-specified configs
-            if os.environ.get("HELION_USE_DEFAULT_CONFIG", "0") != "1":
+            if os.environ.get("HELION_AUTOTUNE_EFFORT", "") != "none":
                 # Only force full autotuning if no configs are provided
                 if not attr.configs:
                     attr.settings.force_autotune = True
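
A standalone sketch (not part of the commit) of the new gate's behavior: when `HELION_AUTOTUNE_EFFORT` is unset, or set to anything other than `none`, autotuning is still forced.

```python
# Sketch of the environment gate above in isolation. An unset variable
# (default "") or any value other than "none" leaves forced autotuning on.
import os

effort = os.environ.get("HELION_AUTOTUNE_EFFORT", "")
should_force_autotune = effort != "none"
print(effort, should_force_autotune)
```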

docs/api/config.md

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ The `Config` class represents kernel optimization parameters that control how He
 |--------|--------|----------|
 | **Purpose** | Control execution performance | Control compilation behavior |
 | **Autotuning** | ✅ Automatically optimized | ❌ Never autotuned |
-| **Examples** | `block_sizes`, `num_warps`, `indexing` | `print_output_code`, `use_default_config` |
+| **Examples** | `block_sizes`, `num_warps`, `indexing` | `print_output_code`, `autotune_effort` |
 | **When to use** | Performance optimization | Development, debugging, environment setup |
 
 
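A minimal sketch, not part of this commit, of how a Config entry and a Settings entry from this table can appear together on one kernel. The `config=` keyword mirrors the commented example in docs/index.md further down in this diff; the kernel name and body are illustrative.

```python
# Sketch only: a Config (execution parameters) alongside a Setting
# (compilation/debug behavior) on a single kernel. The config= keyword and
# the kernel body are assumptions based on other examples in this diff.
import torch
import helion
import helion.language as hl

@helion.kernel(
    config=helion.Config(block_sizes=[64], num_warps=4),  # Config: execution knobs
    print_output_code=True,                               # Setting: never autotuned
)
def add_one(x: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    for tile in hl.tile(x.shape[0]):
        out[tile] = x[tile] + 1
    return out
```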

docs/api/kernel.md

Lines changed: 2 additions & 2 deletions
@@ -47,7 +47,7 @@ c = vector_add(a, b) # Automatically compiles and executes
 
 ```python
 @helion.kernel(
-    use_default_config=True,  # Skip autotuning
+    autotune_effort="none",  # Skip autotuning
     print_output_code=True  # Debug generated code
 )
 def my_kernel(x: torch.Tensor) -> torch.Tensor:
@@ -154,7 +154,7 @@ Settings control **how the kernel is compiled** and the development environment:
 ```python
 @helion.kernel(
     # Settings parameters
-    use_default_config=True,  # Skip autotuning for development
+    autotune_effort="none",  # Skip autotuning for development
     autotune_effort="quick",  # Smaller autotuning budget when search is enabled
     print_output_code=True,  # Debug: show generated Triton code
     static_shapes=True,  # Compilation optimization strategy

docs/api/settings.md

Lines changed: 5 additions & 9 deletions
@@ -27,7 +27,7 @@ The `Settings` class controls compilation behavior and debugging options for Hel
 |--------|----------|--------|
 | **Purpose** | Control compilation behavior | Control execution performance |
 | **Autotuning** | ❌ Never autotuned | ✅ Automatically optimized |
-| **Examples** | `print_output_code`, `use_default_config` | `block_sizes`, `num_warps` |
+| **Examples** | `print_output_code`, `autotune_effort` | `block_sizes`, `num_warps` |
 | **When to use** | Development, debugging, environment setup | Performance optimization |
 
 Settings can be configured via:
@@ -41,7 +41,7 @@ Settings can be configured via:
 ### Using Environment Variables
 
 ```bash
-env HELION_PRINT_OUTPUT_CODE=1 HELION_USE_DEFAULT_CONFIG=1 my_kernel.py
+env HELION_PRINT_OUTPUT_CODE=1 HELION_AUTOTUNE_EFFORT=none my_kernel.py
 ```
 
 ### Using Decorator Arguments
@@ -52,7 +52,7 @@ import helion
 import helion.language as hl
 
 @helion.kernel(
-    use_default_config=True,  # Skip autotuning
+    autotune_effort="none",  # Skip autotuning
     print_output_code=True,  # Debug output
 )
 def my_kernel(x: torch.Tensor) -> torch.Tensor:
@@ -104,10 +104,6 @@ with helion.set_default_settings(
 ### Autotuning Settings
 
 ```{eval-rst}
-.. autoattribute:: Settings.use_default_config
-
-   Skip autotuning and use default configuration. Default is ``False``. Controlled by ``HELION_USE_DEFAULT_CONFIG=1``.
-
 .. autoattribute:: Settings.force_autotune
 
    Force autotuning even when explicit configs are provided. Default is ``False``. Controlled by ``HELION_FORCE_AUTOTUNE=1``.
@@ -165,7 +161,7 @@ with helion.set_default_settings(
 
    Select the autotuning effort preset. Available values:
 
-   - ``"none"`` – skip autotuning and run the default configuration (equivalent to ``use_default_config=True``).
+   - ``"none"`` – skip autotuning and run the default configuration.
    - ``"quick"`` – limited search for faster runs with decent performance.
   - ``"full"`` – exhaustive autotuning (current default behavior).
 
@@ -234,12 +230,12 @@ Built-in values for ``HELION_AUTOTUNER`` include ``"PatternSearch"``, ``"Differe
 | Environment Variable | Maps To | Description |
 |----------------------|---------|-------------|
 | ``TRITON_F32_DEFAULT`` | ``dot_precision`` | Sets default floating-point precision for Triton dot products (``"tf32"``, ``"tf32x3"``, ``"ieee"``). |
-| ``HELION_USE_DEFAULT_CONFIG`` | ``use_default_config`` | Skip autotuning entirely and rely on the default (debug) configuration. |
 | ``HELION_FORCE_AUTOTUNE`` | ``force_autotune`` | Force the autotuner to run even when explicit configs are provided. |
 | ``HELION_AUTOTUNE_COMPILE_TIMEOUT`` | ``autotune_compile_timeout`` | Maximum seconds to wait for Triton compilation during autotuning. |
 | ``HELION_AUTOTUNE_RANDOM_SEED`` | ``autotune_random_seed`` | Seed used for randomized autotuning searches. |
 | ``HELION_AUTOTUNE_MAX_GENERATIONS`` | ``autotune_max_generations`` | Upper bound on generations for Pattern Search and Differential Evolution. |
 | ``HELION_AUTOTUNE_ACCURACY_CHECK`` | ``autotune_accuracy_check`` | Toggle baseline validation for candidate configs. |
+| ``HELION_AUTOTUNE_EFFORT`` | ``autotune_effort`` | Select autotuning preset (``"none"``, ``"quick"``, ``"full"``). |
 | ``HELION_REBENCHMARK_THRESHOLD`` | ``autotune_rebenchmark_threshold`` | Re-run configs whose performance is within a multiplier of the current best. |
 | ``HELION_AUTOTUNE_PROGRESS_BAR`` | ``autotune_progress_bar`` | Enable or disable the progress bar UI during autotuning. |
 | ``HELION_PRINT_OUTPUT_CODE`` | ``print_output_code`` | Print generated Triton code to stderr for inspection. |
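
To illustrate the new table row, a small sketch (not from the commit) of the keyword it maps to; it assumes `Settings` is importable from the top-level `helion` package, as the autodoc entries above imply.

```python
# Sketch: the autotune_effort setting that HELION_AUTOTUNE_EFFORT maps to.
# Assumes helion.Settings is exported at package level; values mirror the
# presets documented above ("none", "quick", "full").
import helion

settings = helion.Settings(autotune_effort="none")
print(settings.autotune_effort)  # -> "none"
```

The same value can also come from the environment (`HELION_AUTOTUNE_EFFORT=none`) or be applied more broadly through the `helion.set_default_settings(...)` context manager shown in the hunk context above; its exact signature is not visible in this diff.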

docs/index.md

Lines changed: 3 additions & 3 deletions
@@ -212,7 +212,7 @@ Example combining both:
 ```python
 @helion.kernel(
     # Settings: Control compilation behavior
-    use_default_config=True,  # Skip autotuning for development
+    autotune_effort="none",  # Skip autotuning for development
     print_output_code=True,  # Debug: show generated code
     # Config: Control GPU execution (when not using default)
     # config=helion.Config(block_sizes=[64, 32], num_warps=8)
@@ -225,8 +225,8 @@ def debug_kernel(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 ## Settings for Development and Debugging
 
 When developing kernels with Helion, you might prefer skipping autotuning for faster iteration. To
-do this, set the environment variable `HELION_USE_DEFAULT_CONFIG=1` or use the decorator argument
-`@helion.kernel(use_default_config=True)`. **Warning:** The default configuration is slow and not intended for
+do this, set the environment variable `HELION_AUTOTUNE_EFFORT=none` or use the decorator argument
+`@helion.kernel(autotune_effort="none")`. **Warning:** The default configuration is slow and not intended for
 production or performance testing.
 
 To view the generated Triton code, set the environment variable `HELION_PRINT_OUTPUT_CODE=1` or include

docs/installation.md

Lines changed: 1 addition & 1 deletion
@@ -133,7 +133,7 @@ import torch
 import helion
 import helion.language as hl
 
-@helion.kernel(use_default_config=True)
+@helion.kernel(autotune_effort="none")
 def test_kernel(x: torch.Tensor) -> torch.Tensor:
     out = torch.empty_like(x)
     for tile in hl.tile(x.shape[0]):
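
The hunk above ends at the loop header. A complete, runnable version of this kind of smoke-test kernel might look like the sketch below; the loop body and the host-side call are assumptions, not part of the diff, and only the `autotune_effort="none"` decorator argument comes from this commit.

```python
# Sketch: completed installation smoke test. Body and call site are illustrative.
import torch
import helion
import helion.language as hl

@helion.kernel(autotune_effort="none")
def test_kernel(x: torch.Tensor) -> torch.Tensor:
    out = torch.empty_like(x)
    for tile in hl.tile(x.shape[0]):
        out[tile] = x[tile] + 1
    return out

if __name__ == "__main__":
    x = torch.randn(1024, device="cuda")
    print(test_kernel(x)[:4])
```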

examples/fp8_gemm.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@
 # Override default config to work around Triton tl.dot requirement:
 # `AssertionError: Input shapes should have M >= 16, N >= 16 and K >= 32`
 config = None
-if os.environ.get("HELION_USE_DEFAULT_CONFIG") == "1":
+if os.environ.get("HELION_AUTOTUNE_EFFORT") == "none":
     config = helion.Config(block_sizes=[32, 32, 32])
 
 

examples/jagged_layer_norm.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
 
 
 # %%
-@helion.kernel(use_default_config=True)
+@helion.kernel(autotune_effort="none")
 def jagged_layer_norm_kernel(
     x_values: torch.Tensor,  # [total_L, M] - compressed values
     x_offsets: torch.Tensor,  # [B+1] - sequence start offsets

helion/runtime/settings.py

Lines changed: 0 additions & 8 deletions
@@ -222,14 +222,6 @@ def __init__(self, **settings: object) -> None:
             settings: Keyword arguments representing various settings.
         """
 
-        # Translate use_default_config to autotune_effort='none' for backward compatibility
-        if (
-            settings.get("use_default_config")
-            or os.environ.get("HELION_USE_DEFAULT_CONFIG") == "1"
-        ):
-            settings.setdefault("autotune_effort", "none")
-        settings.pop("use_default_config", None)
-
         if defaults := getattr(_tls, "default_settings", None):
             settings = {**defaults.to_dict(), **settings}
 
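The lines this hunk keeps merge thread-local default settings with explicitly passed keywords, so explicit arguments win. A tiny standalone sketch of that precedence, with plain dicts standing in for `Settings`:

```python
# Sketch of the {**defaults, **settings} merge retained above: explicitly
# passed keywords override thread-local defaults. Plain dicts are used here
# purely to show the precedence; the real code merges Settings fields.
defaults = {"autotune_effort": "none", "print_output_code": False}
explicit = {"print_output_code": True}

merged = {**defaults, **explicit}
assert merged == {"autotune_effort": "none", "print_output_code": True}
```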

0 commit comments
