Skip to content

Commit a6dd082

Browse files
committed
Add epilogue subtiling
stack-info: PR: #948, branch: PaulZhang12/stack/14
1 parent 9660804 commit a6dd082

File tree

7 files changed

+163
-20
lines changed

7 files changed

+163
-20
lines changed

helion/_compiler/compile_environment.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,9 @@ def __init__(self, device: torch.device, settings: Settings) -> None:
9999
self.device_load_count = (
100100
0 # Track number of loads in all device code for eviction policy tuning
101101
)
102+
self.device_store_count = (
103+
0 # Track number of stores for subtiling
104+
)
102105

103106
def add_kernel_tensor_size(self, sizes: Sequence[int | torch.SymInt]) -> None:
104107
from .device_function import contains_only_block_size_symbols

helion/_compiler/device_function.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -250,6 +250,7 @@ def __init__(self, name: str, config: Config, codegen: GenerateAST) -> None:
250250
self.rng_seed_count = 0
251251
self.device_load_index = 0 # Track which load in device code we're generating (for eviction policy tuning)
252252
# Name of the RNG seed buffer parameter in kernel signature
253+
self.device_store_index = 0 # Track which store in device code we're generating (for subtiling)
253254
self.rng_seed_buffer_param_name = None
254255

255256
def has_rng_ops(self) -> bool:
@@ -420,9 +421,14 @@ def tensor_arg(
420421
def tensor_descriptor_arg(
421422
self, fake_value: torch.Tensor, block_size: list[int | torch.SymInt]
422423
) -> TensorDescriptorArg:
424+
import re
423425
host_function = HostFunction.current()
424426
block_size_expr = ", ".join(map(self.literal_expr, block_size))
427+
pattern = r'triton_helpers\.div_floor_integer\(([^,]+),\s*(\d+)\)'
428+
replacement = r'\1 // \2'
429+
block_size_expr = re.sub(pattern, replacement, block_size_expr)
425430
key = (fake_value, block_size_expr)
431+
426432
if key not in self._tensor_descriptor_args:
427433
origin = host_function.tensor_to_origin[fake_value]
428434
desc_name = self.new_var(origin.suggest_var_name() + "_desc")

helion/_compiler/device_ir.py

Lines changed: 33 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1076,7 +1076,7 @@ def visit_For(self, node: ast.For) -> None:
10761076
self.generic_visit(node)
10771077

10781078

1079-
def _count_device_loads(device_ir: DeviceIR) -> int:
1079+
def _count_device_loads_and_stores(device_ir: DeviceIR) -> int:
10801080
"""Count the number of load operations in all device code for eviction policy tuning."""
10811081
from ..language import memory_ops
10821082

@@ -1087,26 +1087,29 @@ def _count_device_loads(device_ir: DeviceIR) -> int:
10871087
if info.new_graph_id is not None
10881088
}
10891089

1090-
load_count = 0
1090+
load_count, store_count = 0, 0
10911091
# Walk all graphs except rolled duplicates
10921092
for graph_info in device_ir.graphs:
10931093
if graph_info.graph_id in rolled_graph_ids:
10941094
continue
10951095

10961096
for node in graph_info.graph.nodes:
10971097
# Check if this is a load operation
1098-
if node.op == "call_function" and node.target is memory_ops.load:
1099-
# Only count loads without explicit eviction policy
1100-
# (user can still specify eviction_policy to override tuning)
1101-
# Check kwargs first, then check if 4th arg (eviction_policy) is None
1102-
eviction_policy_arg = node.kwargs.get("eviction_policy")
1103-
if eviction_policy_arg is None:
1104-
# Check if eviction_policy was passed as positional arg (index 3)
1105-
if len(node.args) >= 4:
1106-
eviction_policy_arg = node.args[3]
1098+
if node.op == "call_function":
1099+
if node.target is memory_ops.load:
1100+
# Only count loads without explicit eviction policy
1101+
# (user can still specify eviction_policy to override tuning)
1102+
# Check kwargs first, then check if 4th arg (eviction_policy) is None
1103+
eviction_policy_arg = node.kwargs.get("eviction_policy")
11071104
if eviction_policy_arg is None:
1108-
load_count += 1
1109-
return load_count
1105+
# Check if eviction_policy was passed as positional arg (index 3)
1106+
if len(node.args) >= 4:
1107+
eviction_policy_arg = node.args[3]
1108+
if eviction_policy_arg is None:
1109+
load_count += 1
1110+
elif node.target is memory_ops.store:
1111+
store_count += 1
1112+
return load_count, store_count
11101113

11111114

11121115
def _register_eviction_policy_tunable(load_count: int) -> None:
@@ -1124,6 +1127,21 @@ def _register_eviction_policy_tunable(load_count: int) -> None:
11241127
env.config_spec.load_eviction_policies = fragment
11251128
env.device_load_count = load_count
11261129

1130+
def _register_epilogue_subtile_tunable(store_count: int) -> None:
1131+
"""Register the epilogue subtile tunable for all device stores."""
1132+
if store_count == 0:
1133+
return
1134+
1135+
from ..autotuner.config_fragment import EnumFragment
1136+
from ..autotuner.config_fragment import ListOf
1137+
from ..autotuner.config_spec import VALID_EPILOGUE_SUBTILE_SIZES
1138+
1139+
env = CompileEnvironment.current()
1140+
# Register a tunable for epilogue subtile for all device stores
1141+
fragment = ListOf(EnumFragment(choices=VALID_EPILOGUE_SUBTILE_SIZES), length=store_count)
1142+
env.config_spec.epilogue_subtiling = fragment
1143+
env.device_store_count = store_count
1144+
11271145

11281146
def lower_to_device_ir(func: HostFunction) -> DeviceIR:
11291147
device_ir = DeviceIR()
@@ -1148,8 +1166,9 @@ def lower_to_device_ir(func: HostFunction) -> DeviceIR:
11481166
CompileEnvironment.current().config_spec.disallow_pid_type("xyz")
11491167

11501168
# Count all device loads and register eviction policy tunable
1151-
load_count = _count_device_loads(device_ir)
1169+
load_count, store_count = _count_device_loads_and_stores(device_ir)
11521170
_register_eviction_policy_tunable(load_count)
1171+
_register_epilogue_subtile_tunable(store_count)
11531172

11541173
return device_ir
11551174

helion/_compiler/indexing_strategy.py

Lines changed: 99 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
from .. import exc
1616
from .._compat import get_tensor_descriptor_fn_name
1717
from .ast_extension import expr_from_string
18+
from .ast_extension import statement_from_string
1819
from .compile_environment import CompileEnvironment
1920
from .device_function import DeviceFunction
2021
from .host_function import HostFunction
@@ -353,7 +354,6 @@ def codegen_load(
353354
)
354355
assert extra_mask is None
355356
indexing = BlockedSubscriptIndexing.create(state, fake_tensor, subscript)
356-
357357
# Load from tensor descriptor with permuted offsets
358358
load_expr = expr_from_string(
359359
f"{indexing.tensor_descriptor(state)}.load({indexing.offsets_str_permuted(state)})"
@@ -383,23 +383,119 @@ def codegen_store(
383383
)
384384
assert extra_mask is None
385385
indexing = BlockedSubscriptIndexing.create(state, fake_tensor, subscript)
386+
store_value = indexing.reshape_store(state, value)
387+
388+
config = DeviceFunction.current().config
389+
epilogue_subtiles = state.config.epilogue_subtiling
390+
if torch.cuda.get_device_capability() >= (9, 0) and (idx := state.device_function.device_store_index) < len(epilogue_subtiles):
391+
subtile_split = epilogue_subtiles[idx]
392+
state.device_function.device_store_index += 1
393+
394+
subtile_codegen = self._codegen_epilogue_subtile_store(state, fake_tensor, indexing, store_value, subtile_split, config)
395+
if subtile_codegen is not None:
396+
return subtile_codegen
386397

387398
# Apply permutation to the value being stored if needed
388399
desc_arg = indexing.tensor_descriptor_arg(state)
389-
store_value = indexing.reshape_store(state, value)
390400

391401
if desc_arg.permutation is not None:
392402
# Apply permutation to the value
393403
store_value = expr_from_string(
394404
f"tl.permute({{store_val}}, {desc_arg.permutation!r})",
395405
store_val=store_value,
396406
)
397-
407+
398408
return expr_from_string(
399409
f"{indexing.tensor_descriptor(state)}.store({indexing.offsets_str_permuted(state)}, {{value}})",
400410
value=store_value,
401411
)
402412

def _codegen_epilogue_subtile_store(
    self,
    state: CodegenState,
    fake_tensor: torch.Tensor,
    indexing: BlockedSubscriptIndexing,
    store_value: ast.AST,
    subtile_split: int,
    config: Config,
) -> ast.AST | None:
    """Split a 2D tensor-descriptor store into two half-width stores.

    The store value is reshaped to ``(M, 2, N // 2)``, split with
    ``tl.split``, and each half is stored through the (narrowed) tensor
    descriptor at adjacent column offsets. The first half is emitted as a
    statement; the second half is returned as the expression for the
    caller to emit. Returns ``None`` whenever subtiling does not apply,
    in which case the caller falls back to a single full-width store.

    NOTE(review): ``fake_tensor`` is currently unused; kept for interface
    parity with the other store-codegen helpers.
    """
    # Only a split factor of 2 is supported: the reshape below hardcodes a
    # middle dimension of 2, so reject anything else (0 means "disabled").
    # TODO: Support more epilogue subtile configs besides 2.
    if subtile_split != 2:
        return None
    # Currently support 2D tiles without permutations.
    if len(indexing.block_shape) != 2 or len(indexing.offsets) != 2:
        return None

    env = CompileEnvironment.current()
    block_m, block_n = indexing.block_shape
    try:
        block_n_hint = env.size_hint(block_n)
        block_idx = env.get_block_id(block_n)
        block_size = env.block_sizes[block_idx].from_config(config)
    except Exception:
        # Best-effort: block_n may not map to a tunable block size
        # (e.g. a constant expression); fall back to a plain store.
        return None

    # Need an evenly divisible inner dimension, and a block wide enough
    # for the split to be worthwhile.
    if block_n_hint % 2 != 0 or block_size <= 16:
        return None

    device_fn = state.device_function
    codegen = state.codegen

    block_m_str = device_fn.literal_expr(block_m)
    block_n_str = device_fn.literal_expr(block_n)
    # Narrow the descriptor's inner block dimension so the tensor
    # descriptor created below matches the half-width subtile stores.
    indexing.block_shape[1] //= subtile_split

    # Registers the (narrowed) descriptor argument as a side effect.
    desc_arg = indexing.tensor_descriptor_arg(state)

    block_n_half_str = f"({block_n_str} // {subtile_split})"

    # Lift the store value into a temporary variable for reuse.
    acc_var = codegen.lift(store_value, prefix="acc")

    reshape_expr = expr_from_string(
        "tl.reshape({acc}, [{dim_m}, 2, {dim_half}]).permute(0, 2, 1)",
        acc=acc_var,
        dim_m=expr_from_string(block_m_str),
        dim_half=expr_from_string(block_n_half_str),
    )
    reshape_var = codegen.lift(reshape_expr, prefix="acc")

    acc0_name = codegen.tmpvar(prefix="acc")
    acc1_name = codegen.tmpvar(prefix="acc")
    codegen.add_statement(
        statement_from_string(
            f"{acc0_name}, {acc1_name} = tl.split({{acc}})",
            acc=reshape_var,
        )
    )
    acc0 = expr_from_string(acc0_name)
    acc1 = expr_from_string(acc1_name)

    desc_name = indexing.tensor_descriptor(state)
    offset0 = expr_from_string(indexing.offsets[0])
    offset1 = expr_from_string(indexing.offsets[1])

    # First subtile store is emitted as a standalone statement.
    codegen.add_statement(
        statement_from_string(
            f"{desc_name}.store([{{off0}}, {{off1}}], {{value}})",
            off0=offset0,
            off1=offset1,
            value=acc0,
        )
    )

    # Second subtile lands half a block further along the inner dim.
    offset1_shifted = expr_from_string(
        "({offset} + {half})",
        offset=expr_from_string(indexing.offsets[1]),
        half=expr_from_string(block_n_half_str),
    )

    # Emit second subtile store as the expression returned to the caller.
    return expr_from_string(
        f"{desc_name}.store([{{off0}}, {{off1}}], {{value}})",
        off0=offset0,
        off1=offset1_shifted,
        value=acc1,
    )
403499

404500
class StackIndexingStrategy:
405501
"""

helion/_compiler/inductor_lowering.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -766,7 +766,10 @@ def normalize_args_kwargs(
766766
api_func: APIFunc,
767767
node: torch.fx.Node,
768768
) -> None:
769-
bound = api_func._signature.bind(*node.args, **node.kwargs)
769+
try:
770+
bound = api_func._signature.bind(*node.args, **node.kwargs)
771+
except:
772+
import pdb; pdb.set_trace()
770773
bound.apply_defaults()
771774
node.args = (*bound.arguments.values(),)
772775
node.kwargs = {}

helion/autotuner/config_spec.py

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,10 +52,12 @@
5252
"pid_type",
5353
"indexing",
5454
"load_eviction_policies",
55+
"epilogue_subtiling"
5556
]
5657
)
5758
VALID_PID_TYPES = ("flat", "xyz", "persistent_blocked", "persistent_interleaved")
5859
VALID_EVICTION_POLICIES = ("", "first", "last")
60+
VALID_EPILOGUE_SUBTILE_SIZES = (0, 2)
5961

6062

6163
@dataclasses.dataclass
@@ -105,11 +107,14 @@ class ConfigSpec:
105107
EnumFragment(choices=VALID_EVICTION_POLICIES), length=0
106108
)
107109
)
110+
epilogue_subtiling: ListOf = dataclasses.field(
111+
default_factory=lambda: ListOf(EnumFragment(choices=VALID_EPILOGUE_SUBTILE_SIZES), length=0)
112+
)
108113

109114
@staticmethod
110115
def _valid_indexing_types() -> tuple[IndexingLiteral, ...]:
111116
return (
112-
("pointer", "block_ptr", "tensor_descriptor")
117+
("pointer", "tensor_descriptor")
113118
if supports_tensor_descriptor()
114119
else ("pointer", "block_ptr")
115120
)
@@ -208,6 +213,7 @@ def normalize(self, config: helion.Config | dict[str, object]) -> None:
208213
"range_flattens",
209214
"static_ranges",
210215
"load_eviction_policies",
216+
"epilogue_subtiling",
211217
):
212218
if not config.get(name):
213219
config.pop(name, None)
@@ -217,6 +223,7 @@ def normalize(self, config: helion.Config | dict[str, object]) -> None:
217223
config.setdefault(
218224
"load_eviction_policies", self.load_eviction_policies.default()
219225
)
226+
config.setdefault("epilogue_subtiling", self.epilogue_subtiling.default())
220227
# TODO(jansel): include num_ctas and max_nreg
221228

222229
for name, values in (
@@ -230,7 +237,7 @@ def normalize(self, config: helion.Config | dict[str, object]) -> None:
230237
)
231238
else:
232239
config[name] = values[0]
233-
240+
234241
# Set default values for grid indices when pid_type is not persistent
235242
pid_type = config["pid_type"]
236243
if pid_type in ("flat", "xyz") and self.grid_block_ids:
@@ -289,6 +296,7 @@ def flat_config(self, fn: Callable[[ConfigSpecFragment], object]) -> helion.Conf
289296
"indexing": fn(EnumFragment(self._valid_indexing_types())),
290297
"pid_type": fn(EnumFragment(self.allowed_pid_types)),
291298
"load_eviction_policies": fn(self.load_eviction_policies),
299+
"epilogue_subtiling": fn(self.epilogue_subtiling),
292300
}
293301
# Add tunable parameters
294302
config.update(
@@ -307,9 +315,11 @@ def flat_config(self, fn: Callable[[ConfigSpecFragment], object]) -> helion.Conf
307315
"range_flattens",
308316
"static_ranges",
309317
"load_eviction_policies",
318+
"epilogue_subtiling",
310319
):
311320
if not config.get(name):
312321
config.pop(name, None)
322+
313323
self.normalize(config)
314324
return helion.Config(**config)
315325

helion/runtime/config.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ def __init__(
3939
num_stages: int | None = None,
4040
pid_type: PidTypeLiteral | None = None,
4141
indexing: IndexingLiteral | None = None,
42+
epilogue_subtiling: list[int] | None = None,
4243
# For user-defined properties
4344
**kwargs: object,
4445
) -> None:
@@ -61,6 +62,7 @@ def __init__(
6162
num_stages: Number of stages for software pipelining.
6263
pid_type: Program ID type strategy ("flat", "xyz", "persistent_blocked", "persistent_interleaved").
6364
indexing: Indexing strategy ("pointer", "tensor_descriptor", "block_ptr").
65+
epilogue_subtiling: Per-store epilogue subtile split factors (0 disables subtiling for that store).
6466
**kwargs: Additional user-defined configuration parameters.
6567
"""
6668
self.config = {}
@@ -81,6 +83,7 @@ def __init__(
8183
"num_stages": num_stages,
8284
"indexing": indexing,
8385
"pid_type": pid_type,
86+
"epilogue_subtiling": epilogue_subtiling,
8487
}
8588
for key, value in core_props.items():
8689
if value is not None:
@@ -206,6 +209,9 @@ def load_eviction_policies(self) -> list[str]:
206209
def indexing(self) -> IndexingLiteral:
207210
return self.config.get("indexing", "pointer") # type: ignore[return-value]
208211

@property
def epilogue_subtiling(self) -> list[int]:
    """Per-store epilogue subtile split factors (empty list = disabled).

    Fixes the return annotation, which previously claimed ``bool`` while
    the body returns ``list[int]``.
    """
    return cast("list[int]", self.config.get("epilogue_subtiling", []))
209215

210216
def _to_hashable(x: object) -> object:
211217
if isinstance(x, list):

0 commit comments

Comments
 (0)