
Commit cd0040c

Separate the resource partitioning from adjacency partitioning
1 parent 9ee7e67 commit cd0040c
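
This commit pulls the CPU-resource-aware re-partitioning step out of the adjacency partitioner and into a dedicated pass: `_compiler.py` now calls a `resource_partition` function from a new `partitioning._resource_partitioner` module after adjacency partitioning, and the hardcoded size budget inside the adjacency partitioner is doubled. The new `_resource_partitioner.py` module itself (presumably the bulk of the +526 added lines) is not reproduced in this view.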

File tree

3 files changed, +526 -1 lines changed

py/torch_tensorrt/dynamo/_compiler.py

Lines changed: 14 additions & 0 deletions
@@ -860,6 +860,20 @@ def preserve_module_specs(
             require_full_compilation=settings.require_full_compilation,
         )
 
+        from torch_tensorrt.dynamo.partitioning._resource_partitioner import (
+            resource_partition as resource_partitioner_partition,
+        )
+
+        partitioned_module, _ = resource_partitioner_partition(
+            gm,
+            partitioned_module,
+            min_block_size=settings.min_block_size,
+            torch_executed_ops=settings.torch_executed_ops,
+            require_full_compilation=settings.require_full_compilation,
+            skip_fusion=(num_supported_ops == total_ops),
+            cpu_memory_budget=settings.cpu_memory_budget,
+        )
+
         dryrun_tracker.unsupported_ops = supported_ops.unsupported_operators
 
         # The global partitioner leaves non-TRT nodes as-is
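
The diff chains a second, resource-aware pass over the module the adjacency partitioner produced, gated by a `cpu_memory_budget` setting. As a rough sketch of what checking a partitioned `torch.fx.GraphModule` against such a budget could look like (the `_run_on_acc` submodule-name convention and the byte counting below are assumptions for illustration, not the contents of `_resource_partitioner.py`):

```python
import torch


def submodule_nbytes(mod: torch.nn.Module) -> int:
    # Bytes held by the submodule's parameters and buffers.
    return sum(
        t.numel() * t.element_size()
        for t in list(mod.parameters()) + list(mod.buffers())
    )


def over_budget_submodules(
    partitioned_module: torch.fx.GraphModule,
    cpu_memory_budget: int,
) -> list[str]:
    """Report accelerated submodules whose weights exceed the host budget.

    A real resource partitioner would re-split these subgraphs; this
    sketch only identifies them.
    """
    return [
        name
        for name, submodule in partitioned_module.named_children()
        if name.startswith("_run_on_acc")  # assumed TRT-bound segment naming
        and submodule_nbytes(submodule) > cpu_memory_budget
    ]
```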

py/torch_tensorrt/dynamo/partitioning/_adjacency_partitioner.py

Lines changed: 1 addition & 1 deletion
@@ -234,7 +234,7 @@ def partition_graph(self) -> torch.fx.GraphModule:
 
         subgraphs = self.break_subgraphs(
             subgraphs,
-            subgraph_size_budget=500 * 1024 * 1024,  # self.calculate_size_budget()
+            subgraph_size_budget=1000 * 1024 * 1024,  # self.calculate_size_budget()
         )
 
         # Set the number of TRT engines to be generated
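
The budget passed to `break_subgraphs` doubles from 500 MiB to 1000 MiB, and the trailing comment suggests the constant is a stand-in for a computed `self.calculate_size_budget()`. One plausible, entirely hypothetical shape for such a calculation, sizing the budget to a fraction of available host memory:

```python
import psutil  # assumed dependency for this sketch; not confirmed by the diff


def calculate_size_budget(fraction: float = 0.25) -> int:
    """Hypothetical dynamic budget: a fraction of currently free host RAM."""
    available = psutil.virtual_memory().available  # bytes free on the host
    return int(available * fraction)


# The commit instead keeps a hardcoded value:
subgraph_size_budget = 1000 * 1024 * 1024  # 1000 MiB, up from 500 MiB
```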
