From 194cc58571012c18ae0b10cd121a95d6ca048dee Mon Sep 17 00:00:00 2001
From: Masahiro Hiramori
Date: Sun, 8 Feb 2026 11:19:04 +0900
Subject: [PATCH 1/3] use dlpack to convert torch tensor to tvm tensor

---
 python/tvm/relax/frontend/torch/dynamo.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relax/frontend/torch/dynamo.py b/python/tvm/relax/frontend/torch/dynamo.py
index dea08256de71..3b8734280092 100644
--- a/python/tvm/relax/frontend/torch/dynamo.py
+++ b/python/tvm/relax/frontend/torch/dynamo.py
@@ -67,7 +67,7 @@ def to_torch_tensor(nd_tensor):
         def to_tvm_tensor(torch_tensor):
             """A helper function to transfer a torch.tensor to Tensor."""
             if not isinstance(torch_tensor, torch._subclasses.fake_tensor.FakeTensor):
-                return tvm.runtime.tensor(torch_tensor.numpy())
+                return tvm.runtime.from_dlpack(torch.utils.dlpack.to_dlpack(torch_tensor))
             # Fake Tensor
             real_tensor = torch.randn(torch_tensor.shape, dtype=torch_tensor.dtype)
             return tvm.runtime.tensor(real_tensor.numpy())
@@ -129,6 +129,10 @@ def to_tvm_tensor(torch_tensor):
         mod = mod.with_attr("target", target)
         mod = seq(mod)
 
+        if device.type == "cuda":
+            with target:
+                mod = tvm.tir.transform.DefaultGPUSchedule()(mod)
+
         ex = relax_build(mod, target=target)
         vm = tvm.relax.VirtualMachine(ex.mod, device=dev)
 

From 89ca79bc9ae4336f89467a044088f2817a1090e7 Mon Sep 17 00:00:00 2001
From: Masahiro Hiramori
Date: Sun, 8 Feb 2026 11:23:59 +0900
Subject: [PATCH 2/3] simplify torch tensor to tvm tensor conversion

---
 python/tvm/relax/frontend/torch/dynamo.py | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/python/tvm/relax/frontend/torch/dynamo.py b/python/tvm/relax/frontend/torch/dynamo.py
index 3b8734280092..150649ea96a7 100644
--- a/python/tvm/relax/frontend/torch/dynamo.py
+++ b/python/tvm/relax/frontend/torch/dynamo.py
@@ -64,14 +64,6 @@ def to_torch_tensor(nd_tensor):
             else:
                 raise ValueError(f"Unsupported type {type(nd_tensor)}")
 
-        def to_tvm_tensor(torch_tensor):
-            """A helper function to transfer a torch.tensor to Tensor."""
-            if not isinstance(torch_tensor, torch._subclasses.fake_tensor.FakeTensor):
-                return tvm.runtime.from_dlpack(torch.utils.dlpack.to_dlpack(torch_tensor))
-            # Fake Tensor
-            real_tensor = torch.randn(torch_tensor.shape, dtype=torch_tensor.dtype)
-            return tvm.runtime.tensor(real_tensor.numpy())
-
         graph_module.graph.eliminate_dead_code()
         device = device_from_inputs(example_inputs)
 
@@ -143,7 +135,10 @@ def exec_tvm(*i_args):
             for arg in args:
                 if arg.requires_grad:
                     arg = arg.detach()
-                vm_args.append(to_tvm_tensor(arg))
+                if isinstance(arg, torch._subclasses.fake_tensor.FakeTensor):
+                    # Materialize a real (eager) Tensor
+                    arg = torch.randn(arg.shape, dtype=arg.dtype, device=device)
+                vm_args.append(arg)
             outputs = vm["main"](*vm_args)
             return to_torch_tensor(outputs)
 

From 7fadb76cb12a2bcf58b3575b3903744754a01624 Mon Sep 17 00:00:00 2001
From: Masahiro Hiramori
Date: Sun, 8 Feb 2026 11:23:59 +0900
Subject: [PATCH 3/3] remove

---
 python/tvm/relax/frontend/torch/dynamo.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/python/tvm/relax/frontend/torch/dynamo.py b/python/tvm/relax/frontend/torch/dynamo.py
index 150649ea96a7..21388dbef7a0 100644
--- a/python/tvm/relax/frontend/torch/dynamo.py
+++ b/python/tvm/relax/frontend/torch/dynamo.py
@@ -121,10 +121,6 @@ def to_torch_tensor(nd_tensor):
         mod = mod.with_attr("target", target)
         mod = seq(mod)
 
-        if device.type == "cuda":
-            with target:
-                mod = tvm.tir.transform.DefaultGPUSchedule()(mod)
-
         ex = relax_build(mod, target=target)
         vm = tvm.relax.VirtualMachine(ex.mod, device=dev)
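
For reference, a minimal standalone sketch of the explicit DLPack conversion
that patch 1 introduces (patch 2 later drops the helper and hands torch
tensors to the VM directly). The tensor name `x` and its shape are
illustrative, and this assumes working installs of TVM and PyTorch:

    import torch
    import tvm

    x = torch.randn(2, 3)
    # to_dlpack exports the tensor's existing buffer; from_dlpack wraps it
    # on the TVM side without the host-memory round trip that the previous
    # .numpy() path required (and which fails outright for CUDA tensors).
    tvm_tensor = tvm.runtime.from_dlpack(torch.utils.dlpack.to_dlpack(x))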