From 4729e135fac53fe706e47b019a164e5d46788bff Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Tue, 2 Sep 2025 17:07:56 +0100 Subject: [PATCH 01/10] Make `apply-target` extensible with new targets It should now be possible to inherit from `ApplyTargetConfig` and override the `_get_config` member function to return a new config. --- ftn/transforms/apply_target_config.py | 54 +++++++++++++++++++-------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/ftn/transforms/apply_target_config.py b/ftn/transforms/apply_target_config.py index c365f69..0b392b1 100644 --- a/ftn/transforms/apply_target_config.py +++ b/ftn/transforms/apply_target_config.py @@ -1,4 +1,5 @@ from dataclasses import dataclass +from abc import ABC, abstractmethod from xdsl.context import Context from xdsl.dialects import builtin, dlti @@ -7,16 +8,32 @@ from ftn.dialects import device -class TenstorrentConfiguration: - def get(): +class TargetConfiguration(ABC): + @abstractmethod + @classmethod + def get(cls) -> dlti.TargetDeviceSpecAttr: ... + + @abstractmethod + @classmethod + def _memory_subsystem(cls) -> dlti.MapAttr: ... + + @abstractmethod + @classmethod + def _compute_subsystem(cls) -> dlti.MapAttr: ... 
+ + +class TenstorrentConfiguration(TargetConfiguration): + @classmethod + def get(cls): return dlti.TargetDeviceSpecAttr( { - "memory": TenstorrentConfiguration._memory_subsystem(), - "compute": TenstorrentConfiguration._compute_subsystem(), + "memory": cls._memory_subsystem(), + "compute": cls._compute_subsystem(), } ) - def _memory_subsystem(): + @classmethod + def _memory_subsystem(cls): config = { "DRAM": { "kind": device.MemoryKindAttr(device.MemoryKind.DDR), @@ -25,7 +42,8 @@ def _memory_subsystem(): } return dlti.MapAttr(config) - def _compute_subsystem(): + @classmethod + def _compute_subsystem(cls): config = { "architecture_type": device.ArchitectureKindAttr( device.ArchitectureKind.MANYCORE @@ -53,16 +71,18 @@ def _compute_subsystem(): return dlti.MapAttr(config) -class U280Configuration: - def get(): +class U280Configuration(TargetConfiguration): + @classmethod + def get(cls): return dlti.TargetDeviceSpecAttr( { - "memory": U280Configuration._memory_subsystem(), - "compute": U280Configuration._compute_subsystem(), + "memory": cls._memory_subsystem(), + "compute": cls._compute_subsystem(), } ) - def _memory_subsystem(): + @classmethod + def _memory_subsystem(cls): config = { "DRAM": { "kind": device.MemoryKindAttr(device.MemoryKind.DDR), @@ -76,7 +96,8 @@ def _memory_subsystem(): } return dlti.MapAttr(config) - def _compute_subsystem(): + @classmethod + def _compute_subsystem(cls): config = { "architecture_type": device.ArchitectureKindAttr( device.ArchitectureKind.FPGA @@ -153,14 +174,17 @@ def generate_system_config(self, accelerator_name, accelerator_config): } ) + def _get_config(self) -> dlti.TargetDeviceSpecAttr: + if config := SYSTEM_CONFIGURATIONS.get(self.target): + return config.get() + raise ValueError(f"No such target configuration {self.target}") + def apply(self, ctx: Context, op: builtin.ModuleOp) -> None: op.attributes["omp.target_triples"] = builtin.ArrayAttr( [builtin.StringAttr(self.target)] ) - assert self.target in 
SYSTEM_CONFIGURATIONS.keys() - - config = SYSTEM_CONFIGURATIONS[self.target].get() + config = self._get_config() op.attributes["dlti.target_system_spec"] = self.generate_system_config( self.target, config From 1338b6db64b676fde2cc1a736a9cd644ddd87e68 Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Tue, 2 Sep 2025 17:22:57 +0100 Subject: [PATCH 02/10] classmethod should be specified before abstractmethod --- ftn/transforms/apply_target_config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ftn/transforms/apply_target_config.py b/ftn/transforms/apply_target_config.py index 0b392b1..abf23ed 100644 --- a/ftn/transforms/apply_target_config.py +++ b/ftn/transforms/apply_target_config.py @@ -9,16 +9,16 @@ class TargetConfiguration(ABC): - @abstractmethod @classmethod + @abstractmethod def get(cls) -> dlti.TargetDeviceSpecAttr: ... - @abstractmethod @classmethod + @abstractmethod def _memory_subsystem(cls) -> dlti.MapAttr: ... - @abstractmethod @classmethod + @abstractmethod def _compute_subsystem(cls) -> dlti.MapAttr: ... 
From cff59c194b356d14d44ca26ef1b05421d009fc52 Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Tue, 2 Sep 2025 17:48:54 +0100 Subject: [PATCH 03/10] added documentation for _get_config --- ftn/transforms/apply_target_config.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ftn/transforms/apply_target_config.py b/ftn/transforms/apply_target_config.py index abf23ed..fdd2e9b 100644 --- a/ftn/transforms/apply_target_config.py +++ b/ftn/transforms/apply_target_config.py @@ -175,6 +175,11 @@ def generate_system_config(self, accelerator_name, accelerator_config): ) def _get_config(self) -> dlti.TargetDeviceSpecAttr: + """ + Get the device spec for the current `self.target` + + If overriding this function, make sure to *not* specify `name` field again + """ if config := SYSTEM_CONFIGURATIONS.get(self.target): return config.get() raise ValueError(f"No such target configuration {self.target}") From 5cc4561f04c1454a97391a6a51fa2fd4f50c09c7 Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Mon, 15 Sep 2025 18:27:43 +0100 Subject: [PATCH 04/10] WIP: conditionally wait on implicitly sent data --- ftn/transforms/lower_omp_target_data.py | 26 +++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/ftn/transforms/lower_omp_target_data.py b/ftn/transforms/lower_omp_target_data.py index 25ab1c8..1d82ac1 100644 --- a/ftn/transforms/lower_omp_target_data.py +++ b/ftn/transforms/lower_omp_target_data.py @@ -1,10 +1,11 @@ +from collections.abc import Sequence from dataclasses import dataclass from enum import Enum from xdsl.builder import Builder from xdsl.context import Context from xdsl.dialects import arith, builtin, memref, omp, scf -from xdsl.ir import BlockArgument, Block, Region +from xdsl.ir import Block, BlockArgument, Operation, Region, Region Block, SSAValue from xdsl.passes import ModulePass from xdsl.pattern_rewriter import ( GreedyRewritePatternApplier, @@ -25,6
+26,8 @@ class DataEnvironmentDirection(Enum): BOTH = 3 +_DO_NOT_WAIT = 1<<64 + class DataMovementGenerator: def collect_mapped_vars_by_stack_and_heap(mapped_vars, use_mapped_vars): sorted_mapped_vars = [] @@ -305,7 +308,7 @@ def generate_conditional_on_data_exists( """ data_exists_op = device.DataCheckExists(var_name, memory_space) - ops = [data_exists_op] + ops: list[Operation] = [data_exists_op] if is_not_conditional: const_op = arith.ConstantOp.from_int_and_width(1, 1) ex_io_op = arith.XOrIOp(const_op, data_exists_op, builtin.i1) @@ -314,6 +317,12 @@ def generate_conditional_on_data_exists( else: condition_ssa = data_exists_op + if false_region is None: + @Builder.implicit_region([]) + def false_region(args: tuple[BlockArgument, ...]) -> None: + dummy_tag = memref.AllocaOp.get(builtin.i32, shape=[]) + do_not_wait = arith.ConstantOp(builtin.IntegerAttr.from_index_int_value(_DO_NOT_WAIT)) + scf.YieldOp(dummy_tag, do_not_wait) cond = scf.IfOp( condition_ssa, conditional_return_type, true_region, false_region ) @@ -441,15 +450,20 @@ def generate_copy_from_device(var_name, memory_space, var_type, dest): ) return tag_ssa, [device_memref] + ops_list - def generate_dma_waits_for_tags(wait_ssas_list): + @staticmethod + def generate_dma_waits_for_tags(wait_ssas_list: Sequence[tuple[SSAValue|Operation, SSAValue|Operation]]): """ Generates the DMA wait operations based upon the provided wait list, each entry in the wait list is a tuple (wait tag, number elements). 
""" - ops_list = [] + ops_list: list[Operation] = [] for tag, num_els in wait_ssas_list: - wait_op = memref.DmaWaitOp.get(tag, [], num_els) - ops_list.append(wait_op) + do_not_wait = arith.ConstantOp(builtin.IntegerAttr.from_index_int_value(_DO_NOT_WAIT)) + should_wait = arith.CmpiOp(num_els, do_not_wait, "ne") + if_op = scf.IfOp(should_wait, [], Region(Block( + [memref.DmaWaitOp.get(tag, [], num_els)] + ))) + ops_list.extend([do_not_wait, should_wait, if_op]) return ops_list From ab0568a613bbc40fdc4cd7b2c6bd56dca7b90677 Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Mon, 15 Sep 2025 18:30:18 +0100 Subject: [PATCH 05/10] forgot terminator in scf.if --- ftn/transforms/lower_omp_target_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ftn/transforms/lower_omp_target_data.py b/ftn/transforms/lower_omp_target_data.py index 1d82ac1..f8bdeda 100644 --- a/ftn/transforms/lower_omp_target_data.py +++ b/ftn/transforms/lower_omp_target_data.py @@ -461,7 +461,7 @@ def generate_dma_waits_for_tags(wait_ssas_list: Sequence[tuple[SSAValue|Operatio do_not_wait = arith.ConstantOp(builtin.IntegerAttr.from_index_int_value(_DO_NOT_WAIT)) should_wait = arith.CmpiOp(num_els, do_not_wait, "ne") if_op = scf.IfOp(should_wait, [], Region(Block( - [memref.DmaWaitOp.get(tag, [], num_els)] + [memref.DmaWaitOp.get(tag, [], num_els), scf.YieldOp()] ))) ops_list.extend([do_not_wait, should_wait, if_op]) return ops_list From f356158f6e40ef0826ee8d1d3442a6924c5016be Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Tue, 30 Sep 2025 11:34:44 +0100 Subject: [PATCH 06/10] replaced uses of -1 as the dynamic index marker xDSL now uses a different value for the dynamic index marker - `builtin.DYNAMIC_INDEX` with value `-(2**63)`, which matches the value MLIR uses for this. Now `memref`s, and `tensor`s that use `-1` as the dynamic index get printed with a literal `-1`, rather than `?`. 
--- ftn/transforms/lift_omp_to_tensor.py | 2 +- ftn/transforms/lower_omp_target_data.py | 2 +- ftn/transforms/tenstorrent/convert_to_tt.py | 2 +- ftn/transforms/to_core/components/ftn_types.py | 6 +++--- ftn/transforms/to_core/components/intrinsics.py | 8 ++++---- ftn/transforms/to_core/components/load_store.py | 8 ++++---- ftn/transforms/to_core/components/memory.py | 6 +++--- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/ftn/transforms/lift_omp_to_tensor.py b/ftn/transforms/lift_omp_to_tensor.py index 6ce8457..a96455d 100644 --- a/ftn/transforms/lift_omp_to_tensor.py +++ b/ftn/transforms/lift_omp_to_tensor.py @@ -502,7 +502,7 @@ def lift_op( tensor_sizes.append(int((upper_const - (lower_const - 1)) / step_const)) else: # Otherwise this dimension size is dynamic - tensor_sizes.append(-1) + tensor_sizes.append(builtin.DYNAMIC_INDEX) # Create dependency tree walker, this is passed the private (intermediate) # memrefs, and SSA of the device mapped data diff --git a/ftn/transforms/lower_omp_target_data.py b/ftn/transforms/lower_omp_target_data.py index f8bdeda..ddd371a 100644 --- a/ftn/transforms/lower_omp_target_data.py +++ b/ftn/transforms/lower_omp_target_data.py @@ -343,7 +343,7 @@ def generate_allocate_on_device(var_type, var_name, memory_space, size_ssas): """ dynamic_ssas = [] for idx, shape in enumerate(var_type.shape): - if shape.data == -1: + if shape.data == builtin.DYNAMIC_INDEX: dynamic_ssas.append(size_ssas[idx]) return device.AllocOp( diff --git a/ftn/transforms/tenstorrent/convert_to_tt.py b/ftn/transforms/tenstorrent/convert_to_tt.py index d94fbbd..0bdb595 100644 --- a/ftn/transforms/tenstorrent/convert_to_tt.py +++ b/ftn/transforms/tenstorrent/convert_to_tt.py @@ -497,7 +497,7 @@ def generate_data_in(self, module, memory_type, references, cb_idxs, new_block, mem_size_bytes_op=arith.Muli(dt_width_conversion_op, new_block.args[(len(memory_type)*2)+idx]) read_op=data_movement.DMNocAsyncRead(dm_op.results[0], cb_op.results[0], 
mem_size_bytes_op) - target_memref=builtin.MemRefType(element_type, [-1]) + target_memref=builtin.MemRefType(element_type, [builtin.DYNAMIC_INDEX]) conversion_op=builtin.UnrealizedConversionCastOp.get([cb_op.results[0]], [target_memref]) conversion_op.results[0].name_hint = f"src{idx}_data" diff --git a/ftn/transforms/to_core/components/ftn_types.py b/ftn/transforms/to_core/components/ftn_types.py index 876667a..c211ddd 100644 --- a/ftn/transforms/to_core/components/ftn_types.py +++ b/ftn/transforms/to_core/components/ftn_types.py @@ -25,7 +25,7 @@ def compare_memrefs(memref_a, memref_b): return MemrefComparison.INCOMPATIBLE for dim_size_a, dim_size_b in zip(memref_a.shape, memref_b.shape): if dim_size_a.data != dim_size_b.data and ( - dim_size_a.data == -1 or dim_size_b.data == -1 + dim_size_a.data == builtin.DYNAMIC_INDEX or dim_size_b.data == builtin.DYNAMIC_INDEX ): return MemrefComparison.CONVERTABLE return MemrefComparison.SAME @@ -111,7 +111,7 @@ def convert_fir_type_to_standard(fir_type, ref_as_mem_ref=True): if isa(shape_el, builtin.IntegerAttr): dim_sizes.append(shape_el.value.data) else: - dim_sizes.append(-1) + dim_sizes.append(builtin.DYNAMIC_INDEX) # Reverse the sizes to go from Fortran to C allocation semantics dim_sizes.reverse() return builtin.MemRefType( @@ -229,7 +229,7 @@ def translate_convert(program_state: ProgramState, ctx: SSAValueCtx, op: fir.Con shape_size = [] for s in out_type.type.shape.data: if isa(s, fir.DeferredAttr): - shape_size.append(-1) + shape_size.append(builtin.DYNAMIC_INDEX) else: shape_size.append(s.value.data) # Reverse shape_size to get it from Fortran allocation to C/MLIR allocation diff --git a/ftn/transforms/to_core/components/intrinsics.py b/ftn/transforms/to_core/components/intrinsics.py index 5725281..8449863 100644 --- a/ftn/transforms/to_core/components/intrinsics.py +++ b/ftn/transforms/to_core/components/intrinsics.py @@ -19,12 +19,12 @@ def handle_create_temporary_linalg_output_memref( result_type, 
element_type, input_ssas, input_dims_to_read ): output_shape = [ - -1 if isa(s, fir.DeferredAttr) else s.value for s in result_type.shape + builtin.DYNAMIC_INDEX if isa(s, fir.DeferredAttr) else s.value for s in result_type.shape ] ops_list = [] dynamic_sizes = [] - if -1 in output_shape: + if builtin.DYNAMIC_INDEX in output_shape: # If we have deferred sizes then grab the output sizes from the input array sizes # Ensure all elements are -1 assert len(set(output_shape)) == 1 @@ -177,9 +177,9 @@ def handle_reduction_operation( memref_shape = [] memref_dynamic_sizes = [] elif len(reduction_dimensions) == 1: - if -1 in input_array_shape: + if builtin.DYNAMIC_INDEX in input_array_shape: assert len(set(input_array_shape)) == 1 - memref_shape = [-1] * (len(array_load_ssa.type.shape) - 1) + memref_shape = [builtin.DYNAMIC_INDEX] * (len(array_load_ssa.type.shape) - 1) memref_dynamic_sizes = [] if len(array_load_ssa.type.shape) > 1: for dim in list(range(len(array_load_ssa.type.shape))): diff --git a/ftn/transforms/to_core/components/load_store.py b/ftn/transforms/to_core/components/load_store.py index c05768e..8149148 100644 --- a/ftn/transforms/to_core/components/load_store.py +++ b/ftn/transforms/to_core/components/load_store.py @@ -410,13 +410,13 @@ def generate_allocatable_array_allocate( assert len(dim_sizes) == len(dim_starts) == len(dim_ends) - # Now create memref, passing -1 as shape will make this deferred size + # Now create memref, passing DYNAMIC_INDEX as shape will make this deferred size # Reverse the indicies as Fortran and C/MLIR are opposite in terms of # the order of the contiguous dimension (F is least, whereas C/MLIR is highest) dim_ssa_reversed = dim_ssas.copy() dim_ssa_reversed.reverse() memref_allocation_op = memref_alloca_op = memref.AllocOp.get( - base_type, shape=[-1] * len(dim_ssas), dynamic_sizes=dim_ssa_reversed + base_type, shape=[builtin.DYNAMIC_INDEX] * len(dim_ssas), dynamic_sizes=dim_ssa_reversed ) ops_list.append(memref_allocation_op) @@ 
-462,13 +462,13 @@ def handle_pointer_assignment( source_ssa = ctx[source_op] ops = [] - if any(i.data != -1 for i in ctx[source_op].type.shape.data): + if any(i.data != builtin.DYNAMIC_INDEX for i in ctx[source_op].type.shape.data): # The source type has explicit dimension sizes, by definition a pointer must be unknown # dimension sizes so we need to convert num_dims = len(ctx[source_op].type.shape.data) cast_op = memref.CastOp.get( source_ssa, - builtin.MemRefType(source_ssa.type.element_type, shape=num_dims * [-1]), + builtin.MemRefType(source_ssa.type.element_type, shape=num_dims * [builtin.DYNAMIC_INDEX]), ) source_ssa = cast_op.results[0] ops.append(cast_op) diff --git a/ftn/transforms/to_core/components/memory.py b/ftn/transforms/to_core/components/memory.py index d439ae6..7c8d000 100644 --- a/ftn/transforms/to_core/components/memory.py +++ b/ftn/transforms/to_core/components/memory.py @@ -423,7 +423,7 @@ def translate_declare( else: alloc_memref_container = memref.AllocaOp.get( builtin.MemRefType( - op.results[0].type.type.type.type.type, shape=num_dims * [-1] + op.results[0].type.type.type.type.type, shape=num_dims * [builtin.DYNAMIC_INDEX] ), shape=[], ) @@ -720,14 +720,14 @@ def translate_elemental(program_state, ctx, op: hlfir.ElementalOp): sizes.reverse() memref_shape = [ - -1 if isa(f, fir.DeferredAttr) else f.value.data + builtin.DYNAMIC_INDEX if isa(f, fir.DeferredAttr) else f.value.data for f in op.results[0].type.shape ] dynamic_sizes = [] for idx, s in enumerate(memref_shape): - if s == -1: + if s == builtin.DYNAMIC_INDEX: dynamic_sizes.append(ctx[sizes[idx]]) memref_alloca_op = memref.AllocOp( From d25164e37e2294995805c0fa608829e6a6698647 Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Tue, 14 Oct 2025 14:34:49 +0100 Subject: [PATCH 07/10] expose lift-omp-to-tensor --- ftn/tools/ftn_opt.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ftn/tools/ftn_opt.py b/ftn/tools/ftn_opt.py 
index 931f854..7091728 100755 --- a/ftn/tools/ftn_opt.py +++ b/ftn/tools/ftn_opt.py @@ -6,7 +6,9 @@ from ftn.transforms.rewrite_fir_to_core import RewriteFIRToCore from ftn.transforms.merge_memref_deref import MergeMemRefDeref from ftn.transforms.lower_omp_target_data import LowerOmpTargetDataPass -# from ftn.transforms.extract_target import ExtractTarget +from ftn.transforms.apply_target_config import ApplyTargetConfig +from ftn.transforms.omp_target_to_kernel import OmpTargetToKernelPass +from ftn.transforms.lift_omp_to_tensor import LiftOmpToTensorPass # from ftn.transforms.isolate_target import IsolateTarget # from psy.extract_stencil import ExtractStencil # from ftn.transforms.tenstorrent.convert_to_tt import ConvertToTT @@ -28,7 +30,9 @@ def register_all_passes(self): self.register_pass("rewrite-fir-to-core", lambda: RewriteFIRToCore) self.register_pass("merge-memref-deref", lambda: MergeMemRefDeref) self.register_pass("lower-omp-target-data", lambda: LowerOmpTargetDataPass) - # self.register_pass("extract-target", lambda: ExtractTarget) + self.register_pass("apply-target", lambda: ApplyTargetConfig) + self.register_pass("omp-target-to-kernel", lambda: OmpTargetToKernelPass) + self.register_pass("lift-omp-to-tensor", lambda: LiftOmpToTensorPass) # self.register_pass("isolate-target", lambda: IsolateTarget) # self.register_pass("convert-to-tt", lambda: ConvertToTT) From e89773c4836954b837e65c1707d3abad8c6f598c Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Fri, 31 Oct 2025 23:33:34 +0000 Subject: [PATCH 08/10] fixed imports --- ftn/transforms/lower_omp_target_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ftn/transforms/lower_omp_target_data.py b/ftn/transforms/lower_omp_target_data.py index ddd371a..33340cb 100644 --- a/ftn/transforms/lower_omp_target_data.py +++ b/ftn/transforms/lower_omp_target_data.py @@ -5,7 +5,7 @@ from xdsl.builder import Builder from xdsl.context import Context from 
xdsl.dialects import arith, builtin, memref, omp, scf -from xdsl.ir import Block, BlockArgument, Operation, Region, Region Block, SSAValue +from xdsl.ir import Block, BlockArgument, Operation, Region, SSAValue from xdsl.passes import ModulePass from xdsl.pattern_rewriter import ( GreedyRewritePatternApplier, From f7d2ba2aefe3d2d3cf87dc8e13802aec33f7cc4c Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Fri, 31 Oct 2025 23:44:33 +0000 Subject: [PATCH 09/10] removed use of `opaque()` for llvm pointers xDSL has dropped support for typed llvm pointers, so the default ctor now produces the equivalent of the old `LLVMPointerType.opaque()`. --- ftn/transforms/to_core/components/ftn_types.py | 8 ++++---- ftn/transforms/to_core/components/functions.py | 2 +- ftn/transforms/to_core/components/memory.py | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ftn/transforms/to_core/components/ftn_types.py b/ftn/transforms/to_core/components/ftn_types.py index c211ddd..34d326c 100644 --- a/ftn/transforms/to_core/components/ftn_types.py +++ b/ftn/transforms/to_core/components/ftn_types.py @@ -77,7 +77,7 @@ def does_type_represent_ftn_pointer(type_chain): def convert_fir_type_to_standard_if_needed(fir_type): if isa(fir_type, fir.ReferenceType) and fir_type.type == builtin.i8: - return llvm.LLVMPointerType.opaque() + return llvm.LLVMPointerType() else: return convert_fir_type_to_standard(fir_type) @@ -93,7 +93,7 @@ def convert_fir_type_to_standard(fir_type, ref_as_mem_ref=True): base_t, [], builtin.NoneAttr(), builtin.NoneAttr() ) else: - return llvm.LLVMPointerType.opaque() + return llvm.LLVMPointerType() elif isa(fir_type, fir.BoxType): return convert_fir_type_to_standard(fir_type.type, ref_as_mem_ref) elif isa(fir_type, fir.HeapType): @@ -121,7 +121,7 @@ def convert_fir_type_to_standard(fir_type, ref_as_mem_ref=True): return builtin.i1 elif isa(fir_type, fir.BoxCharType): return llvm.LLVMStructType.from_type_list( - 
[llvm.LLVMPointerType.opaque(), builtin.i64] + [llvm.LLVMPointerType(), builtin.i64] ) elif isa(fir_type, builtin.TupleType): new_types = [] @@ -214,7 +214,7 @@ def translate_convert(program_state: ProgramState, ctx: SSAValueCtx, op: fir.Con get_element_ptr = llvm.GEPOp( ctx[op.value], [0, 0], - result_type=llvm.LLVMPointerType.opaque(), + result_type=llvm.LLVMPointerType(), pointee_type=llvm.LLVMArrayType.from_size_and_type( 1, builtin.IntegerType(8) ), diff --git a/ftn/transforms/to_core/components/functions.py b/ftn/transforms/to_core/components/functions.py index cd4ee55..39683a5 100644 --- a/ftn/transforms/to_core/components/functions.py +++ b/ftn/transforms/to_core/components/functions.py @@ -51,7 +51,7 @@ def translate_function(program_state: ProgramState, ctx: SSAValueCtx, fn: func.F if ftn_types.does_type_represent_ftn_pointer(fir_type): # If we are passing a Fortran pointer then we need to handle this differently, actually pass # the LLVM pointer of this and reconstruct, to access the same underlying memref - converted_type = llvm.LLVMPointerType.opaque() + converted_type = llvm.LLVMPointerType() ptr_unpack_args.append((idx, fir_type)) else: converted_type = ftn_types.convert_fir_type_to_standard(fir_type) diff --git a/ftn/transforms/to_core/components/memory.py b/ftn/transforms/to_core/components/memory.py index 7c8d000..1882237 100644 --- a/ftn/transforms/to_core/components/memory.py +++ b/ftn/transforms/to_core/components/memory.py @@ -29,7 +29,7 @@ def generate_memref_from_llvm_ptr(llvm_ptr_in_ssa, dim_sizes, target_type): # Builds a memref from an LLVM pointer. 
This is required if we are working with # global arrays, as they are llvm.array, and the pointer is grabbed from that and # then the memref constructed - ptr_type = llvm.LLVMPointerType.opaque() + ptr_type = llvm.LLVMPointerType() offsets = [1] if len(dim_sizes) > 1: @@ -637,7 +637,7 @@ def translate_address_of( return [] assert isa(op.results[0].type, fir.ReferenceType) - global_lookup = llvm.AddressOfOp(op.symbol, llvm.LLVMPointerType.opaque()) + global_lookup = llvm.AddressOfOp(op.symbol, llvm.LLVMPointerType()) ctx[op.results[0]] = global_lookup.results[0] return [global_lookup] @@ -648,7 +648,7 @@ def translate_emboxchar(program_state, ctx, op: fir.EmboxcharOp): return [] struct_type = llvm.LLVMStructType.from_type_list( - [llvm.LLVMPointerType.opaque(), builtin.i64] + [llvm.LLVMPointerType(), builtin.i64] ) char_ptr_ops_list = expressions.translate_expr(program_state, ctx, op.memref) @@ -693,7 +693,7 @@ def translate_unboxchar(program_state, ctx, op: fir.UnboxcharOp): extract_char_ptr = llvm.ExtractValueOp( builtin.DenseArrayBase.from_list(builtin.i64, [0]), ctx[op.boxchar], - llvm.LLVMPointerType.opaque(), + llvm.LLVMPointerType(), ) extract_char_len = llvm.ExtractValueOp( builtin.DenseArrayBase.from_list(builtin.i64, [1]), ctx[op.boxchar], builtin.i64 From 36a042d27540b866220e7a28c5d4cf3c7b7b4685 Mon Sep 17 00:00:00 2001 From: dk949 <56653556+dk949@users.noreply.github.com> Date: Wed, 5 Nov 2025 14:31:25 +0000 Subject: [PATCH 10/10] explicitly set latest supported xdsl in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index e941f78..f2ab779 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ license = "BSD-3-Clause" authors = [ { name = "Nick Brown" } ] classifiers = ["Programming Language :: Python :: 3"] dependencies = [ - "xdsl@git+https://github.com/xdslproject/xdsl@main" + "xdsl@git+https://github.com/xdslproject/xdsl@v0.54.4" ] dynamic = [ "version" ]