
Commit 89a255b

jvdp1 and Vandenplas, Jeremie authored
Fixed compilation errors for Intel compilers (#229)
* Fixed compilation errors for Intel compilers
* Adapt CMakeLists for renamed modules

Co-authored-by: Vandenplas, Jeremie <jeremie.vandenplas@wur.nl>
1 parent 3658b8a commit 89a255b
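In short, the renamed multihead-attention source files are reflected in CMakeLists.txt, and the attention layers now declare their constructor generics with a `module procedure` statement instead of repeating the full interface body, while procedures implemented directly in each module's `contains` section drop the `module` prefix; that duplicated, `module`-prefixed form appears to be what the Intel compilers rejected. A minimal sketch of the accepted pattern, using a hypothetical `demo_layer` type rather than the real layers:

    ! Minimal sketch only; demo_layer is hypothetical and not part of neural-fortran.
    module demo_layer_mod
      implicit none
      private
      public :: demo_layer

      type :: demo_layer
        integer :: n_heads = 0
      end type demo_layer

      ! Refer to the contained constructor by name; do not repeat its
      ! interface inside the generic block.
      interface demo_layer
        module procedure demo_layer_cons
      end interface demo_layer

    contains

      ! A plain function, not a `module function`: the body lives in this
      ! module's contains section, not in a submodule.
      function demo_layer_cons(n_heads) result(res)
        integer, intent(in) :: n_heads
        type(demo_layer) :: res
        res % n_heads = n_heads
      end function demo_layer_cons

    end module demo_layer_mod

Callers still construct the type through the generic name, e.g. `layer = demo_layer(n_heads=4)`, just as `self_attention_layer(n_heads)` and `cross_attention_layer(n_heads)` are used after this change.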

File tree

CMakeLists.txt
src/nf/nf_cross_attention_layer.f90
src/nf/nf_multihead_attention_layer.f90 (renamed)
src/nf/nf_multihead_attention_layer_submodule.f90 (renamed)
src/nf/nf_self_attention_layer.f90

5 files changed: +12 -20 lines


CMakeLists.txt

Lines changed: 2 additions & 2 deletions

@@ -56,8 +56,8 @@ add_library(neural-fortran
 src/nf/nf_maxpool2d_layer.f90
 src/nf/nf_maxpool2d_layer_submodule.f90
 src/nf/nf_metrics.f90
-src/nf/nf_multihead_attention.f90
-src/nf/nf_multihead_attention_submodule.f90
+src/nf/nf_multihead_attention_layer.f90
+src/nf/nf_multihead_attention_layer_submodule.f90
 src/nf/nf_network.f90
 src/nf/nf_network_submodule.f90
 src/nf/nf_optimizers.f90

src/nf/nf_cross_attention_layer.f90

Lines changed: 5 additions & 9 deletions

@@ -20,22 +20,18 @@ module nf_cross_attention_layer
 end type cross_attention_layer

 interface cross_attention_layer
-module function cross_attention_layer_cons(n_heads) result(res)
-!! This function returns the `cross_attention_layer` instance.
-integer, intent(in) :: sequence_length, model_dimension, n_heads
-type(cross_attention_layer) :: res
-end function cross_attention_layer_cons
+module procedure cross_attention_layer_cons
 end interface cross_attention_layer

 contains
-module function cross_attention_layer_cons(n_heads) result(res)
+function cross_attention_layer_cons(n_heads) result(res)
 !! This function returns the `cross_attention_layer` instance.
 integer, intent(in) :: n_heads
 type(cross_attention_layer) :: res
 res % n_heads = n_heads
 end function cross_attention_layer_cons

-pure module subroutine backward(self, input, gradient)
+pure subroutine backward(self, input, gradient)
 !! Cross Attention Back propagation
 class(cross_attention_layer), intent(in out) :: self
 real, intent(in) :: input(:, :, :)
@@ -46,7 +42,7 @@ pure module subroutine backward(self, input, gradient)
 self % gradient(2, :, :) = self % key_layer % gradient + self % value_layer % gradient
 end subroutine backward

-pure module subroutine forward(self, input)
+pure subroutine forward(self, input)
 !! Cross Attention Forward propagation
 !! Input Shape (kind, sequence_length, model_dimension)
 !! where kind is 1 for Query and 2 for Key-Value
@@ -56,7 +52,7 @@ pure module subroutine forward(self, input)
 call self % common_forward(input(1, :, :), input(2, :, :), input(2, :, :))
 end subroutine forward

-module subroutine init(self, input_shape)
+subroutine init(self, input_shape)
 class(cross_attention_layer), intent(in out) :: self
 integer, intent(in) :: input_shape(:)

src/nf/nf_multihead_attention.f90 → src/nf/nf_multihead_attention_layer.f90 (file renamed without changes)
src/nf/nf_multihead_attention_submodule.f90 → src/nf/nf_multihead_attention_layer_submodule.f90 (file renamed without changes)

src/nf/nf_self_attention_layer.f90

Lines changed: 5 additions & 9 deletions

@@ -20,22 +20,18 @@ module nf_self_attention_layer
 end type self_attention_layer

 interface self_attention_layer
-module function self_attention_layer_cons(n_heads) result(res)
-!! This function returns the `self_attention_layer` instance.
-integer, intent(in) :: n_heads
-type(self_attention_layer) :: res
-end function self_attention_layer_cons
+module procedure self_attention_layer_cons
 end interface self_attention_layer

 contains
-module function self_attention_layer_cons(n_heads) result(res)
+function self_attention_layer_cons(n_heads) result(res)
 !! This function returns the `self_attention_layer` instance.
 integer, intent(in) :: n_heads
 type(self_attention_layer) :: res
 res % n_heads = n_heads
 end function self_attention_layer_cons

-pure module subroutine backward(self, input, gradient, attention_mask)
+pure subroutine backward(self, input, gradient, attention_mask)
 !! Self Attention back propagation
 !! Returns sum of Query, Key and Value gradients
 class(self_attention_layer), intent(in out) :: self
@@ -50,7 +46,7 @@ pure module subroutine backward(self, input, gradient, attention_mask)
 + self % value_layer % gradient
 end subroutine backward

-pure module subroutine forward(self, input)
+pure subroutine forward(self, input)
 !! Cross Attention forward propagation
 !! Passes input three times into MultiHead Attention
 !! Input Shape: (sequence_length, model_dimension)
@@ -60,7 +56,7 @@ pure module subroutine forward(self, input)
 call self % common_forward(input, input, input)
 end subroutine forward

-module subroutine init(self, input_shape)
+subroutine init(self, input_shape)
 class(self_attention_layer), intent(in out) :: self
 integer, intent(in) :: input_shape(:)
