@@ -18,39 +18,37 @@ extern __constant int __OptDisable;
 
 // MEMFENCE IMPLEMENTATION
 
-void __attribute__((optnone)) __intel_memfence_optnone(bool flushRW, bool isGlobal, bool invalidateL1, bool forceLocalLSCScope)
+void __attribute__((optnone)) __intel_memfence_optnone(bool flushRW, bool isGlobal, bool invalidateL1)
 {
-#define MEMFENCE_IF(V1, V5, V6, V7)                                                      \
-if (flushRW == V1 && isGlobal == V5 && invalidateL1 == V6 && forceLocalLSCScope == V7)   \
-{                                                                                        \
-    __builtin_IB_memfence(true, V1, false, false, false, V5, V6, V7);                    \
+#define MEMFENCE_IF(V1, V5, V6)                                     \
+if (flushRW == V1 && isGlobal == V5 && invalidateL1 == V6)          \
+{                                                                   \
+    __builtin_IB_memfence(true, V1, false, false, false, V5, V6);   \
 } else
 
 // Generate combinations for all MEMFENCE_IF cases, e.g.:
-// true, true, true, true
-// true, true, true, false etc.
-#define MF_L3(...) MF_L2(__VA_ARGS__,false) MF_L2(__VA_ARGS__,true)
+// true, true, true
+// true, true, false etc.
 #define MF_L2(...) MF_L1(__VA_ARGS__,false) MF_L1(__VA_ARGS__,true)
 #define MF_L1(...) MEMFENCE_IF(__VA_ARGS__,false) MEMFENCE_IF(__VA_ARGS__,true)
-MF_L3(false)
-MF_L3(true) {}
+MF_L2(false)
+MF_L2(true) {}
 
 #undef MEMFENCE_IF
-#undef MF_L3
 #undef MF_L2
 #undef MF_L1
 }
-void __intel_memfence(bool flushRW, bool isGlobal, bool invalidateL1, bool forceLocalLSCScope)
+void __intel_memfence(bool flushRW, bool isGlobal, bool invalidateL1)
 {
-    __builtin_IB_memfence(true, flushRW, false, false, false, isGlobal, invalidateL1, forceLocalLSCScope);
+    __builtin_IB_memfence(true, flushRW, false, false, false, isGlobal, invalidateL1);
 }
 
-void __intel_memfence_handler(bool flushRW, bool isGlobal, bool invalidateL1, bool forceLocalLSCScope)
+void __intel_memfence_handler(bool flushRW, bool isGlobal, bool invalidateL1)
 {
     if (__OptDisable)
-        __intel_memfence_optnone(flushRW, isGlobal, invalidateL1, forceLocalLSCScope);
+        __intel_memfence_optnone(flushRW, isGlobal, invalidateL1);
     else
-        __intel_memfence(flushRW, isGlobal, invalidateL1, forceLocalLSCScope);
+        __intel_memfence(flushRW, isGlobal, invalidateL1);
 }
 
 // TYPEDMEMFENCE IMPLEMENTATION
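
Note on the combination macros in the hunk above: MF_L1 and MF_L2 each append a literal false and a literal true to the argument list, so MF_L2(false) and MF_L2(true) (with the trailing {}) expand into an if/else chain covering all eight (flushRW, isGlobal, invalidateL1) combinations, each branch calling __builtin_IB_memfence with literal arguments (presumably so the optnone variant still hands the builtin compile-time constants even though the optimizer will not fold them). The standalone sketch below reproduces the same expansion pattern; the names CASE_IF, GEN_L1, GEN_L2, report, and dispatch are illustrative only and are not part of the IGC sources.

/*
 * Minimal sketch of the combination-generation trick used by
 * MF_L2 / MF_L1 / MEMFENCE_IF. All identifiers here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a builtin that must be called with constant flags. */
static void report(bool a, bool b, bool c)
{
    printf("a=%d b=%d c=%d\n", a, b, c);
}

static void dispatch(bool A, bool B, bool C)
{
/* Leaf macro: one branch of the if/else chain per complete combination. */
#define CASE_IF(V1, V2, V3)                      \
    if (A == V1 && B == V2 && C == V3)           \
    {                                            \
        report(V1, V2, V3);                      \
    } else

/* Each level doubles the number of combinations by appending false/true. */
#define GEN_L2(...) GEN_L1(__VA_ARGS__, false) GEN_L1(__VA_ARGS__, true)
#define GEN_L1(...) CASE_IF(__VA_ARGS__, false) CASE_IF(__VA_ARGS__, true)

    /* Expands to an 8-way if/else chain; the trailing {} closes the final else. */
    GEN_L2(false)
    GEN_L2(true) {}

#undef CASE_IF
#undef GEN_L2
#undef GEN_L1
}

int main(void)
{
    dispatch(true, false, true); /* prints: a=1 b=0 c=1 */
    return 0;
}
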
@@ -99,12 +97,12 @@ static void __intel_atomic_work_item_fence( Scope_t Memory, uint Semantics )
         // although on some platforms they may be elided; platform-specific checks are performed in codegen
         if (Semantics & WorkgroupMemory)
         {
-            __intel_memfence_handler(false, false, false, false);
+            __intel_memfence_handler(false, false, false);
         }
         if (Semantics & CrossWorkgroupMemory)
         {
             bool flushL3 = Memory == Device || Memory == CrossDevice;
-            __intel_memfence_handler(flushL3, true, invalidateL1, false);
+            __intel_memfence_handler(flushL3, true, invalidateL1);
         }
     }
 }