Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 37 additions & 17 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1754,24 +1754,44 @@ bool TargetLowering::SimplifyDemandedBits(
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// If (1) we only need the sign-bit, (2) the setcc operands are the same
// width as the setcc result, and (3) the result of a setcc conforms to 0 or
// -1, we may be able to bypass the setcc.
if (DemandedBits.isSignMask() &&
Op0.getScalarValueSizeInBits() == BitWidth &&
getBooleanContents(Op0.getValueType()) ==
BooleanContent::ZeroOrNegativeOneBooleanContent) {
// If we're testing X < 0, then this compare isn't needed - just use X!
// FIXME: We're limiting to integer types here, but this should also work
// if we don't care about FP signed-zero. The use of SETLT with FP means
// that we don't care about NaNs.
if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
(isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
return TLO.CombineTo(Op, Op0);

// TODO: Should we check for other forms of sign-bit comparisons?
// Examples: X <= -1, X >= 0
// If we're testing X < 0, X >= 0, X <= -1 (X is of integer type) or X > -1
// (X is of integer type), then we only need the sign mask of the previous
// result.
// FIXME: We're limiting to integer types for X < 0 or X >= 0 here, but this
// should also work if we don't care about FP signed-zero. The use of SETLT
// with FP means that we don't care about NaNs.
if (((CC == ISD::SETLT || CC == ISD::SETGE) &&
Op1.getValueType().isInteger() && isNullOrNullSplat(Op1)) ||
((CC == ISD::SETLE || CC == ISD::SETGT) &&
Op1.getValueType().isInteger() && isAllOnesOrAllOnesSplat(Op1))) {
KnownBits KnownOp0;
bool Changed = false;
if (SimplifyDemandedBits(
Op0, APInt::getSignMask(Op0.getScalarValueSizeInBits()),
DemandedElts, KnownOp0, TLO, Depth + 1))
Changed = true;
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we just return true instead of allowing multiple optimizations? I think we will end up revisiting. Do any other places in this file do multiple optimizations like this?

// If (1) we only need the sign-bit, (2) the setcc operands are the same
// width as the setcc result, and (3) the result of a setcc conforms to 0
// or -1, we may be able to bypass the setcc.
if (DemandedBits.isSignMask() &&
Op0.getScalarValueSizeInBits() == BitWidth &&
getBooleanContents(Op0.getValueType()) ==
BooleanContent::ZeroOrNegativeOneBooleanContent) {
// If we remove a >= 0 or > -1 (for integers), we need to introduce a
// NOT operation.
if (CC == ISD::SETGE || CC == ISD::SETGT) {
SDLoc DL(Op);
EVT VT = Op0.getValueType();
SDValue NotOp0 = TLO.DAG.getNOT(DL, Op0, VT);
Changed |= TLO.CombineTo(Op, NotOp0);
} else {
Changed |= TLO.CombineTo(Op, Op0);
}
}
return Changed;
}
// TODO: Should we check for other forms of sign-bit comparisons?
// Examples: X <= -1, X > -1
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this FIXME still relevant?

if (getBooleanContents(Op0.getValueType()) ==
TargetLowering::ZeroOrOneBooleanContent &&
BitWidth > 1)
Expand Down
30 changes: 5 additions & 25 deletions llvm/test/CodeGen/AArch64/tbz-tbnz.ll
Original file line number Diff line number Diff line change
Expand Up @@ -838,31 +838,11 @@ if.then28: ; preds = %if.end26
}

define i1 @avifSequenceHeaderParse() {
; CHECK-SD-LABEL: avifSequenceHeaderParse:
; CHECK-SD: // %bb.0: // %entry
; CHECK-SD-NEXT: mov w8, #1 // =0x1
; CHECK-SD-NEXT: cbz w8, .LBB24_2
; CHECK-SD-NEXT: .LBB24_1: // %bb6
; CHECK-SD-NEXT: mov w0, wzr
; CHECK-SD-NEXT: ret
; CHECK-SD-NEXT: .LBB24_2: // %bb1
; CHECK-SD-NEXT: cbz w8, .LBB24_4
; CHECK-SD-NEXT: // %bb.3:
; CHECK-SD-NEXT: tbz xzr, #63, .LBB24_1
; CHECK-SD-NEXT: b .LBB24_5
; CHECK-SD-NEXT: .LBB24_4: // %bb2
; CHECK-SD-NEXT: mov w8, #1 // =0x1
; CHECK-SD-NEXT: tbz x8, #63, .LBB24_1
; CHECK-SD-NEXT: .LBB24_5: // %bb4
; CHECK-SD-NEXT: mov w8, #1 // =0x1
; CHECK-SD-NEXT: mov w0, wzr
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: avifSequenceHeaderParse:
; CHECK-GI: // %bb.0: // %entry
; CHECK-GI-NEXT: mov w0, wzr
; CHECK-GI-NEXT: mov w8, #1 // =0x1
; CHECK-GI-NEXT: ret
; CHECK-LABEL: avifSequenceHeaderParse:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: mov w8, #1 // =0x1
; CHECK-NEXT: ret
entry:
%a = icmp slt i64 0, 0
br i1 %a, label %bb1, label %bb6
Expand Down
41 changes: 22 additions & 19 deletions llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
Original file line number Diff line number Diff line change
Expand Up @@ -14,15 +14,15 @@ define amdgpu_kernel void @uniform_trunc_i16_to_i1(ptr addrspace(1) %out, i16 %x
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 61440
; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY2]], %subreg.sub0, killed [[COPY1]], %subreg.sub1, killed [[S_MOV_B32_1]], %subreg.sub2, killed [[S_MOV_B32_]], %subreg.sub3
; GCN-NEXT: [[S_SEXT_I32_I16_:%[0-9]+]]:sreg_32 = S_SEXT_I32_I16 [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 16
; GCN-NEXT: [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], killed [[S_MOV_B32_2]], implicit-def dead $scc
; GCN-NEXT: [[S_LSHL_B32_:%[0-9]+]]:sreg_32 = S_LSHL_B32 [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_2]], implicit-def dead $scc
; GCN-NEXT: [[S_LSHR_B32_:%[0-9]+]]:sreg_32 = S_LSHR_B32 [[S_LOAD_DWORD_IMM]], [[S_MOV_B32_2]], implicit-def dead $scc
; GCN-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY killed [[S_LSHR_B32_]]
; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY3]], implicit-def dead $scc
; GCN-NEXT: S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
; GCN-NEXT: [[COPY4:%[0-9]+]]:sreg_64 = COPY $scc
; GCN-NEXT: [[S_MOV_B32_3:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: S_CMP_LT_I32 killed [[S_SEXT_I32_I16_]], killed [[S_MOV_B32_3]], implicit-def $scc
; GCN-NEXT: S_CMP_LT_I32 killed [[S_LSHL_B32_]], killed [[S_MOV_B32_3]], implicit-def $scc
; GCN-NEXT: [[COPY5:%[0-9]+]]:sreg_64 = COPY $scc
; GCN-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[COPY5]], killed [[COPY4]], implicit-def dead $scc
; GCN-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
Expand All @@ -41,11 +41,12 @@ define i1 @divergent_trunc_i16_to_i1(ptr addrspace(1) %out, i16 %x, i1 %z) {
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
; GCN-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 killed [[S_MOV_B32_]], [[COPY1]], implicit $exec
; GCN-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
; GCN-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
; GCN-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[COPY1]], 0, 16, implicit $exec
; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: [[V_CMP_LT_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I32_e64 killed [[V_BFE_I32_e64_]], killed [[S_MOV_B32_]], implicit $exec
; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: [[V_CMP_LT_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I32_e64 killed [[V_LSHLREV_B32_e64_]], killed [[S_MOV_B32_1]], implicit $exec
; GCN-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I32_e64_]], killed [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
; GCN-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
; GCN-NEXT: $vgpr0 = COPY [[V_CNDMASK_B32_e64_]]
Expand Down Expand Up @@ -124,16 +125,17 @@ define amdgpu_kernel void @uniform_trunc_i64_to_i1(ptr addrspace(1) %out, i64 %x
; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 -1
; GCN-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE killed [[COPY4]], %subreg.sub0, killed [[COPY3]], %subreg.sub1, killed [[S_MOV_B32_1]], %subreg.sub2, killed [[S_MOV_B32_]], %subreg.sub3
; GCN-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX4_IMM]].sub3
; GCN-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX4_IMM]].sub2
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[COPY6]], %subreg.sub0, killed [[COPY5]], %subreg.sub1
; GCN-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY killed [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY7]], implicit-def dead $scc
; GCN-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, killed [[COPY5]], %subreg.sub1
; GCN-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY killed [[S_LOAD_DWORD_IMM]]
; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY6]], implicit-def dead $scc
; GCN-NEXT: S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
; GCN-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY $scc
; GCN-NEXT: [[COPY7:%[0-9]+]]:sreg_64 = COPY $scc
; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
; GCN-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
; GCN-NEXT: [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE2]], [[COPY9]], implicit $exec
; GCN-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[COPY8]], implicit-def dead $scc
; GCN-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
; GCN-NEXT: [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE2]], [[COPY8]], implicit $exec
; GCN-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[COPY7]], implicit-def dead $scc
; GCN-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
; GCN-NEXT: BUFFER_STORE_BYTE_OFFSET killed [[V_CNDMASK_B32_e64_]], killed [[REG_SEQUENCE1]], 0, 0, 0, 0, implicit $exec :: (store (s8) into %ir.2, addrspace 1)
; GCN-NEXT: S_ENDPGM 0
Expand All @@ -146,17 +148,18 @@ define amdgpu_kernel void @uniform_trunc_i64_to_i1(ptr addrspace(1) %out, i64 %x
define i1 @divergent_trunc_i64_to_i1(ptr addrspace(1) %out, i64 %x, i1 %z) {
; GCN-LABEL: name: divergent_trunc_i64_to_i1
; GCN: bb.0 (%ir-block.0):
; GCN-NEXT: liveins: $vgpr2, $vgpr3, $vgpr4
; GCN-NEXT: liveins: $vgpr3, $vgpr4
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr4
; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN-NEXT: [[DEF1:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
; GCN-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE killed [[DEF]], %subreg.sub0, [[COPY1]], %subreg.sub1
; GCN-NEXT: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
; GCN-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
; GCN-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
; GCN-NEXT: [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE]], [[COPY3]], implicit $exec
; GCN-NEXT: [[COPY2:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
; GCN-NEXT: [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE]], [[COPY2]], implicit $exec
; GCN-NEXT: [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
; GCN-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
; GCN-NEXT: $vgpr0 = COPY [[V_CNDMASK_B32_e64_]]
Expand Down
24 changes: 8 additions & 16 deletions llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.bf16.ll
Original file line number Diff line number Diff line change
Expand Up @@ -391,12 +391,11 @@ define i1 @posnormal_bf16(bfloat %x) nounwind {
; GFX7CHECK: ; %bb.0:
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: v_add_i32_e32 v0, vcc, 0xffffff80, v0
; GFX7CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7CHECK-NEXT: s_movk_i32 s6, 0x7f00
; GFX7CHECK-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v1
; GFX7CHECK-NEXT: v_cmp_gt_u32_e32 vcc, s6, v0
; GFX7CHECK-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
Expand Down Expand Up @@ -467,12 +466,11 @@ define i1 @negnormal_bf16(bfloat %x) nounwind {
; GFX7CHECK: ; %bb.0:
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: v_add_i32_e32 v0, vcc, 0xffffff80, v0
; GFX7CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7CHECK-NEXT: s_movk_i32 s6, 0x7f00
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v1
; GFX7CHECK-NEXT: v_cmp_gt_u32_e32 vcc, s6, v0
; GFX7CHECK-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; GFX7CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
Expand Down Expand Up @@ -601,11 +599,10 @@ define i1 @negsubnormal_bf16(bfloat %x) nounwind {
; GFX7CHECK: ; %bb.0:
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: v_add_i32_e64 v0, s[4:5], -1, v0
; GFX7CHECK-NEXT: s_movk_i32 s4, 0x7f
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GFX7CHECK-NEXT: v_cmp_gt_u32_e64 s[4:5], s4, v0
; GFX7CHECK-NEXT: s_and_b64 s[4:5], s[4:5], vcc
; GFX7CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
Expand Down Expand Up @@ -826,10 +823,9 @@ define i1 @negfinite_bf16(bfloat %x) nounwind {
; GFX7CHECK: ; %bb.0:
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: s_movk_i32 s4, 0x7f80
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], s4, v0
; GFX7CHECK-NEXT: s_and_b64 s[4:5], s[4:5], vcc
; GFX7CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
Expand Down Expand Up @@ -1634,12 +1630,11 @@ define i1 @not_is_plus_normal_bf16(bfloat %x) {
; GFX7CHECK: ; %bb.0:
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: v_add_i32_e32 v0, vcc, 0xffffff80, v0
; GFX7CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7CHECK-NEXT: s_movk_i32 s6, 0x7eff
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], 0, v1
; GFX7CHECK-NEXT: v_cmp_lt_u32_e32 vcc, s6, v0
; GFX7CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX7CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
Expand Down Expand Up @@ -1710,12 +1705,11 @@ define i1 @not_is_neg_normal_bf16(bfloat %x) {
; GFX7CHECK: ; %bb.0:
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: v_add_i32_e32 v0, vcc, 0xffffff80, v0
; GFX7CHECK-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX7CHECK-NEXT: s_movk_i32 s6, 0x7eff
; GFX7CHECK-NEXT: v_cmp_lt_i32_e64 s[4:5], -1, v1
; GFX7CHECK-NEXT: v_cmp_lt_u32_e32 vcc, s6, v0
; GFX7CHECK-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX7CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
Expand Down Expand Up @@ -2068,10 +2062,9 @@ define i1 @not_ispositive_bf16(bfloat %x) {
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v2, 16, v0
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: s_movk_i32 s6, 0x7f80
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v2
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], s6, v0
; GFX7CHECK-NEXT: s_mov_b32 s7, 0xff80
; GFX7CHECK-NEXT: s_and_b64 s[4:5], s[4:5], vcc
Expand Down Expand Up @@ -2165,10 +2158,9 @@ define i1 @isnegative_bf16(bfloat %x) {
; GFX7CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7CHECK-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7CHECK-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX7CHECK-NEXT: v_ashrrev_i32_e32 v2, 16, v0
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v0
; GFX7CHECK-NEXT: v_bfe_u32 v0, v0, 16, 15
; GFX7CHECK-NEXT: s_movk_i32 s4, 0x7f80
; GFX7CHECK-NEXT: v_cmp_gt_i32_e32 vcc, 0, v2
; GFX7CHECK-NEXT: v_cmp_gt_i32_e64 s[4:5], s4, v0
; GFX7CHECK-NEXT: s_mov_b32 s6, 0xff80
; GFX7CHECK-NEXT: s_and_b64 s[4:5], s[4:5], vcc
Expand Down
Loading