52 changes: 35 additions & 17 deletions compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -40,18 +40,20 @@ pub(crate) enum MinMax {
/// In particular, `-0.0` is considered smaller than `+0.0` and
/// if either input is NaN, the result is NaN.
Minimum,
/// The IEEE-2008 `minNum` operation - see `f32::min` etc.
/// The IEEE-2008 `minNum` operation with the SNaN handling of the
/// IEEE-2019 `minimumNumber` operation - see `f32::min` etc.
/// In particular, if the inputs are `-0.0` and `+0.0`, the result is non-deterministic,
/// and if one argument is NaN, the other one is returned.
MinNum,
/// and if one argument is NaN (quiet or signaling), the other one is returned.
MinimumNumber,
/// The IEEE-2019 `maximum` operation - see `f32::maximum` etc.
/// In particular, `-0.0` is considered smaller than `+0.0` and
/// if either input is NaN, the result is NaN.
Maximum,
/// The IEEE-2008 `maxNum` operation - see `f32::max` etc.
/// The IEEE-2008 `maxNum` operation with the SNaN handling of the
/// IEEE-2019 `maximumNumber` operation - see `f32::max` etc.
/// In particular, if the inputs are `-0.0` and `+0.0`, the result is non-deterministic,
/// and if one argument is NaN, the other one is returned.
MaxNum,
/// and if one argument is NaN (quiet or signaling), the other one is returned.
MaximumNumber,
}

/// Directly returns an `Allocation` containing an absolute path representation of the given type.
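As a quick illustration of the semantics the `MinMax` variants document, here is a minimal standalone sketch using only stable `f32` APIs (the NaN-propagating `minimum`/`maximum` operations are nightly-only, so they are just noted in a comment):

```rust
fn main() {
    // minimumNumber / maximumNumber behaviour (`f32::min` / `f32::max`):
    // a NaN operand -- quiet or signaling -- is ignored.
    assert_eq!(f32::NAN.min(1.0), 1.0);
    assert_eq!(1.0f32.max(f32::NAN), 1.0);

    // For -0.0 vs +0.0 the result is unspecified: either zero may come back,
    // which is exactly the non-determinism the variants document. The check
    // below holds either way because -0.0 == 0.0 under IEEE comparison.
    let z = (-0.0f32).min(0.0);
    assert_eq!(z, 0.0);

    // The IEEE-2019 `minimum`/`maximum` operations (nightly `f32::minimum` /
    // `f32::maximum`) instead propagate NaN and order -0.0 below +0.0.
}
```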
@@ -524,10 +526,18 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.write_scalar(Scalar::from_target_usize(align.bytes(), self), dest)?;
}

sym::minnumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::MinNum, dest)?,
sym::minnumf32 => self.float_minmax_intrinsic::<Single>(args, MinMax::MinNum, dest)?,
sym::minnumf64 => self.float_minmax_intrinsic::<Double>(args, MinMax::MinNum, dest)?,
sym::minnumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::MinNum, dest)?,
sym::minnumf16 => {
self.float_minmax_intrinsic::<Half>(args, MinMax::MinimumNumber, dest)?
}
sym::minnumf32 => {
self.float_minmax_intrinsic::<Single>(args, MinMax::MinimumNumber, dest)?
}
sym::minnumf64 => {
self.float_minmax_intrinsic::<Double>(args, MinMax::MinimumNumber, dest)?
}
sym::minnumf128 => {
self.float_minmax_intrinsic::<Quad>(args, MinMax::MinimumNumber, dest)?
}

sym::minimumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::Minimum, dest)?,
sym::minimumf32 => {
@@ -538,10 +548,18 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
sym::minimumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::Minimum, dest)?,

sym::maxnumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::MaxNum, dest)?,
sym::maxnumf32 => self.float_minmax_intrinsic::<Single>(args, MinMax::MaxNum, dest)?,
sym::maxnumf64 => self.float_minmax_intrinsic::<Double>(args, MinMax::MaxNum, dest)?,
sym::maxnumf128 => self.float_minmax_intrinsic::<Quad>(args, MinMax::MaxNum, dest)?,
sym::maxnumf16 => {
self.float_minmax_intrinsic::<Half>(args, MinMax::MaximumNumber, dest)?
}
sym::maxnumf32 => {
self.float_minmax_intrinsic::<Single>(args, MinMax::MaximumNumber, dest)?
}
sym::maxnumf64 => {
self.float_minmax_intrinsic::<Double>(args, MinMax::MaximumNumber, dest)?
}
sym::maxnumf128 => {
self.float_minmax_intrinsic::<Quad>(args, MinMax::MaximumNumber, dest)?
}

sym::maximumf16 => self.float_minmax_intrinsic::<Half>(args, MinMax::Maximum, dest)?,
sym::maximumf32 => {
@@ -966,16 +984,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
{
let a: F = a.to_float()?;
let b: F = b.to_float()?;
let res = if matches!(op, MinMax::MinNum | MinMax::MaxNum) && a == b {
let res = if matches!(op, MinMax::MinimumNumber | MinMax::MaximumNumber) && a == b {
// They are definitely not NaN (those are never equal), but they could be `+0` and `-0`.
// Let the machine decide which one to return.
M::equal_float_min_max(self, a, b)
} else {
let result = match op {
MinMax::Minimum => a.minimum(b),
MinMax::MinNum => a.min(b),
MinMax::MinimumNumber => a.min(b),
MinMax::Maximum => a.maximum(b),
MinMax::MaxNum => a.max(b),
MinMax::MaximumNumber => a.max(b),
};
self.adjust_nan(result, &[a, b])
};
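The `a == b` fast path above can only be hit by `+0.0` vs `-0.0`, since NaNs never compare equal. A rough standalone model of that control flow is sketched below; the `pick_either` closure is a hypothetical stand-in for the `M::equal_float_min_max` machine hook, and the NaN-payload randomization done by `adjust_nan` is omitted:

```rust
// `pick_either` is a hypothetical stand-in for `M::equal_float_min_max`.
fn min_num_model(a: f64, b: f64, pick_either: impl Fn(f64, f64) -> f64) -> f64 {
    if a == b {
        // Equal but possibly distinguishable: this can only be +0.0 vs -0.0,
        // since NaNs never compare equal. Let the "machine" choose.
        pick_either(a, b)
    } else {
        // IEEE minimumNumber: a NaN operand (quiet or signaling) is ignored.
        // (The interpreter additionally randomizes NaN payloads via `adjust_nan`.)
        a.min(b)
    }
}

fn main() {
    // A machine that always picks the first operand keeps the -0.0.
    let r = min_num_model(-0.0, 0.0, |a, _b| a);
    assert!(r == 0.0 && r.is_sign_negative());
    assert_eq!(min_num_model(f64::NAN, 3.0, |a, _b| a), 3.0);
}
```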
12 changes: 6 additions & 6 deletions compiler/rustc_const_eval/src/interpret/intrinsics/simd.rs
@@ -202,8 +202,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
sym::simd_le => Op::MirOp(BinOp::Le),
sym::simd_gt => Op::MirOp(BinOp::Gt),
sym::simd_ge => Op::MirOp(BinOp::Ge),
sym::simd_fmax => Op::FMinMax(MinMax::MaxNum),
sym::simd_fmin => Op::FMinMax(MinMax::MinNum),
sym::simd_fmax => Op::FMinMax(MinMax::MaximumNumber),
sym::simd_fmin => Op::FMinMax(MinMax::MinimumNumber),
sym::simd_saturating_add => Op::SaturatingOp(BinOp::Add),
sym::simd_saturating_sub => Op::SaturatingOp(BinOp::Sub),
sym::simd_arith_offset => Op::WrappingOffset,
@@ -295,8 +295,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
sym::simd_reduce_xor => Op::MirOp(BinOp::BitXor),
sym::simd_reduce_any => Op::MirOpBool(BinOp::BitOr),
sym::simd_reduce_all => Op::MirOpBool(BinOp::BitAnd),
sym::simd_reduce_max => Op::MinMax(MinMax::MaxNum),
sym::simd_reduce_min => Op::MinMax(MinMax::MinNum),
sym::simd_reduce_max => Op::MinMax(MinMax::MaximumNumber),
sym::simd_reduce_min => Op::MinMax(MinMax::MinimumNumber),
_ => unreachable!(),
};

@@ -320,8 +320,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
} else {
// Just boring integers, no NaNs to worry about.
let mirop = match mmop {
MinMax::MinNum | MinMax::Minimum => BinOp::Le,
MinMax::MaxNum | MinMax::Maximum => BinOp::Ge,
MinMax::MinimumNumber | MinMax::Minimum => BinOp::Le,
MinMax::MaximumNumber | MinMax::Maximum => BinOp::Ge,
};
if self.binary_op(mirop, &res, &op)?.to_scalar().to_bool()? {
res
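For integer vectors the reduction needs no NaN handling at all: the interpreter keeps the accumulator whenever the chosen comparison (`Le` for min, `Ge` for max) holds, which is an ordinary fold. A tiny illustrative sketch of the same idea outside the interpreter:

```rust
fn reduce_min(v: &[i32]) -> i32 {
    // Mirror the interpreter's loop: start from the first lane and keep the
    // accumulator whenever `acc <= elem`, otherwise take the element.
    let (first, rest) = v.split_first().expect("SIMD vectors are non-empty");
    rest.iter().copied().fold(*first, |acc, x| if acc <= x { acc } else { x })
}

fn main() {
    assert_eq!(reduce_min(&[3, -7, 2, -7]), -7);
}
```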
52 changes: 23 additions & 29 deletions src/tools/miri/tests/pass/float.rs
@@ -48,29 +48,15 @@ macro_rules! assert_approx_eq {
};
}

/// From IEEE 754 a Signaling NaN for single precision has the following representation:
/// ```
/// s | 1111 1111 | 0x..x
/// ```
/// Where at least one `x` is a 1.
///
/// This sNaN has the following representation and is used for testing purposes:
/// ```
/// 0 | 1111 1111 | 01..0
/// ```
const SNAN_F32: f32 = f32::from_bits(0x7fa00000);

/// From IEEE 754 a Signaling NaN for double precision has the following representation:
/// ```
/// s | 1111 1111 111 | 0x..x
/// ```
/// Where at least one `x` is a 1.
///
/// This sNaN has the following representation and is used for testing purposes:
/// ```
/// 0 | 1111 1111 111 | 01..0
/// ```
const SNAN_F64: f64 = f64::from_bits(0x7ff4000000000000);
/// We turn the quiet NaN f*::NAN into a signaling one by flipping the first (most significant)
/// two bits of the mantissa. For this we have to shift by `MANTISSA_DIGITS-3` because
/// we subtract 1 as the actual mantissa is 1 bit smaller, and 2 more as that's the width
/// of the value we are shifting.
const F16_SNAN: f16 = f16::from_bits(f16::NAN.to_bits() ^ (0b11 << (f16::MANTISSA_DIGITS - 3)));
const F32_SNAN: f32 = f32::from_bits(f32::NAN.to_bits() ^ (0b11 << (f32::MANTISSA_DIGITS - 3)));
const F64_SNAN: f64 = f64::from_bits(f64::NAN.to_bits() ^ (0b11 << (f64::MANTISSA_DIGITS - 3)));
const F128_SNAN: f128 =
f128::from_bits(f128::NAN.to_bits() ^ (0b11 << (f128::MANTISSA_DIGITS - 3)));

fn main() {
basic();
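The new `*_SNAN` constants reproduce the bit patterns the deleted hard-coded constants used: clearing the quiet bit (the mantissa MSB) of the default quiet NaN and setting the bit below it keeps the value a NaN while making it signaling. A quick check of that arithmetic for `f32`/`f64`, assuming the usual `0x7fc00000` / `0x7ff8000000000000` bit patterns for `f32::NAN` / `f64::NAN` (the `f16`/`f128` constants work the same way):

```rust
fn main() {
    // Flip the top two mantissa bits of the default quiet NaN.
    let f32_snan = f32::NAN.to_bits() ^ (0b11 << (f32::MANTISSA_DIGITS - 3));
    let f64_snan = f64::NAN.to_bits() ^ (0b11 << (f64::MANTISSA_DIGITS - 3));

    // Same patterns the old SNAN_F32 / SNAN_F64 constants hard-coded.
    assert_eq!(f32_snan, 0x7fa0_0000);
    assert_eq!(f64_snan, 0x7ff4_0000_0000_0000);

    // Still NaNs, but with the quiet bit cleared (i.e. signaling).
    assert!(f32::from_bits(f32_snan).is_nan());
    assert!(f64::from_bits(f64_snan).is_nan());
}
```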
@@ -757,6 +743,8 @@ fn ops() {
assert_eq(f16::NAN.max(-9.0), -9.0);
assert_eq((9.0_f16).min(f16::NAN), 9.0);
assert_eq((-9.0_f16).max(f16::NAN), -9.0);
assert_eq(F16_SNAN.min(9.0), 9.0);
assert_eq((-9.0_f16).max(F16_SNAN), -9.0);

// f32 min/max
assert_eq((1.0 as f32).max(-1.0), 1.0);
@@ -765,6 +753,8 @@ fn ops() {
assert_eq(f32::NAN.max(-9.0), -9.0);
assert_eq((9.0 as f32).min(f32::NAN), 9.0);
assert_eq((-9.0 as f32).max(f32::NAN), -9.0);
assert_eq(F32_SNAN.min(9.0), 9.0);
assert_eq((-9.0_f32).max(F32_SNAN), -9.0);

// f64 min/max
assert_eq((1.0 as f64).max(-1.0), 1.0);
@@ -773,6 +763,8 @@ fn ops() {
assert_eq(f64::NAN.max(-9.0), -9.0);
assert_eq((9.0 as f64).min(f64::NAN), 9.0);
assert_eq((-9.0 as f64).max(f64::NAN), -9.0);
assert_eq(F64_SNAN.min(9.0), 9.0);
assert_eq((-9.0_f64).max(F64_SNAN), -9.0);

// f128 min/max
assert_eq((1.0_f128).max(-1.0), 1.0);
@@ -781,6 +773,8 @@ fn ops() {
assert_eq(f128::NAN.max(-9.0), -9.0);
assert_eq((9.0_f128).min(f128::NAN), 9.0);
assert_eq((-9.0_f128).max(f128::NAN), -9.0);
assert_eq(F128_SNAN.min(9.0), 9.0);
assert_eq((-9.0_f128).max(F128_SNAN), -9.0);

// f16 copysign
assert_eq(3.5_f16.copysign(0.42), 3.5_f16);
@@ -1548,15 +1542,15 @@ fn test_non_determinism() {
test_operations_f128(25., 18.);

// SNaN^0 = (1 | NaN)
check_nondet(|| f32::powf(SNAN_F32, 0.0).is_nan());
check_nondet(|| f64::powf(SNAN_F64, 0.0).is_nan());
check_nondet(|| f32::powf(F32_SNAN, 0.0).is_nan());
check_nondet(|| f64::powf(F64_SNAN, 0.0).is_nan());

// 1^SNaN = (1 | NaN)
check_nondet(|| f32::powf(1.0, SNAN_F32).is_nan());
check_nondet(|| f64::powf(1.0, SNAN_F64).is_nan());
check_nondet(|| f32::powf(1.0, F32_SNAN).is_nan());
check_nondet(|| f64::powf(1.0, F64_SNAN).is_nan());

// same as powf (keep it consistent):
// x^SNaN = (1 | NaN)
check_nondet(|| f32::powi(SNAN_F32, 0).is_nan());
check_nondet(|| f64::powi(SNAN_F64, 0).is_nan());
check_nondet(|| f32::powi(F32_SNAN, 0).is_nan());
check_nondet(|| f64::powi(F64_SNAN, 0).is_nan());
}