blob: 8f2131d32c992dc057b0416e15da506b35b21779 [file] [log] [blame]
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <tuple>
#include <type_traits>
#include <vector>
#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/memory.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/machine-type.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/utils/utils.h"
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/flag-utils.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace test_run_wasm_simd {
namespace {
// Function-pointer types for the scalar reference implementations used to
// compute the expected value of each SIMD lane, grouped by lane type.
// CompareOps return an all-ones mask value (-1) for true and 0 for false.
using DoubleUnOp = double (*)(double);
using DoubleBinOp = double (*)(double, double);
using DoubleCompareOp = int64_t (*)(double, double);
using FloatUnOp = float (*)(float);
using FloatBinOp = float (*)(float, float);
using FloatCompareOp = int (*)(float, float);
using Int64UnOp = int64_t (*)(int64_t);
using Int64BinOp = int64_t (*)(int64_t, int64_t);
using Int64ShiftOp = int64_t (*)(int64_t, int);
using Int32UnOp = int32_t (*)(int32_t);
using Int32BinOp = int32_t (*)(int32_t, int32_t);
using Int32CompareOp = int (*)(int32_t, int32_t);
using Int32ShiftOp = int32_t (*)(int32_t, int);
using Int16UnOp = int16_t (*)(int16_t);
using Int16BinOp = int16_t (*)(int16_t, int16_t);
using Int16CompareOp = int (*)(int16_t, int16_t);
using Int16ShiftOp = int16_t (*)(int16_t, int);
using Int8UnOp = int8_t (*)(int8_t);
using Int8BinOp = int8_t (*)(int8_t, int8_t);
using Int8CompareOp = int (*)(int8_t, int8_t);
using Int8ShiftOp = int8_t (*)(int8_t, int);
// Declares RunWasm_<name>_Impl(lower_simd, execution_tier) and registers four
// cctest variants that invoke it: TurboFan, Liftoff, the interpreter, and
// TurboFan with SIMD scalar lowering (kLowerSimd). The braces following the
// macro invocation become the body of RunWasm_<name>_Impl.
#define WASM_SIMD_TEST(name)                                              \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                        \
                             TestExecutionTier execution_tier);           \
  TEST(RunWasm_##name##_turbofan) {                                       \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan);    \
  }                                                                       \
  TEST(RunWasm_##name##_liftoff) {                                        \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kLiftoff);     \
  }                                                                       \
  TEST(RunWasm_##name##_interpreter) {                                    \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
  }                                                                       \
  TEST(RunWasm_##name##_simd_lowered) {                                   \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kLowerSimd, TestExecutionTier::kTurbofan);      \
  }                                                                       \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                        \
                             TestExecutionTier execution_tier)
// Generic expected value functions.
// Scalar reference for floating-point lane negation. Restricted to
// floating-point types; integral lanes use base::NegateWithWraparound.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Negate(T a) {
  return -a;
}
// For signed integral types, use base::AddWithWraparound.
// Scalar reference for floating-point lane addition.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Add(T a, T b) {
  const T sum = a + b;
  return sum;
}
// For signed integral types, use base::SubWithWraparound.
// Scalar reference for floating-point lane subtraction.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Sub(T a, T b) {
  const T difference = a - b;
  return difference;
}
// For signed integral types, use base::MulWithWraparound.
// Scalar reference for floating-point lane multiplication.
template <typename T,
          typename = std::enable_if_t<std::is_floating_point<T>::value>>
T Mul(T a, T b) {
  const T product = a * b;
  return product;
}
// Scalar reference for lane-wise minimum (used e.g. for pmin). Note that
// std::min's (b < a) comparison determines the result for ties and for
// unordered (NaN) operands: it returns `a` when the comparison is false,
// so the argument order here is significant.
template <typename T>
T Minimum(T a, T b) {
  return std::min(a, b);
}
// Scalar reference for lane-wise maximum (used e.g. for pmax). As with
// Minimum above, std::max returns `a` for ties and unordered (NaN)
// operands, so the argument order is significant.
template <typename T>
T Maximum(T a, T b) {
  return std::max(a, b);
}
// Lane-wise minimum after reinterpreting both operands as unsigned; the
// result is returned in the original (signed) representation.
template <typename T>
T UnsignedMinimum(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ua = static_cast<UnsignedT>(a);
  const UnsignedT ub = static_cast<UnsignedT>(b);
  if (ua <= ub) return a;
  return b;
}
// Lane-wise maximum after reinterpreting both operands as unsigned; the
// result is returned in the original (signed) representation.
template <typename T>
T UnsignedMaximum(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ua = static_cast<UnsignedT>(a);
  const UnsignedT ub = static_cast<UnsignedT>(b);
  if (ua >= ub) return a;
  return b;
}
// Lane-wise equality; all-ones (-1) encodes true, 0 encodes false.
int Equal(float a, float b) { return -static_cast<int>(a == b); }
template <typename T>
T Equal(T a, T b) {
  return -static_cast<T>(a == b);
}
// Lane-wise inequality; all-ones (-1) encodes true, 0 encodes false.
int NotEqual(float a, float b) { return -static_cast<int>(a != b); }
template <typename T>
T NotEqual(T a, T b) {
  return -static_cast<T>(a != b);
}
// Lane-wise less-than; all-ones (-1) encodes true, 0 encodes false.
int Less(float a, float b) { return -static_cast<int>(a < b); }
template <typename T>
T Less(T a, T b) {
  return -static_cast<T>(a < b);
}
// Lane-wise less-or-equal; all-ones (-1) encodes true, 0 encodes false.
int LessEqual(float a, float b) { return -static_cast<int>(a <= b); }
template <typename T>
T LessEqual(T a, T b) {
  return -static_cast<T>(a <= b);
}
// Lane-wise greater-than; all-ones (-1) encodes true, 0 encodes false.
int Greater(float a, float b) { return -static_cast<int>(a > b); }
template <typename T>
T Greater(T a, T b) {
  return -static_cast<T>(a > b);
}
// Lane-wise greater-or-equal; all-ones (-1) encodes true, 0 encodes false.
int GreaterEqual(float a, float b) { return -static_cast<int>(a >= b); }
template <typename T>
T GreaterEqual(T a, T b) {
  return -static_cast<T>(a >= b);
}
// Unsigned lane-wise less-than; all-ones (-1) encodes true, 0 false.
template <typename T>
T UnsignedLess(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ua = static_cast<UnsignedT>(a);
  const UnsignedT ub = static_cast<UnsignedT>(b);
  return ua < ub ? -1 : 0;
}
// Unsigned lane-wise less-or-equal; all-ones (-1) encodes true, 0 false.
template <typename T>
T UnsignedLessEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ua = static_cast<UnsignedT>(a);
  const UnsignedT ub = static_cast<UnsignedT>(b);
  return ua <= ub ? -1 : 0;
}
// Unsigned lane-wise greater-than; all-ones (-1) encodes true, 0 false.
template <typename T>
T UnsignedGreater(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ua = static_cast<UnsignedT>(a);
  const UnsignedT ub = static_cast<UnsignedT>(b);
  return ua > ub ? -1 : 0;
}
// Unsigned lane-wise greater-or-equal; all-ones (-1) encodes true, 0 false.
template <typename T>
T UnsignedGreaterEqual(T a, T b) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const UnsignedT ua = static_cast<UnsignedT>(a);
  const UnsignedT ub = static_cast<UnsignedT>(b);
  return ua >= ub ? -1 : 0;
}
// Logical shifts operate on the unsigned reinterpretation of the lane, and
// the shift count is taken modulo the lane width (wasm shift semantics).
template <typename T>
T LogicalShiftLeft(T a, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const int lane_width_bits = sizeof(T) * 8;
  return static_cast<UnsignedT>(a) << (shift % lane_width_bits);
}
template <typename T>
T LogicalShiftRight(T a, int shift) {
  using UnsignedT = typename std::make_unsigned<T>::type;
  const int lane_width_bits = sizeof(T) * 8;
  return static_cast<UnsignedT>(a) >> (shift % lane_width_bits);
}
// Define our own ArithmeticShiftRight instead of using the one from utils.h
// because the shift amount needs to be taken modulo lane width.
template <typename T>
T ArithmeticShiftRight(T a, int shift) {
  const int lane_width_bits = sizeof(T) * 8;
  // >> on a negative signed value is an arithmetic (sign-extending) shift on
  // all toolchains V8 supports (implementation-defined before C++20).
  return a >> (shift % lane_width_bits);
}
// Scalar reference for lane-wise absolute value. For narrow integer types,
// std::abs operates on the promoted int, so e.g. Abs(int8_t{-128}) yields 128
// which narrows back to -128 (the wasm wraparound result). For floats,
// std::abs clears the sign bit, including for -0.0f.
template <typename T>
T Abs(T a) {
  return std::abs(a);
}
// only used for F64x2 tests below; all-ones (-1) encodes true, 0 false.
int64_t Equal(double a, double b) { return -static_cast<int64_t>(a == b); }
int64_t NotEqual(double a, double b) { return -static_cast<int64_t>(a != b); }
int64_t Greater(double a, double b) { return -static_cast<int64_t>(a > b); }
int64_t GreaterEqual(double a, double b) {
  return -static_cast<int64_t>(a >= b);
}
int64_t Less(double a, double b) { return -static_cast<int64_t>(a < b); }
int64_t LessEqual(double a, double b) { return -static_cast<int64_t>(a <= b); }
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// Only used for qfma and qfms tests below.
// FMOperation holds the params (a, b, c) for a Multiply-Add or
// Multiply-Subtract operation, and the expected result if the operation was
// fused, rounded only once for the entire operation, or unfused, rounded after
// multiply and again after add/subtract.
template <typename T>
struct FMOperation {
  const T a;  // addend (qfma) / minuend (qfms)
  const T b;  // first multiplicand
  const T c;  // second multiplicand
  const T fused_result;    // expected when rounded once for the whole op
  const T unfused_result;  // expected when mul and add/sub round separately
};
// large_n is large number that overflows T when multiplied by itself, this is a
// useful constant to test fused/unfused behavior.
template <typename T>
constexpr T large_n = T(0);
// 1e200 * 1e200 overflows double (max finite ~1.8e308).
template <>
constexpr double large_n<double> = 1e200;
// 1e20 * 1e20 overflows float (max finite ~3.4e38).
template <>
constexpr float large_n<float> = 1e20;
// Fused Multiply-Add performs a + b * c.
// Note: the float literals below convert exactly to double for the F64x2
// instantiation.
template <typename T>
static constexpr FMOperation<T> qfma_array[] = {
    // Simple case: fused and unfused results agree.
    {1.0f, 2.0f, 3.0f, 7.0f, 7.0f},
    // fused: a + b * c = -inf + (positive overflow) = -inf
    // unfused: a + b * c = -inf + inf = NaN
    {-std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
     -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
    // fused: a + b * c = inf + (negative overflow) = inf
    // unfused: a + b * c = inf + -inf = NaN
    {std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
     std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
    // NaN
    {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
     std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
    // -NaN
    {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
     std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
// Wraps qfma_array in a Vector for range-based iteration in the tests.
template <typename T>
static constexpr Vector<const FMOperation<T>> qfma_vector() {
  return ArrayVector(qfma_array<T>);
}
// Fused Multiply-Subtract performs a - b * c.
template <typename T>
static constexpr FMOperation<T> qfms_array[]{
    // Simple case: fused and unfused results agree.
    {1.0f, 2.0f, 3.0f, -5.0f, -5.0f},
    // fused: a - b * c = inf - (positive overflow) = inf
    // unfused: a - b * c = inf - inf = NaN
    {std::numeric_limits<T>::infinity(), large_n<T>, large_n<T>,
     std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
    // fused: a - b * c = -inf - (negative overflow) = -inf
    // unfused: a - b * c = -inf - -inf = NaN
    {-std::numeric_limits<T>::infinity(), -large_n<T>, large_n<T>,
     -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::quiet_NaN()},
    // NaN
    {std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
     std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()},
    // -NaN
    {-std::numeric_limits<T>::quiet_NaN(), 2.0f, 3.0f,
     std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()}};
// Wraps qfms_array in a Vector for range-based iteration in the tests.
template <typename T>
static constexpr Vector<const FMOperation<T>> qfms_vector() {
  return ArrayVector(qfms_array<T>);
}
// Fused results only when fma3 feature is enabled, and running on TurboFan or
// Liftoff (which can fall back to TurboFan if FMA is not implemented).
bool ExpectFused(TestExecutionTier tier) {
  const bool compiled_tier = tier == TestExecutionTier::kTurbofan ||
                             tier == TestExecutionTier::kLiftoff;
#ifdef V8_TARGET_ARCH_X64
  // x64 additionally requires the FMA3 CPU feature to emit fused code.
  return compiled_tier && CpuFeatures::IsSupported(FMA3);
#else
  return compiled_tier;
#endif
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
} // namespace
// Emits wasm code that returns 0 from the enclosing function if the (signed)
// extract of lane `lane_index` from `value` differs from local `lane_value`.
#define WASM_SIMD_CHECK_LANE_S(TYPE, value, LANE_TYPE, lane_value, lane_index) \
  WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value),                    \
                                WASM_SIMD_##TYPE##_EXTRACT_LANE(               \
                                    lane_index, WASM_GET_LOCAL(value))),       \
          WASM_RETURN1(WASM_ZERO))
// Unsigned Extracts are only available for I8x16, I16x8 types
// Same as WASM_SIMD_CHECK_LANE_S but uses the unsigned extract-lane opcode.
#define WASM_SIMD_CHECK_LANE_U(TYPE, value, LANE_TYPE, lane_value, lane_index) \
  WASM_IF(WASM_##LANE_TYPE##_NE(WASM_GET_LOCAL(lane_value),                    \
                                WASM_SIMD_##TYPE##_EXTRACT_LANE_U(             \
                                    lane_index, WASM_GET_LOCAL(value))),       \
          WASM_RETURN1(WASM_ZERO))
// The macro below disables tests lowering for certain nodes where the simd
// lowering doesn't work correctly. Early return here if the CPU does not
// support SIMD as the graph will be implicitly lowered in that case.
// Unlike WASM_SIMD_TEST, no kLowerSimd variant is registered.
#define WASM_SIMD_TEST_NO_LOWERING(name)                                  \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                        \
                             TestExecutionTier execution_tier);           \
  TEST(RunWasm_##name##_turbofan) {                                       \
    if (!CpuFeatures::SupportsWasmSimd128()) return;                      \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kTurbofan);    \
  }                                                                       \
  TEST(RunWasm_##name##_liftoff) {                                        \
    if (!CpuFeatures::SupportsWasmSimd128()) return;                      \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kLiftoff);     \
  }                                                                       \
  TEST(RunWasm_##name##_interpreter) {                                    \
    EXPERIMENTAL_FLAG_SCOPE(simd);                                        \
    RunWasm_##name##_Impl(kNoLowerSimd, TestExecutionTier::kInterpreter); \
  }                                                                       \
  void RunWasm_##name##_Impl(LowerSimd lower_simd,                        \
                             TestExecutionTier execution_tier)
// Returns true if the platform can represent the result.
// On ARM, subnormal values are excluded (NOTE(review): presumably because the
// hardware flushes subnormals to zero in this configuration — confirm).
template <typename T>
bool PlatformCanRepresent(T x) {
#if V8_TARGET_ARCH_ARM
  return std::fpclassify(x) != FP_SUBNORMAL;
#else
  return true;
#endif
}
// Returns true for very small and very large numbers. We skip these test
// values for the approximation instructions, which don't work at the extremes.
bool IsExtreme(float x) {
  constexpr float kSmallFloatThreshold = 1.0e-32f;
  constexpr float kLargeFloatThreshold = 1.0e32f;
  const float magnitude = std::fabs(x);
  // 0 or -0 are fine. (NaN also compares false everywhere below.)
  if (magnitude == 0.0f) return false;
  return magnitude < kSmallFloatThreshold || magnitude > kLargeFloatThreshold;
}
#if V8_OS_AIX
// Returns true if float_op may flip the sign of its input (Negate or
// std::abs). Used below to decide whether the AIX FpOpWorkaround may be
// applied: it is only applied when the op does NOT reverse the sign.
template <typename T>
bool MightReverseSign(T float_op) {
  return float_op == static_cast<T>(Negate) ||
         float_op == static_cast<T>(std::abs);
}
#endif
// Copying one S128 global to another must preserve all four 32-bit lanes.
WASM_SIMD_TEST(S128Globals) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input and output vectors.
  int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
  BUILD(r, WASM_SET_GLOBAL(1, WASM_GET_GLOBAL(0)), WASM_ONE);
  FOR_INT32_INPUTS(x) {
    // Fill every lane of the input global with the same value.
    for (int i = 0; i < 4; i++) {
      WriteLittleEndianValue<int32_t>(&g0[i], x);
    }
    r.Call();
    int32_t expected = x;
    for (int i = 0; i < 4; i++) {
      int32_t actual = ReadLittleEndianValue<int32_t>(&g1[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
// f32x4.splat must replicate the scalar argument into all four lanes.
WASM_SIMD_TEST(F32x4Splat) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);
  FOR_FLOAT32_INPUTS(x) {
    r.Call(x);
    float expected = x;
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      // NaN payloads are not compared; any NaN lane is accepted.
      if (std::isnan(expected)) {
        CHECK(std::isnan(actual));
      } else {
        CHECK_EQ(actual, expected);
      }
    }
  }
}
// f32x4.replace_lane: replaces lanes 0..3 one at a time so the final vector
// is {0.0f, 1.0f, 2.0f, 3.0f}.
WASM_SIMD_TEST(F32x4ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build function to replace each lane with its (FP) index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_F32(3.14159f))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_F32(0.0f))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_F32(1.0f))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_F32(2.0f))),
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_REPLACE_LANE(
                               3, WASM_GET_LOCAL(temp1), WASM_F32(3.0f))),
        WASM_ONE);
  r.Call();
  for (int i = 0; i < 4; i++) {
    CHECK_EQ(static_cast<float>(i), ReadLittleEndianValue<float>(&g[i]));
  }
}
// Tests both signed and unsigned conversion.
WASM_SIMD_TEST(F32x4ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create two output vectors to hold signed and unsigned results.
  float* g0 = r.builder().AddGlobal<float>(kWasmS128);
  float* g1 = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_UNOP(kExprF32x4SConvertI32x4, WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_UNOP(kExprF32x4UConvertI32x4, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    // Unsigned conversion reinterprets the input bits as uint32_t first.
    float expected_signed = static_cast<float>(x);
    float expected_unsigned = static_cast<float>(static_cast<uint32_t>(x));
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<float>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<float>(&g1[i]));
    }
  }
}
// Returns true if `actual` is an acceptable encoding of the NaN `expected`:
// same payload modulo sign, optionally with the quiet bit set.
bool IsSameNan(float expected, float actual) {
  // Sign is non-deterministic, so compare with the sign bit cleared.
  constexpr uint32_t kSignMask = 0x80000000;
  constexpr uint32_t kQuietBit = 0x00400000;
  const uint32_t expected_bits = bit_cast<uint32_t>(expected) & ~kSignMask;
  const uint32_t actual_bits = bit_cast<uint32_t>(actual) & ~kSignMask;
  // Some implementations convert signaling NaNs to quiet NaNs, so also
  // accept the same payload with the quiet bit set.
  const bool exact_match = expected_bits == actual_bits;
  const bool quieted_match = (expected_bits | kQuietBit) == actual_bits;
  return exact_match || quieted_match;
}
// Returns true if `actual` is a canonical NaN encoding: quiet bit set and no
// payload bits below it (either sign is accepted).
bool IsCanonical(float actual) {
  constexpr uint32_t kCanonicalMask = 0xFFC00000;
  const uint32_t actual_bits = bit_cast<uint32_t>(actual);
  // Canonical NaN has quiet bit and no payload.
  return (actual_bits & kCanonicalMask) == actual_bits;
}
// Compares `actual` against `expected` for an operation on inputs x and y.
// NaN expectations accept several encodings: a NaN input's payload, its
// quieted form, the expected NaN itself, or the canonical NaN. When `exact`
// is false, non-NaN results are allowed a 1% relative error (used for the
// approximation instructions).
void CheckFloatResult(float x, float y, float expected, float actual,
                      bool exact = true) {
  if (std::isnan(expected)) {
    CHECK(std::isnan(actual));
    if (std::isnan(x) && IsSameNan(x, actual)) return;
    if (std::isnan(y) && IsSameNan(y, actual)) return;
    if (IsSameNan(expected, actual)) return;
    if (IsCanonical(actual)) return;
    // This is expected to assert; it's useful for debugging.
    CHECK_EQ(bit_cast<uint32_t>(expected), bit_cast<uint32_t>(actual));
  } else {
    if (exact) {
      CHECK_EQ(expected, actual);
      // The sign of 0's must match.
      CHECK_EQ(std::signbit(expected), std::signbit(actual));
      return;
    }
    // Otherwise, perform an approximate equality test. First check for
    // equality to handle +/-Infinity where approximate equality doesn't work.
    if (expected == actual) return;
    // 1% error allows all platforms to pass easily.
    constexpr float kApproximationError = 0.01f;
    float abs_error = std::abs(expected) * kApproximationError,
          min = expected - abs_error, max = expected + abs_error;
    CHECK_LE(min, actual);
    CHECK_GE(max, actual);
  }
}
// Test some values not included in the float inputs from value_helper. These
// tests are useful for opcodes that are synthesized during code gen, like Min
// and Max on ia32 and x64.
static constexpr uint32_t nan_test_array[] = {
    // Bit patterns of quiet NaNs and signaling NaNs, with or without
    // additional payload.
    0x7FC00000, 0xFFC00000, 0x7FFFFFFF, 0xFFFFFFFF, 0x7F876543, 0xFF876543,
    // NaN with top payload bit unset.
    0x7FA00000,
    // Both Infinities.
    0x7F800000, 0xFF800000,
    // Some "normal" numbers, 1 and -1.
    0x3F800000, 0xBF800000};
// Iterates the indices of nan_test_array; entries are bit patterns that must
// be bit_cast to float before use.
#define FOR_FLOAT32_NAN_INPUTS(i) \
  for (size_t i = 0; i < arraysize(nan_test_array); ++i)
// Runs the F32x4 unary `opcode` on a splatted vector for every float input
// (plus the NaN/infinity patterns in nan_test_array) and compares each lane
// against expected_op. `exact` == false switches CheckFloatResult to the 1%
// approximate comparison and skips extreme inputs.
void RunF32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, FloatUnOp expected_op,
                      bool exact = true) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    float expected = expected_op(x);
#if V8_OS_AIX
    // Only adjust the expectation for ops that cannot flip the sign.
    if (!MightReverseSign<FloatUnOp>(expected_op))
      expected = FpOpWorkaround<float>(x, expected);
#endif
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x, x, expected, actual, exact);
    }
  }
  FOR_FLOAT32_NAN_INPUTS(i) {
    float x = bit_cast<float>(nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    float expected = expected_op(x);
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x, x, expected, actual, exact);
    }
  }
}
// F32x4 unary operations: each test pairs a wasm opcode with the scalar
// reference implementation that computes the expected lane value.
WASM_SIMD_TEST(F32x4Abs) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Abs, std::abs);
}
WASM_SIMD_TEST(F32x4Neg) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Neg, Negate);
}
WASM_SIMD_TEST(F32x4Sqrt) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Sqrt, std::sqrt);
}
// The reciprocal approximations are post-MVP and only checked approximately
// (exact == false allows a 1% error).
WASM_SIMD_TEST(F32x4RecipApprox) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipApprox,
                   base::Recip, false /* !exact */);
}
WASM_SIMD_TEST(F32x4RecipSqrtApprox) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4RecipSqrtApprox,
                   base::RecipSqrt, false /* !exact */);
}
WASM_SIMD_TEST(F32x4Ceil) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Ceil, ceilf, true);
}
WASM_SIMD_TEST(F32x4Floor) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Floor, floorf, true);
}
WASM_SIMD_TEST(F32x4Trunc) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4Trunc, truncf, true);
}
WASM_SIMD_TEST(F32x4NearestInt) {
  RunF32x4UnOpTest(execution_tier, lower_simd, kExprF32x4NearestInt, nearbyintf,
                   true);
}
// Runs the F32x4 binary `opcode` on two splatted vectors for all pairs of
// float inputs (plus all pairs from nan_test_array) and compares each lane
// against expected_op with exact semantics.
void RunF32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, FloatBinOp expected_op) {
  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
  // Global to hold output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);
  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      float expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 4; i++) {
        float actual = ReadLittleEndianValue<float>(&g[i]);
        CheckFloatResult(x, y, expected, actual, true /* exact */);
      }
    }
  }
  // Also exercise all pairs of the NaN/infinity bit patterns.
  FOR_FLOAT32_NAN_INPUTS(i) {
    float x = bit_cast<float>(nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_NAN_INPUTS(j) {
      float y = bit_cast<float>(nan_test_array[j]);
      if (!PlatformCanRepresent(y)) continue;
      float expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 4; i++) {
        float actual = ReadLittleEndianValue<float>(&g[i]);
        CheckFloatResult(x, y, expected, actual, true /* exact */);
      }
    }
  }
}
#undef FOR_FLOAT32_NAN_INPUTS
// F32x4 binary operations: each test pairs a wasm opcode with the scalar
// reference used to compute the expected lane value. Min/Max use the JSMin/
// JSMax helpers while Pmin/Pmax use std::min/std::max via Minimum/Maximum,
// reflecting their different NaN/zero semantics.
WASM_SIMD_TEST(F32x4Add) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Add, Add);
}
WASM_SIMD_TEST(F32x4Sub) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Sub, Sub);
}
WASM_SIMD_TEST(F32x4Mul) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Mul, Mul);
}
WASM_SIMD_TEST(F32x4Div) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Div, base::Divide);
}
WASM_SIMD_TEST(F32x4Min) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Min, JSMin);
}
WASM_SIMD_TEST(F32x4Max) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Max, JSMax);
}
WASM_SIMD_TEST(F32x4Pmin) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmin, Minimum);
}
WASM_SIMD_TEST(F32x4Pmax) {
  RunF32x4BinOpTest(execution_tier, lower_simd, kExprF32x4Pmax, Maximum);
}
// Runs the F32x4 comparison `opcode` on two splatted vectors for all pairs of
// float inputs and checks that every lane of the i32x4 mask result matches
// expected_op (-1 for true, 0 for false).
void RunF32x4CompareOpTest(TestExecutionTier execution_tier,
                           LowerSimd lower_simd, WasmOpcode opcode,
                           FloatCompareOp expected_op) {
  WasmRunner<int32_t, float, float> r(execution_tier, lower_simd);
  // Set up global to hold mask output.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test values, perform compare op, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);
  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT32_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      float diff = x - y;  // Model comparison as subtraction.
      if (!PlatformCanRepresent(diff)) continue;
      r.Call(x, y);
      int32_t expected = expected_op(x, y);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}
// F32x4 comparisons: each test pairs a wasm opcode with the scalar
// comparison helper that produces the expected -1/0 lane mask.
WASM_SIMD_TEST(F32x4Eq) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Eq, Equal);
}
WASM_SIMD_TEST(F32x4Ne) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ne, NotEqual);
}
WASM_SIMD_TEST(F32x4Gt) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Gt, Greater);
}
WASM_SIMD_TEST(F32x4Ge) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Ge, GreaterEqual);
}
WASM_SIMD_TEST(F32x4Lt) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Lt, Less);
}
WASM_SIMD_TEST(F32x4Le) {
  RunF32x4CompareOpTest(execution_tier, lower_simd, kExprF32x4Le, LessEqual);
}
#if V8_TARGET_ARCH_X64
// TODO(v8:10983) Prototyping sign select.
// Builds signselect(splat(arg0), splat(arg1), mask) and checks the result:
// with the masks used by the callers below, even lanes have the sign bit set
// and receive the first value, odd lanes receive the second.
template <typename T>
void RunSignSelect(TestExecutionTier execution_tier, LowerSimd lower_simd,
                   WasmOpcode signselect, WasmOpcode splat,
                   std::array<int8_t, kSimd128Size> mask) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
  T* output = r.builder().template AddGlobal<T>(kWasmS128);
  // Splat 2 constant values, then use a mask that selects alternate lanes.
  BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(splat), WASM_GET_LOCAL(1),
        WASM_SIMD_OP(splat), WASM_SIMD_CONSTANT(mask), WASM_SIMD_OP(signselect),
        kExprGlobalSet, 0, WASM_ONE);
  r.Call(1, 2);
  constexpr int lanes = kSimd128Size / sizeof(T);
  for (int i = 0; i < lanes; i += 2) {
    CHECK_EQ(1, ReadLittleEndianValue<T>(&output[i]));
  }
  for (int i = 1; i < lanes; i += 2) {
    CHECK_EQ(2, ReadLittleEndianValue<T>(&output[i]));
  }
}
// Sign-select tests for each lane shape. Each mask sets the sign bit on even
// lanes only (alternating sign-set / clear / negative / clear), and the wider
// masks are built by memcpy-ing lane-sized patterns into the byte mask.
WASM_SIMD_TEST_NO_LOWERING(I8x16SignSelect) {
  std::array<int8_t, kSimd128Size> mask = {0x80, 0, -1, 0, 0x80, 0, -1, 0,
                                           0x80, 0, -1, 0, 0x80, 0, -1, 0};
  RunSignSelect<int8_t>(execution_tier, lower_simd, kExprI8x16SignSelect,
                        kExprI8x16Splat, mask);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8SignSelect) {
  std::array<int16_t, kSimd128Size / 2> selection = {0x8000, 0, -1, 0,
                                                     0x8000, 0, -1, 0};
  std::array<int8_t, kSimd128Size> mask;
  memcpy(mask.data(), selection.data(), kSimd128Size);
  RunSignSelect<int16_t>(execution_tier, lower_simd, kExprI16x8SignSelect,
                         kExprI16x8Splat, mask);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4SignSelect) {
  std::array<int32_t, kSimd128Size / 4> selection = {0x80000000, 0, -1, 0};
  std::array<int8_t, kSimd128Size> mask;
  memcpy(mask.data(), selection.data(), kSimd128Size);
  RunSignSelect<int32_t>(execution_tier, lower_simd, kExprI32x4SignSelect,
                         kExprI32x4Splat, mask);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2SignSelect) {
  std::array<int64_t, kSimd128Size / 8> selection = {0x8000000000000000, 0};
  std::array<int8_t, kSimd128Size> mask;
  memcpy(mask.data(), selection.data(), kSimd128Size);
  RunSignSelect<int64_t>(execution_tier, lower_simd, kExprI64x2SignSelect,
                         kExprI64x2Splat, mask);
}
#endif // V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// f32x4 qfma (a + b * c): the expected result depends on whether the tier
// and CPU emit a fused multiply-add (single rounding) or separate mul + add.
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfma) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
  // Set up global to hold mask output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test values, perform compare op, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_QFMA(
                               WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1)),
                               WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2)),
                               WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value3)))),
        WASM_ONE);
  for (FMOperation<float> x : qfma_vector<float>()) {
    r.Call(x.a, x.b, x.c);
    float expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
// f32x4 qfms (a - b * c): expected result chosen by ExpectFused, as above.
WASM_SIMD_TEST_NO_LOWERING(F32x4Qfms) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, float, float, float> r(execution_tier, lower_simd);
  // Set up global to hold mask output.
  float* g = r.builder().AddGlobal<float>(kWasmS128);
  // Build fn to splat test values, perform compare op, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_SET_GLOBAL(0, WASM_SIMD_F32x4_QFMS(
                               WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value1)),
                               WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value2)),
                               WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value3)))),
        WASM_ONE);
  for (FMOperation<float> x : qfms_vector<float>()) {
    r.Call(x.a, x.b, x.c);
    float expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 4; i++) {
      float actual = ReadLittleEndianValue<float>(&g[i]);
      CheckFloatResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
#endif // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// i64x2.splat must replicate the scalar argument into both lanes.
WASM_SIMD_TEST(I64x2Splat) {
  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);
  FOR_INT64_INPUTS(x) {
    r.Call(x);
    int64_t expected = x;
    for (int i = 0; i < 2; i++) {
      int64_t actual = ReadLittleEndianValue<int64_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
// Extracts lane 0 of a splat, re-splats that value, and extracts lane 1;
// the original 64-bit constant must round-trip unchanged.
WASM_SIMD_TEST(I64x2ExtractLane) {
  WasmRunner<int64_t> r(execution_tier, lower_simd);
  r.AllocateLocal(kWasmI64);
  r.AllocateLocal(kWasmS128);
  BUILD(
      r,
      WASM_SET_LOCAL(0, WASM_SIMD_I64x2_EXTRACT_LANE(
                            0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(0xFFFFFFFFFF)))),
      WASM_SET_LOCAL(1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(0))),
      WASM_SIMD_I64x2_EXTRACT_LANE(1, WASM_GET_LOCAL(1)));
  CHECK_EQ(0xFFFFFFFFFF, r.Call());
}
// i64x2.replace_lane: replaces both lanes of a splat so the final vector is
// {0, 1}.
WASM_SIMD_TEST(I64x2ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_I64V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I64V(0))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I64x2_REPLACE_LANE(
                               1, WASM_GET_LOCAL(temp1), WASM_I64V(1))),
        WASM_ONE);
  r.Call();
  for (int64_t i = 0; i < 2; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int64_t>(&g[i]));
  }
}
// Runs the I64x2 unary `opcode` on a splatted vector for every int64 input
// and compares both lanes against expected_op.
void RunI64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int64UnOp expected_op) {
  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT64_INPUTS(x) {
    r.Call(x);
    int64_t expected = expected_op(x);
    for (int i = 0; i < 2; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
    }
  }
}
// i64x2.neg; the wraparound helper avoids UB when negating INT64_MIN.
WASM_SIMD_TEST(I64x2Neg) {
  RunI64x2UnOpTest(execution_tier, lower_simd, kExprI64x2Neg,
                   base::NegateWithWraparound);
}
// Runs an I64x2 shift `opcode` for every shift amount 1..64, both as an
// immediate operand and as a value loaded from memory. Shifting by 64 must
// be a no-op because shift counts are taken modulo the lane width.
void RunI64x2ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int64ShiftOp expected_op) {
  // Intentionally shift by 64, should be no-op.
  for (int shift = 1; shift <= 64; shift++) {
    WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
    int64_t* g_imm = r.builder().AddGlobal<int64_t>(kWasmS128);
    int64_t* g_mem = r.builder().AddGlobal<int64_t>(kWasmS128);
    byte value = 0;
    byte simd = r.AllocateLocal(kWasmS128);
    // Shift using an immediate, and shift using a value loaded from memory.
    BUILD(
        r, WASM_SET_LOCAL(simd, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(simd),
                                              WASM_I32V(shift))),
        WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
                               opcode, WASM_GET_LOCAL(simd),
                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
        WASM_ONE);
    r.builder().WriteMemory(&memory[0], shift);
    FOR_INT64_INPUTS(x) {
      r.Call(x);
      int64_t expected = expected_op(x, shift);
      for (int i = 0; i < 2; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_imm[i]));
        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g_mem[i]));
      }
    }
  }
}
// I64x2 shifts: each test pairs a wasm opcode with the matching scalar shift
// helper (all take the shift count modulo 64).
WASM_SIMD_TEST(I64x2Shl) {
  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2Shl,
                      LogicalShiftLeft);
}
WASM_SIMD_TEST(I64x2ShrS) {
  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrS,
                      ArithmeticShiftRight);
}
WASM_SIMD_TEST(I64x2ShrU) {
  RunI64x2ShiftOpTest(execution_tier, lower_simd, kExprI64x2ShrU,
                      LogicalShiftRight);
}
// Tests an i64x2 binary opcode against a scalar reference implementation,
// checking both lanes for every pair of int64 test inputs.
void RunI64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, Int64BinOp expected_op) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, int64_t, int64_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);
  FOR_INT64_INPUTS(x) {
    FOR_INT64_INPUTS(y) {
      r.Call(x, y);
      int64_t expected = expected_op(x, y);
      for (int i = 0; i < 2; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int64_t>(&g[i]));
      }
    }
  }
}
WASM_SIMD_TEST(I64x2Add) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Add,
                    base::AddWithWraparound);
}
WASM_SIMD_TEST(I64x2Sub) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Sub,
                    base::SubWithWraparound);
}
// i64x2.eq is only enabled on targets where its codegen is implemented.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
WASM_SIMD_TEST_NO_LOWERING(I64x2Eq) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Eq, Equal);
}
#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// Tests f64x2.splat: both lanes must equal the input (NaN compared by class).
WASM_SIMD_TEST(F64x2Splat) {
  WasmRunner<int32_t, double> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);
  FOR_FLOAT64_INPUTS(x) {
    r.Call(x);
    double expected = x;
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      // NaN payloads are not guaranteed to round-trip, so only check NaN-ness.
      if (std::isnan(expected)) {
        CHECK(std::isnan(actual));
      } else {
        CHECK_EQ(actual, expected);
      }
    }
  }
}
// Tests f64x2.extract_lane for both lane indices by extracting lane 0,
// re-splatting it, then extracting lane 1.
WASM_SIMD_TEST(F64x2ExtractLane) {
  WasmRunner<double, double> r(execution_tier, lower_simd);
  byte param1 = 0;
  byte temp1 = r.AllocateLocal(kWasmF64);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r,
        WASM_SET_LOCAL(temp1,
                       WASM_SIMD_F64x2_EXTRACT_LANE(
                           0, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(param1)))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(temp1))),
        WASM_SIMD_F64x2_EXTRACT_LANE(1, WASM_GET_LOCAL(temp2)));
  FOR_FLOAT64_INPUTS(x) {
    double actual = r.Call(x);
    double expected = x;
    if (std::isnan(expected)) {
      CHECK(std::isnan(actual));
    } else {
      CHECK_EQ(actual, expected);
    }
  }
}
// Tests f64x2.replace_lane: replacing one lane must leave the other intact.
WASM_SIMD_TEST(F64x2ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up globals to hold input/output vector.
  double* g0 = r.builder().AddGlobal<double>(kWasmS128);
  double* g1 = r.builder().AddGlobal<double>(kWasmS128);
  // Build function to replace each lane with its (FP) index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e100))),
        // Replace lane 0.
        WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_REPLACE_LANE(
                               0, WASM_GET_LOCAL(temp1), WASM_F64(0.0f))),
        // Replace lane 1.
        WASM_SET_GLOBAL(1, WASM_SIMD_F64x2_REPLACE_LANE(
                               1, WASM_GET_LOCAL(temp1), WASM_F64(1.0f))),
        WASM_ONE);
  r.Call();
  CHECK_EQ(0., ReadLittleEndianValue<double>(&g0[0]));
  CHECK_EQ(1e100, ReadLittleEndianValue<double>(&g0[1]));
  CHECK_EQ(1e100, ReadLittleEndianValue<double>(&g1[0]));
  CHECK_EQ(1., ReadLittleEndianValue<double>(&g1[1]));
}
// Checks that f64x2.extract_lane reads the raw bits written by an i64x2
// splat (cross-interpretation of the same s128 value).
WASM_SIMD_TEST(F64x2ExtractLaneWithI64x2) {
  WasmRunner<int64_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_IF_ELSE_L(
               WASM_F64_EQ(WASM_SIMD_F64x2_EXTRACT_LANE(
                               0, WASM_SIMD_I64x2_SPLAT(WASM_I64V(1e15))),
                           WASM_F64_REINTERPRET_I64(WASM_I64V(1e15))),
               WASM_I64V(1), WASM_I64V(0)));
  CHECK_EQ(1, r.Call());
}
// Checks that i64x2.extract_lane reads the raw bits written by an f64x2
// splat (the converse of the test above).
WASM_SIMD_TEST(I64x2ExtractWithF64x2) {
  WasmRunner<int64_t> r(execution_tier, lower_simd);
  BUILD(r, WASM_IF_ELSE_L(
               WASM_I64_EQ(WASM_SIMD_I64x2_EXTRACT_LANE(
                               0, WASM_SIMD_F64x2_SPLAT(WASM_F64(1e15))),
                           WASM_I64_REINTERPRET_F64(WASM_F64(1e15))),
               WASM_I64V(1), WASM_I64V(0)));
  CHECK_EQ(1, r.Call());
}
// Returns true if |x| lies near the limits of the double range, where
// approximate-comparison tests would see outsized relative errors.
// Zero (of either sign) is never considered extreme.
bool IsExtreme(double x) {
  double abs_x = std::fabs(x);
  const double kSmallFloatThreshold = 1.0e-298;
  const double kLargeFloatThreshold = 1.0e298;
  // Fix: compare against the double literal 0.0, not the float literal 0.0f
  // (harmless after conversion, but misleading in a double comparison).
  return abs_x != 0.0 &&  // 0 or -0 are fine.
         (abs_x < kSmallFloatThreshold || abs_x > kLargeFloatThreshold);
}
// Returns true if |actual| is the "same" NaN as |expected|, ignoring the
// sign bit (which is non-deterministic) and allowing the quiet bit to have
// been set, since some implementations quieten signaling NaNs.
bool IsSameNan(double expected, double actual) {
  constexpr uint64_t kSignMask = 0x8000000000000000;
  constexpr uint64_t kQuietBit = 0x0008000000000000;
  const uint64_t expected_bits = bit_cast<uint64_t>(expected) & ~kSignMask;
  const uint64_t actual_bits = bit_cast<uint64_t>(actual) & ~kSignMask;
  if (expected_bits == actual_bits) return true;
  // Accept a quietened version of the expected NaN.
  return (expected_bits | kQuietBit) == actual_bits;
}
// Returns true if |actual| is a canonical NaN: quiet bit set and an all-zero
// payload (no bits outside the sign/exponent/quiet-bit region).
bool IsCanonical(double actual) {
  constexpr uint64_t kCanonicalMask = 0xFFF8000000000000;
  const uint64_t bits = bit_cast<uint64_t>(actual);
  return (bits & kCanonicalMask) == bits;
}
// Verifies a double lane result against the expected value for an operation
// with operands |x| and |y|. NaN expectations accept either operand's NaN,
// the expected NaN (modulo sign/quieting), or the canonical NaN. Non-NaN
// values are compared exactly (including the sign of zero) when |exact| is
// true; otherwise a 1% relative-error band is allowed.
void CheckDoubleResult(double x, double y, double expected, double actual,
                       bool exact = true) {
  if (std::isnan(expected)) {
    CHECK(std::isnan(actual));
    if (std::isnan(x) && IsSameNan(x, actual)) return;
    if (std::isnan(y) && IsSameNan(y, actual)) return;
    if (IsSameNan(expected, actual)) return;
    if (IsCanonical(actual)) return;
    // This is expected to assert; it's useful for debugging.
    CHECK_EQ(bit_cast<uint64_t>(expected), bit_cast<uint64_t>(actual));
  } else {
    if (exact) {
      CHECK_EQ(expected, actual);
      // The sign of 0's must match.
      CHECK_EQ(std::signbit(expected), std::signbit(actual));
      return;
    }
    // Otherwise, perform an approximate equality test. First check for
    // equality to handle +/-Infinity where approximate equality doesn't work.
    if (expected == actual) return;
    // 1% error allows all platforms to pass easily.
    // Fix: use the double literal 0.01; the previous float literal 0.01f
    // initialized this double constant with 0.00999999..., silently
    // narrowing the intended 1% band.
    constexpr double kApproximationError = 0.01;
    double abs_error = std::abs(expected) * kApproximationError,
           min = expected - abs_error, max = expected + abs_error;
    CHECK_LE(min, actual);
    CHECK_GE(max, actual);
  }
}
// Test some values not included in the double inputs from value_helper. These
// tests are useful for opcodes that are synthesized during code gen, like Min
// and Max on ia32 and x64.
static constexpr uint64_t double_nan_test_array[] = {
    // quiet NaNs, + and -
    0x7FF8000000000001, 0xFFF8000000000001,
    // with payload
    0x7FF8000000000011, 0xFFF8000000000011,
    // signaling NaNs, + and -
    0x7FF0000000000001, 0xFFF0000000000001,
    // with payload
    0x7FF0000000000011, 0xFFF0000000000011,
    // Both Infinities.
    0x7FF0000000000000, 0xFFF0000000000000,
    // Some "normal" numbers, 1 and -1.
    0x3FF0000000000000, 0xBFF0000000000000};
// Iterates indices into double_nan_test_array.
#define FOR_FLOAT64_NAN_INPUTS(i) \
  for (size_t i = 0; i < arraysize(double_nan_test_array); ++i)
// Tests an f64x2 unary opcode against a scalar reference implementation over
// the regular double inputs plus the dedicated NaN/infinity bit patterns.
// When |exact| is false, results are checked with a 1% error band and
// extreme-magnitude inputs are skipped.
void RunF64x2UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, DoubleUnOp expected_op,
                      bool exact = true) {
  WasmRunner<int32_t, double> r(execution_tier, lower_simd);
  // Global to hold output.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_FLOAT64_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    double expected = expected_op(x);
#if V8_OS_AIX
    if (!MightReverseSign<DoubleUnOp>(expected_op))
      expected = FpOpWorkaround<double>(x, expected);
#endif
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      CheckDoubleResult(x, x, expected, actual, exact);
    }
  }
  // Repeat with NaN/infinity bit patterns not covered by the inputs above.
  FOR_FLOAT64_NAN_INPUTS(i) {
    double x = bit_cast<double>(double_nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    // Extreme values have larger errors so skip them for approximation tests.
    if (!exact && IsExtreme(x)) continue;
    double expected = expected_op(x);
    if (!PlatformCanRepresent(expected)) continue;
    r.Call(x);
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      CheckDoubleResult(x, x, expected, actual, exact);
    }
  }
}
WASM_SIMD_TEST(F64x2Abs) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Abs, std::abs);
}
WASM_SIMD_TEST(F64x2Neg) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Neg, Negate);
}
WASM_SIMD_TEST(F64x2Sqrt) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Sqrt, std::sqrt);
}
WASM_SIMD_TEST(F64x2Ceil) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Ceil, ceil, true);
}
WASM_SIMD_TEST(F64x2Floor) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Floor, floor, true);
}
WASM_SIMD_TEST(F64x2Trunc) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2Trunc, trunc, true);
}
WASM_SIMD_TEST(F64x2NearestInt) {
  RunF64x2UnOpTest(execution_tier, lower_simd, kExprF64x2NearestInt, nearbyint,
                   true);
}
// Tests an f64x2 binary opcode against a scalar reference implementation,
// checking both lanes of the result. Exercises the regular double inputs as
// well as the dedicated NaN/infinity bit patterns.
void RunF64x2BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, DoubleBinOp expected_op) {
  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
  // Global to hold output.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  // Build fn to splat test values, perform binop, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);
  FOR_FLOAT64_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT64_INPUTS(y) {
      // Bug fix: guard on the inner-loop operand y. This previously
      // re-checked x, so unrepresentable y values slipped through to the
      // reference op (cf. the correct guard in RunF64x2CompareOpTest).
      if (!PlatformCanRepresent(y)) continue;
      double expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 2; i++) {
        double actual = ReadLittleEndianValue<double>(&g[i]);
        CheckDoubleResult(x, y, expected, actual, true /* exact */);
      }
    }
  }
  // Repeat with NaN/infinity bit patterns not covered by the inputs above.
  FOR_FLOAT64_NAN_INPUTS(i) {
    double x = bit_cast<double>(double_nan_test_array[i]);
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT64_NAN_INPUTS(j) {
      double y = bit_cast<double>(double_nan_test_array[j]);
      double expected = expected_op(x, y);
      if (!PlatformCanRepresent(expected)) continue;
      r.Call(x, y);
      for (int i = 0; i < 2; i++) {
        double actual = ReadLittleEndianValue<double>(&g[i]);
        CheckDoubleResult(x, y, expected, actual, true /* exact */);
      }
    }
  }
}
#undef FOR_FLOAT64_NAN_INPUTS
WASM_SIMD_TEST(F64x2Add) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Add, Add);
}
WASM_SIMD_TEST(F64x2Sub) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Sub, Sub);
}
WASM_SIMD_TEST(F64x2Mul) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Mul, Mul);
}
WASM_SIMD_TEST(F64x2Div) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Div, base::Divide);
}
WASM_SIMD_TEST(F64x2Pmin) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmin, Minimum);
}
WASM_SIMD_TEST(F64x2Pmax) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Pmax, Maximum);
}
// Tests an f64x2 comparison opcode. The result global holds the all-ones /
// all-zeros lane masks produced by the comparison.
void RunF64x2CompareOpTest(TestExecutionTier execution_tier,
                           LowerSimd lower_simd, WasmOpcode opcode,
                           DoubleCompareOp expected_op) {
  WasmRunner<int32_t, double, double> r(execution_tier, lower_simd);
  // Set up global to hold mask output.
  int64_t* g = r.builder().AddGlobal<int64_t>(kWasmS128);
  // Build fn to splat test values, perform compare op, and write the result.
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  // Make the lanes of each temp compare differently:
  // temp1 = (x, y) and temp2 = (y, y), so lane 0 compares x vs y and
  // lane 1 compares y vs y.
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp1,
                       WASM_SIMD_F64x2_REPLACE_LANE(1, WASM_GET_LOCAL(temp1),
                                                    WASM_GET_LOCAL(value2))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(temp1),
                                           WASM_GET_LOCAL(temp2))),
        WASM_ONE);
  FOR_FLOAT64_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    FOR_FLOAT64_INPUTS(y) {
      if (!PlatformCanRepresent(y)) continue;
      double diff = x - y;  // Model comparison as subtraction.
      if (!PlatformCanRepresent(diff)) continue;
      r.Call(x, y);
      int64_t expected0 = expected_op(x, y);
      int64_t expected1 = expected_op(y, y);
      CHECK_EQ(expected0, ReadLittleEndianValue<int64_t>(&g[0]));
      CHECK_EQ(expected1, ReadLittleEndianValue<int64_t>(&g[1]));
    }
  }
}
WASM_SIMD_TEST(F64x2Eq) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Eq, Equal);
}
WASM_SIMD_TEST(F64x2Ne) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ne, NotEqual);
}
WASM_SIMD_TEST(F64x2Gt) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Gt, Greater);
}
WASM_SIMD_TEST(F64x2Ge) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Ge, GreaterEqual);
}
WASM_SIMD_TEST(F64x2Lt) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Lt, Less);
}
WASM_SIMD_TEST(F64x2Le) {
  RunF64x2CompareOpTest(execution_tier, lower_simd, kExprF64x2Le, LessEqual);
}
WASM_SIMD_TEST(F64x2Min) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Min, JSMin);
}
WASM_SIMD_TEST(F64x2Max) {
  RunF64x2BinOpTest(execution_tier, lower_simd, kExprF64x2Max, JSMax);
}
WASM_SIMD_TEST(I64x2Mul) {
  RunI64x2BinOpTest(execution_tier, lower_simd, kExprI64x2Mul,
                    base::MulWithWraparound);
}
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// Tests f64x2.qfma (a + b * c), accepting either the fused or unfused result
// depending on the execution tier.
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfma) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
  // Set up global to hold the result vector.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  // Build fn to splat test values, perform the qfma, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_QFMA(
                               WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1)),
                               WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2)),
                               WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value3)))),
        WASM_ONE);
  for (FMOperation<double> x : qfma_vector<double>()) {
    r.Call(x.a, x.b, x.c);
    double expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
// Tests f64x2.qfms (a - b * c), accepting either the fused or unfused result
// depending on the execution tier.
WASM_SIMD_TEST_NO_LOWERING(F64x2Qfms) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, double, double, double> r(execution_tier, lower_simd);
  // Set up global to hold the result vector.
  double* g = r.builder().AddGlobal<double>(kWasmS128);
  // Build fn to splat test values, perform the qfms, and write the result.
  byte value1 = 0, value2 = 1, value3 = 2;
  BUILD(r,
        WASM_SET_GLOBAL(0, WASM_SIMD_F64x2_QFMS(
                               WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value1)),
                               WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value2)),
                               WASM_SIMD_F64x2_SPLAT(WASM_GET_LOCAL(value3)))),
        WASM_ONE);
  for (FMOperation<double> x : qfms_vector<double>()) {
    r.Call(x.a, x.b, x.c);
    double expected =
        ExpectFused(execution_tier) ? x.fused_result : x.unfused_result;
    for (int i = 0; i < 2; i++) {
      double actual = ReadLittleEndianValue<double>(&g[i]);
      CheckDoubleResult(x.a, x.b, expected, actual, true /* exact */);
    }
  }
}
#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_S390X
// Tests i32x4.splat: all four lanes must equal the input.
WASM_SIMD_TEST(I32x4Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int32_t expected = x;
    for (int i = 0; i < 4; i++) {
      int32_t actual = ReadLittleEndianValue<int32_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
// Tests i32x4.replace_lane on every lane index.
WASM_SIMD_TEST(I32x4ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_I32V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I32x4_REPLACE_LANE(
                               3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
        WASM_ONE);
  r.Call();
  for (int32_t i = 0; i < 4; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int32_t>(&g[i]));
  }
}
// Tests i16x8.splat: all eight lanes must equal the (truncated) input.
WASM_SIMD_TEST(I16x8Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);
  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int16_t expected = x;
    for (int i = 0; i < 8; i++) {
      int16_t actual = ReadLittleEndianValue<int16_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
  // Test values that do not fit in a int16: the splat must use only the low
  // 16 bits of the input.
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int16_t expected = truncate_to_int16(x);
    for (int i = 0; i < 8; i++) {
      int16_t actual = ReadLittleEndianValue<int16_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
// Tests i16x8.replace_lane on every lane index.
WASM_SIMD_TEST(I16x8ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int16_t* g = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_I32V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  4, WASM_GET_LOCAL(temp1), WASM_I32V(4))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  5, WASM_GET_LOCAL(temp1), WASM_I32V(5))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_REPLACE_LANE(
                                  6, WASM_GET_LOCAL(temp1), WASM_I32V(6))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I16x8_REPLACE_LANE(
                               7, WASM_GET_LOCAL(temp1), WASM_I32V(7))),
        WASM_ONE);
  r.Call();
  for (int16_t i = 0; i < 8; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int16_t>(&g[i]));
  }
}
// Tests i8x16.bitmask. Lane 0 is forced to 0 and lane 1 to -1, so a negative
// splat value yields all bits set except bit 0 (0xFFFE), and a non-negative
// value yields only bit 1 (0x0002).
WASM_SIMD_TEST(I8x16BitMask) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I8x16_REPLACE_LANE(
                                   0, WASM_GET_LOCAL(value1), WASM_I32V(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I8x16_REPLACE_LANE(
                                   1, WASM_GET_LOCAL(value1), WASM_I32V(-1))),
        WASM_SIMD_UNOP(kExprI8x16BitMask, WASM_GET_LOCAL(value1)));
  FOR_INT8_INPUTS(x) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive), lane 1 is always -1.
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0xFFFE : 0x0002;
    CHECK_EQ(actual, expected);
  }
}
// Tests i16x8.bitmask with the same lane-0/lane-1 forcing as above.
WASM_SIMD_TEST(I16x8BitMask) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I16x8_REPLACE_LANE(
                                   0, WASM_GET_LOCAL(value1), WASM_I32V(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I16x8_REPLACE_LANE(
                                   1, WASM_GET_LOCAL(value1), WASM_I32V(-1))),
        WASM_SIMD_UNOP(kExprI16x8BitMask, WASM_GET_LOCAL(value1)));
  FOR_INT16_INPUTS(x) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive), lane 1 is always -1.
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0xFE : 2;
    CHECK_EQ(actual, expected);
  }
}
// Tests i32x4.bitmask with the same lane-0/lane-1 forcing as above.
WASM_SIMD_TEST(I32x4BitMask) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I32x4_REPLACE_LANE(
                                   0, WASM_GET_LOCAL(value1), WASM_I32V(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I32x4_REPLACE_LANE(
                                   1, WASM_GET_LOCAL(value1), WASM_I32V(-1))),
        WASM_SIMD_UNOP(kExprI32x4BitMask, WASM_GET_LOCAL(value1)));
  FOR_INT32_INPUTS(x) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive), lane 1 is always -1.
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0xE : 2;
    CHECK_EQ(actual, expected);
  }
}
// TODO(v8:10997) Prototyping i64x2.bitmask.
#if V8_TARGET_ARCH_X64
WASM_SIMD_TEST_NO_LOWERING(I64x2BitMask) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, int64_t> r(execution_tier, lower_simd);
  byte value1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(value1, WASM_SIMD_I64x2_SPLAT(WASM_GET_LOCAL(0))),
        WASM_SET_LOCAL(value1, WASM_SIMD_I64x2_REPLACE_LANE(
                                   0, WASM_GET_LOCAL(value1), WASM_I64V_1(0))),
        WASM_SIMD_UNOP(kExprI64x2BitMask, WASM_GET_LOCAL(value1)));
  for (int64_t x : compiler::ValueHelper::GetVector<int64_t>()) {
    int32_t actual = r.Call(x);
    // Lane 0 is always 0 (positive).
    int32_t expected = std::signbit(static_cast<double>(x)) ? 0x2 : 0x0;
    CHECK_EQ(actual, expected);
  }
}
#endif  // V8_TARGET_ARCH_X64
// Tests i8x16.splat: all sixteen lanes must equal the (truncated) input.
WASM_SIMD_TEST(I8x16Splat) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold output vector.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  byte param1 = 0;
  BUILD(r, WASM_SET_GLOBAL(0, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_ONE);
  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int8_t expected = x;
    for (int i = 0; i < 16; i++) {
      int8_t actual = ReadLittleEndianValue<int8_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
  // Test values that do not fit in an int8: the splat must use only the low
  // 8 bits of the input.
  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int8_t expected = truncate_to_int8(x);
    for (int i = 0; i < 16; i++) {
      int8_t actual = ReadLittleEndianValue<int8_t>(&g[i]);
      CHECK_EQ(actual, expected);
    }
  }
}
// Tests i8x16.replace_lane on every lane index.
WASM_SIMD_TEST(I8x16ReplaceLane) {
  WasmRunner<int32_t> r(execution_tier, lower_simd);
  // Set up a global to hold input/output vector.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build function to replace each lane with its index.
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_I32V(-1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  0, WASM_GET_LOCAL(temp1), WASM_I32V(0))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  1, WASM_GET_LOCAL(temp1), WASM_I32V(1))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  2, WASM_GET_LOCAL(temp1), WASM_I32V(2))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  3, WASM_GET_LOCAL(temp1), WASM_I32V(3))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  4, WASM_GET_LOCAL(temp1), WASM_I32V(4))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  5, WASM_GET_LOCAL(temp1), WASM_I32V(5))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  6, WASM_GET_LOCAL(temp1), WASM_I32V(6))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  7, WASM_GET_LOCAL(temp1), WASM_I32V(7))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  8, WASM_GET_LOCAL(temp1), WASM_I32V(8))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  9, WASM_GET_LOCAL(temp1), WASM_I32V(9))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  10, WASM_GET_LOCAL(temp1), WASM_I32V(10))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  11, WASM_GET_LOCAL(temp1), WASM_I32V(11))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  12, WASM_GET_LOCAL(temp1), WASM_I32V(12))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  13, WASM_GET_LOCAL(temp1), WASM_I32V(13))),
        WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_REPLACE_LANE(
                                  14, WASM_GET_LOCAL(temp1), WASM_I32V(14))),
        WASM_SET_GLOBAL(0, WASM_SIMD_I8x16_REPLACE_LANE(
                               15, WASM_GET_LOCAL(temp1), WASM_I32V(15))),
        WASM_ONE);
  r.Call();
  for (int8_t i = 0; i < 16; i++) {
    CHECK_EQ(i, ReadLittleEndianValue<int8_t>(&g[i]));
  }
}
// Use doubles to ensure exact conversion.
// Saturating conversion of a double to a 32-bit integer lane value: NaN
// becomes 0 and out-of-range values clamp to the target type's limits.
// The unsigned result travels through the int32_t return type, so a
// saturated unsigned maximum reads back as -1 (same bit pattern).
// Uses std::numeric_limits instead of the project-global kMaxUInt32 /
// kMinInt / kMaxInt constants so the widths are explicit at the use site.
int32_t ConvertToInt(double val, bool unsigned_integer) {
  if (std::isnan(val)) return 0;
  if (unsigned_integer) {
    if (val < 0) return 0;
    if (val > std::numeric_limits<uint32_t>::max()) {
      return std::numeric_limits<uint32_t>::max();
    }
    return static_cast<uint32_t>(val);
  }
  if (val < std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  if (val > std::numeric_limits<int32_t>::max()) {
    return std::numeric_limits<int32_t>::max();
  }
  return static_cast<int>(val);
}
// Tests both signed and unsigned conversion (i32x4.trunc_sat_f32x4_{s,u}).
WASM_SIMD_TEST(I32x4ConvertF32x4) {
  WasmRunner<int32_t, float> r(execution_tier, lower_simd);
  // Create two output vectors to hold signed and unsigned results.
  int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_F32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_UNOP(kExprI32x4SConvertF32x4, WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_UNOP(kExprI32x4UConvertF32x4, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_FLOAT32_INPUTS(x) {
    if (!PlatformCanRepresent(x)) continue;
    r.Call(x);
    // ConvertToInt models the saturating conversion semantics.
    int32_t expected_signed = ConvertToInt(x, false);
    int32_t expected_unsigned = ConvertToInt(x, true);
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g1[i]));
    }
  }
}
// Tests both signed and unsigned conversion from I16x8 (unpacking).
WASM_SIMD_TEST(I32x4ConvertI16x8) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results.
  int32_t* g0 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g1 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g2 = r.builder().AddGlobal<int32_t>(kWasmS128);
  int32_t* g3 = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI32x4SConvertI16x8Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI32x4UConvertI16x8Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT16_INPUTS(x) {
    r.Call(x);
    // Signed widening sign-extends; unsigned widening zero-extends.
    int32_t expected_signed = static_cast<int32_t>(x);
    int32_t expected_unsigned = static_cast<int32_t>(static_cast<uint16_t>(x));
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int32_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int32_t>(&g3[i]));
    }
  }
}
// TODO(v8:10972) Prototyping i64x2 convert from i32x4.
// Tests both signed and unsigned conversion from I32x4 (unpacking).
#if V8_TARGET_ARCH_ARM64
WASM_SIMD_TEST_NO_LOWERING(I64x2ConvertI32x4) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results.
  int64_t* g0 = r.builder().AddGlobal<int64_t>(kWasmS128);
  int64_t* g1 = r.builder().AddGlobal<int64_t>(kWasmS128);
  int64_t* g2 = r.builder().AddGlobal<int64_t>(kWasmS128);
  int64_t* g3 = r.builder().AddGlobal<int64_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI64x2SConvertI32x4Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI64x2UConvertI32x4Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    // Signed widening sign-extends; unsigned widening zero-extends.
    int64_t expected_signed = static_cast<int64_t>(x);
    int64_t expected_unsigned = static_cast<int64_t>(static_cast<uint32_t>(x));
    for (int i = 0; i < 2; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int64_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int64_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int64_t>(&g3[i]));
    }
  }
}
#endif  // V8_TARGET_ARCH_ARM64
// Tests an i32x4 unary opcode against a scalar reference implementation,
// checking all four lanes for every int32 test input.
void RunI32x4UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int32UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int32_t* g = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    int32_t expected = expected_op(x);
    for (int i = 0; i < 4; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
    }
  }
}
WASM_SIMD_TEST(I32x4Neg) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Neg,
                   base::NegateWithWraparound);
}
WASM_SIMD_TEST(I32x4Abs) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprI32x4Abs, std::abs);
}
// s128.not is bitwise, so testing it through the i32x4 helper is sufficient.
WASM_SIMD_TEST(S128Not) {
  RunI32x4UnOpTest(execution_tier, lower_simd, kExprS128Not,
                   [](int32_t x) { return ~x; });
}
#if V8_TARGET_ARCH_ARM64
// TODO(v8:11086) Prototype i32x4.extadd_pairwise_i16x8_{s,u}
// Splats a Narrow value, applies the extadd-pairwise opcode, and checks that
// each Wide output lane equals the widened sum of a pair of equal inputs.
template <typename Narrow, typename Wide>
void RunExtAddPairwiseTest(TestExecutionTier execution_tier,
                           LowerSimd lower_simd, WasmOpcode ext_add_pairwise,
                           WasmOpcode splat) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  constexpr int num_lanes = kSimd128Size / sizeof(Wide);
  WasmRunner<int32_t, Narrow> r(execution_tier, lower_simd);
  Wide* g = r.builder().template AddGlobal<Wide>(kWasmS128);
  // TODO(v8:11086) We splat the same value, so pairwise adding ends up adding
  // the same value to itself, consider a more complicated test, like having 2
  // vectors, and shuffling them.
  BUILD(r, WASM_GET_LOCAL(0), WASM_SIMD_OP(splat),
        WASM_SIMD_OP(ext_add_pairwise), kExprGlobalSet, 0, WASM_ONE);
  for (Narrow x : compiler::ValueHelper::GetVector<Narrow>()) {
    r.Call(x);
    // AddLong widens before adding, so narrow-type overflow is modeled.
    Wide expected = AddLong<Wide>(x, x);
    for (int i = 0; i < num_lanes; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<Wide>(&g[i]));
    }
  }
}
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8S) {
  RunExtAddPairwiseTest<int16_t, int32_t>(execution_tier, lower_simd,
                                          kExprI32x4ExtAddPairwiseI16x8S,
                                          kExprI16x8Splat);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtAddPairwiseI16x8U) {
  RunExtAddPairwiseTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                            kExprI32x4ExtAddPairwiseI16x8U,
                                            kExprI16x8Splat);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16S) {
  RunExtAddPairwiseTest<int8_t, int16_t>(execution_tier, lower_simd,
                                         kExprI16x8ExtAddPairwiseI8x16S,
                                         kExprI8x16Splat);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8ExtAddPairwiseI8x16U) {
  RunExtAddPairwiseTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                           kExprI16x8ExtAddPairwiseI8x16U,
                                           kExprI8x16Splat);
}
#endif  // V8_TARGET_ARCH_ARM64
// Splats two scalar inputs across all four i32 lanes, applies the binary
// |opcode|, and checks every result lane against the scalar |expected_op|.
void RunI32x4BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, Int32BinOp expected_op) {
  WasmRunner<int32_t, int32_t, int32_t> r(execution_tier, lower_simd);
  // Output global the result vector is written into.
  int32_t* output = r.builder().AddGlobal<int32_t>(kWasmS128);
  // Function splats both parameters, applies the binop, stores to the global.
  byte param0 = 0, param1 = 1;
  byte lhs = r.AllocateLocal(kWasmS128);
  byte rhs = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(lhs, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(param0))),
        WASM_SET_LOCAL(rhs, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(lhs),
                                           WASM_GET_LOCAL(rhs))),
        WASM_ONE);
  FOR_INT32_INPUTS(x) {
    FOR_INT32_INPUTS(y) {
      r.Call(x, y);
      int32_t want = expected_op(x, y);
      for (int lane = 0; lane < 4; lane++) {
        CHECK_EQ(want, ReadLittleEndianValue<int32_t>(&output[lane]));
      }
    }
  }
}
// i32x4 lane-wise binary ops: wrapping arithmetic, signed/unsigned min/max,
// 128-bit bitwise ops, and comparisons, each checked against a scalar
// reference implementation via RunI32x4BinOpTest.
WASM_SIMD_TEST(I32x4Add) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Add,
                    base::AddWithWraparound);
}
WASM_SIMD_TEST(I32x4Sub) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Sub,
                    base::SubWithWraparound);
}
WASM_SIMD_TEST(I32x4Mul) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Mul,
                    base::MulWithWraparound);
}
WASM_SIMD_TEST(I32x4MinS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinS, Minimum);
}
WASM_SIMD_TEST(I32x4MaxS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxS, Maximum);
}
WASM_SIMD_TEST(I32x4MinU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MinU,
                    UnsignedMinimum);
}
WASM_SIMD_TEST(I32x4MaxU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4MaxU,
                    UnsignedMaximum);
}
WASM_SIMD_TEST(S128And) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128And,
                    [](int32_t x, int32_t y) { return x & y; });
}
WASM_SIMD_TEST(S128Or) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Or,
                    [](int32_t x, int32_t y) { return x | y; });
}
WASM_SIMD_TEST(S128Xor) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128Xor,
                    [](int32_t x, int32_t y) { return x ^ y; });
}
// S128AndNot is a bitwise operation, so it does not matter which SIMD lane
// shape we exercise it with; i32x4 is used for convenience.
WASM_SIMD_TEST(S128AndNot) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprS128AndNot,
                    [](int32_t x, int32_t y) { return x & ~y; });
}
WASM_SIMD_TEST(I32x4Eq) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Eq, Equal);
}
WASM_SIMD_TEST(I32x4Ne) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4Ne, NotEqual);
}
WASM_SIMD_TEST(I32x4LtS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtS, Less);
}
WASM_SIMD_TEST(I32x4LeS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeS, LessEqual);
}
WASM_SIMD_TEST(I32x4GtS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtS, Greater);
}
WASM_SIMD_TEST(I32x4GeS) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeS, GreaterEqual);
}
WASM_SIMD_TEST(I32x4LtU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LtU, UnsignedLess);
}
WASM_SIMD_TEST(I32x4LeU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4LeU,
                    UnsignedLessEqual);
}
WASM_SIMD_TEST(I32x4GtU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I32x4GeU) {
  RunI32x4BinOpTest(execution_tier, lower_simd, kExprI32x4GeU,
                    UnsignedGreaterEqual);
}
// Runs every shift amount 1..32, encoding the count both as an immediate and
// as a value loaded from linear memory; both results must match |expected_op|.
// Intentionally includes a shift by 32, which should be a no-op.
void RunI32x4ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int32ShiftOp expected_op) {
  for (int count = 1; count <= 32; count++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
    // Global 0 receives the immediate-shift result, global 1 the memory-shift
    // result.
    int32_t* out_imm = r.builder().AddGlobal<int32_t>(kWasmS128);
    int32_t* out_mem = r.builder().AddGlobal<int32_t>(kWasmS128);
    byte param = 0;
    byte vec = r.AllocateLocal(kWasmS128);
    BUILD(
        r, WASM_SET_LOCAL(vec, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(param))),
        WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(vec),
                                              WASM_I32V(count))),
        WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
                               opcode, WASM_GET_LOCAL(vec),
                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
        WASM_ONE);
    r.builder().WriteMemory(&memory[0], count);
    FOR_INT32_INPUTS(x) {
      r.Call(x);
      int32_t want = expected_op(x, count);
      for (int lane = 0; lane < 4; lane++) {
        CHECK_EQ(want, ReadLittleEndianValue<int32_t>(&out_imm[lane]));
        CHECK_EQ(want, ReadLittleEndianValue<int32_t>(&out_mem[lane]));
      }
    }
  }
}
// i32x4 shifts: left, arithmetic right, and logical right.
WASM_SIMD_TEST(I32x4Shl) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4Shl,
                      LogicalShiftLeft);
}
WASM_SIMD_TEST(I32x4ShrS) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrS,
                      ArithmeticShiftRight);
}
WASM_SIMD_TEST(I32x4ShrU) {
  RunI32x4ShiftOpTest(execution_tier, lower_simd, kExprI32x4ShrU,
                      LogicalShiftRight);
}
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create four output vectors to hold signed and unsigned results:
  // g0 = signed/high, g1 = signed/low, g2 = unsigned/high, g3 = unsigned/low
  // (matching the four conversions stored in the BUILD body below).
  int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g2 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g3 = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(1, WASM_SIMD_UNOP(kExprI16x8SConvertI8x16Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(2, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16High,
                                          WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(3, WASM_SIMD_UNOP(kExprI16x8UConvertI8x16Low,
                                          WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT8_INPUTS(x) {
    r.Call(x);
    // Signed unpack sign-extends; unsigned unpack zero-extends the byte.
    int16_t expected_signed = static_cast<int16_t>(x);
    int16_t expected_unsigned = static_cast<int16_t>(static_cast<uint8_t>(x));
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g1[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g2[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g3[i]));
    }
  }
}
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_SIMD_TEST(I16x8ConvertI32x4) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create output vectors to hold signed (g0) and unsigned (g1) results.
  int16_t* g0 = r.builder().AddGlobal<int16_t>(kWasmS128);
  int16_t* g1 = r.builder().AddGlobal<int16_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  // Both operands of the narrowing binop are the same splatted vector.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I32x4_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_BINOP(kExprI16x8SConvertI32x4, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_BINOP(kExprI16x8UConvertI32x4, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT32_INPUTS(x) {
    r.Call(x);
    // Packing saturates to the destination lane's value range.
    int16_t expected_signed = Saturate<int16_t>(x);
    int16_t expected_unsigned = Saturate<uint16_t>(x);
    for (int i = 0; i < 8; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int16_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int16_t>(&g1[i]));
    }
  }
}
// Splats the test value across all 8 lanes, applies the unary |opcode|, and
// checks every lane of the result against the scalar |expected_op|.
void RunI16x8UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int16UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Output global the result vector is written into.
  int16_t* output = r.builder().AddGlobal<int16_t>(kWasmS128);
  byte param = 0;
  byte vec = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(vec, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(vec))),
        WASM_ONE);
  FOR_INT16_INPUTS(x) {
    r.Call(x);
    int16_t want = expected_op(x);
    for (int lane = 0; lane < 8; lane++) {
      CHECK_EQ(want, ReadLittleEndianValue<int16_t>(&output[lane]));
    }
  }
}
// i16x8 unary ops: wrapping negation and absolute value.
WASM_SIMD_TEST(I16x8Neg) {
  RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Neg,
                   base::NegateWithWraparound);
}
WASM_SIMD_TEST(I16x8Abs) {
  RunI16x8UnOpTest(execution_tier, lower_simd, kExprI16x8Abs, Abs);
}
// Splats two scalar inputs across all 8 lanes, applies the binary |opcode|,
// and verifies every result lane against the scalar |expected_op|. T selects
// the signed vs. unsigned lane interpretation used by the reference.
template <typename T = int16_t, typename OpType = T (*)(T, T)>
void RunI16x8BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, OpType expected_op) {
  WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
  // Output global the result vector is written into.
  T* output = r.builder().template AddGlobal<T>(kWasmS128);
  byte param0 = 0, param1 = 1;
  byte lhs = r.AllocateLocal(kWasmS128);
  byte rhs = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(lhs, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param0))),
        WASM_SET_LOCAL(rhs, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(lhs),
                                           WASM_GET_LOCAL(rhs))),
        WASM_ONE);
  for (T x : compiler::ValueHelper::GetVector<T>()) {
    for (T y : compiler::ValueHelper::GetVector<T>()) {
      r.Call(x, y);
      T want = expected_op(x, y);
      for (int lane = 0; lane < 8; lane++) {
        CHECK_EQ(want, ReadLittleEndianValue<T>(&output[lane]));
      }
    }
  }
}
// i16x8 lane-wise binary ops: wrapping and saturating arithmetic, min/max,
// comparisons, and rounding average, each checked against a scalar reference
// via RunI16x8BinOpTest (instantiated with uint16_t for unsigned semantics).
WASM_SIMD_TEST(I16x8Add) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Add,
                    base::AddWithWraparound);
}
WASM_SIMD_TEST(I16x8AddSatS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatS,
                    SaturateAdd<int16_t>);
}
WASM_SIMD_TEST(I16x8Sub) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Sub,
                    base::SubWithWraparound);
}
WASM_SIMD_TEST(I16x8SubSatS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatS,
                    SaturateSub<int16_t>);
}
WASM_SIMD_TEST(I16x8Mul) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Mul,
                    base::MulWithWraparound);
}
WASM_SIMD_TEST(I16x8MinS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinS, Minimum);
}
WASM_SIMD_TEST(I16x8MaxS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxS, Maximum);
}
WASM_SIMD_TEST(I16x8AddSatU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8AddSatU,
                    SaturateAdd<uint16_t>);
}
WASM_SIMD_TEST(I16x8SubSatU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8SubSatU,
                    SaturateSub<uint16_t>);
}
WASM_SIMD_TEST(I16x8MinU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MinU,
                    UnsignedMinimum);
}
WASM_SIMD_TEST(I16x8MaxU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8MaxU,
                    UnsignedMaximum);
}
WASM_SIMD_TEST(I16x8Eq) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Eq, Equal);
}
WASM_SIMD_TEST(I16x8Ne) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8Ne, NotEqual);
}
WASM_SIMD_TEST(I16x8LtS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtS, Less);
}
WASM_SIMD_TEST(I16x8LeS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeS, LessEqual);
}
WASM_SIMD_TEST(I16x8GtS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtS, Greater);
}
WASM_SIMD_TEST(I16x8GeS) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeS, GreaterEqual);
}
WASM_SIMD_TEST(I16x8GtU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GtU, UnsignedGreater);
}
WASM_SIMD_TEST(I16x8GeU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8GeU,
                    UnsignedGreaterEqual);
}
WASM_SIMD_TEST(I16x8LtU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LtU, UnsignedLess);
}
WASM_SIMD_TEST(I16x8LeU) {
  RunI16x8BinOpTest(execution_tier, lower_simd, kExprI16x8LeU,
                    UnsignedLessEqual);
}
WASM_SIMD_TEST(I16x8RoundingAverageU) {
  RunI16x8BinOpTest<uint16_t>(execution_tier, lower_simd,
                              kExprI16x8RoundingAverageU,
                              base::RoundingAverageUnsigned);
}
#if V8_TARGET_ARCH_ARM64
// TODO(v8:10971) Prototype i16x8.q15mulr_sat_s
WASM_SIMD_TEST_NO_LOWERING(I16x8Q15MulRSatS) {
  // Post-MVP opcode: the flag must be enabled for the scope of this test.
  FLAG_SCOPE(wasm_simd_post_mvp);
  // Checked against the scalar SaturateRoundingQMul reference.
  RunI16x8BinOpTest<int16_t>(execution_tier, lower_simd, kExprI16x8Q15MulRSatS,
                             SaturateRoundingQMul<int16_t>);
}
// TODO(v8:11008) Prototype extended multiplication.
namespace {
// Selects which half of the extended multiply is under test.
enum class MulHalf { kLow, kHigh };
// Helper to run ext mul tests. It will splat 2 input values into 2 v128, call
// the mul op on these operands, and set the result into a global.
// It will zero the top or bottom half of one of the operands, this will catch
// mistakes if we are multiplying the incorrect halves.
template <typename S, typename T, typename OpType = T (*)(S, S)>
void RunExtMulTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                   WasmOpcode opcode, OpType expected_op, WasmOpcode splat,
                   MulHalf half) {
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, S, S> r(execution_tier, lower_simd);
  // Zero the half the op under test should ignore: i64x2 lane 1 (the high
  // 64 bits) when testing the low-half op, lane 0 when testing the high-half
  // op. i64x2.replace_lane is usable regardless of the splatted lane shape.
  int lane_to_zero = half == MulHalf::kLow ? 1 : 0;
  T* g = r.builder().template AddGlobal<T>(kWasmS128);
  BUILD(r,
        WASM_SET_GLOBAL(
            0, WASM_SIMD_BINOP(
                   opcode,
                   WASM_SIMD_I64x2_REPLACE_LANE(
                       lane_to_zero, WASM_SIMD_UNOP(splat, WASM_GET_LOCAL(0)),
                       WASM_I64V_1(0)),
                   WASM_SIMD_UNOP(splat, WASM_GET_LOCAL(1)))),
        WASM_ONE);
  constexpr int lanes = kSimd128Size / sizeof(T);
  for (S x : compiler::ValueHelper::GetVector<S>()) {
    for (S y : compiler::ValueHelper::GetVector<S>()) {
      r.Call(x, y);
      // Every result lane is expected to be x * y widened to T.
      T expected = expected_op(x, y);
      for (int i = 0; i < lanes; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<T>(&g[i]));
      }
    }
  }
}
}  // namespace
// Instantiations of RunExtMulTest covering low/high x signed/unsigned for the
// i16x8, i32x4, and i64x2 extended multiplies.
WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulLowI8x16S) {
  RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
                                 kExprI16x8ExtMulLowI8x16S, MultiplyLong,
                                 kExprI8x16Splat, MulHalf::kLow);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulHighI8x16S) {
  RunExtMulTest<int8_t, int16_t>(execution_tier, lower_simd,
                                 kExprI16x8ExtMulHighI8x16S, MultiplyLong,
                                 kExprI8x16Splat, MulHalf::kHigh);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulLowI8x16U) {
  RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                   kExprI16x8ExtMulLowI8x16U, MultiplyLong,
                                   kExprI8x16Splat, MulHalf::kLow);
}
WASM_SIMD_TEST_NO_LOWERING(I16x8ExtMulHighI8x16U) {
  RunExtMulTest<uint8_t, uint16_t>(execution_tier, lower_simd,
                                   kExprI16x8ExtMulHighI8x16U, MultiplyLong,
                                   kExprI8x16Splat, MulHalf::kHigh);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulLowI16x8S) {
  RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
                                  kExprI32x4ExtMulLowI16x8S, MultiplyLong,
                                  kExprI16x8Splat, MulHalf::kLow);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulHighI16x8S) {
  RunExtMulTest<int16_t, int32_t>(execution_tier, lower_simd,
                                  kExprI32x4ExtMulHighI16x8S, MultiplyLong,
                                  kExprI16x8Splat, MulHalf::kHigh);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulLowI16x8U) {
  RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                    kExprI32x4ExtMulLowI16x8U, MultiplyLong,
                                    kExprI16x8Splat, MulHalf::kLow);
}
WASM_SIMD_TEST_NO_LOWERING(I32x4ExtMulHighI16x8U) {
  RunExtMulTest<uint16_t, uint32_t>(execution_tier, lower_simd,
                                    kExprI32x4ExtMulHighI16x8U, MultiplyLong,
                                    kExprI16x8Splat, MulHalf::kHigh);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulLowI32x4S) {
  RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
                                  kExprI64x2ExtMulLowI32x4S, MultiplyLong,
                                  kExprI32x4Splat, MulHalf::kLow);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulHighI32x4S) {
  RunExtMulTest<int32_t, int64_t>(execution_tier, lower_simd,
                                  kExprI64x2ExtMulHighI32x4S, MultiplyLong,
                                  kExprI32x4Splat, MulHalf::kHigh);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulLowI32x4U) {
  RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
                                    kExprI64x2ExtMulLowI32x4U, MultiplyLong,
                                    kExprI32x4Splat, MulHalf::kLow);
}
WASM_SIMD_TEST_NO_LOWERING(I64x2ExtMulHighI32x4U) {
  RunExtMulTest<uint32_t, uint64_t>(execution_tier, lower_simd,
                                    kExprI64x2ExtMulHighI32x4U, MultiplyLong,
                                    kExprI32x4Splat, MulHalf::kHigh);
}
#endif // V8_TARGET_ARCH_ARM64
// Tests i32x4.dot_i16x8_s: both operands are splats, so each i32 lane is the
// sum of two identical products, i.e. x * y * 2.
WASM_SIMD_TEST(I32x4DotI16x8S) {
  WasmRunner<int32_t, int16_t, int16_t> r(execution_tier, lower_simd);
  // Output global for the dot-product result vector.
  int32_t* g = r.builder().template AddGlobal<int32_t>(kWasmS128);
  byte value1 = 0, value2 = 1;
  byte temp1 = r.AllocateLocal(kWasmS128);
  byte temp2 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value1))),
        WASM_SET_LOCAL(temp2, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value2))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_BINOP(kExprI32x4DotI16x8S, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp2))),
        WASM_ONE);
  for (int16_t x : compiler::ValueHelper::GetVector<int16_t>()) {
    for (int16_t y : compiler::ValueHelper::GetVector<int16_t>()) {
      r.Call(x, y);
      // x * y * 2 can overflow int32 (e.g. x == y == -0x8000); the expected
      // behavior is to wraparound.
      int32_t expected = base::MulWithWraparound(x * y, 2);
      for (int i = 0; i < 4; i++) {
        CHECK_EQ(expected, ReadLittleEndianValue<int32_t>(&g[i]));
      }
    }
  }
}
// Runs every shift amount 1..16, encoding the count both as an immediate and
// as a value loaded from linear memory; both results must match |expected_op|.
// Intentionally includes a shift by 16, which should be a no-op.
void RunI16x8ShiftOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                         WasmOpcode opcode, Int16ShiftOp expected_op) {
  for (int count = 1; count <= 16; count++) {
    WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
    int32_t* memory = r.builder().AddMemoryElems<int32_t>(1);
    // Global 0 receives the immediate-shift result, global 1 the memory-shift
    // result.
    int16_t* out_imm = r.builder().AddGlobal<int16_t>(kWasmS128);
    int16_t* out_mem = r.builder().AddGlobal<int16_t>(kWasmS128);
    byte param = 0;
    byte vec = r.AllocateLocal(kWasmS128);
    BUILD(
        r, WASM_SET_LOCAL(vec, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(param))),
        WASM_SET_GLOBAL(0, WASM_SIMD_SHIFT_OP(opcode, WASM_GET_LOCAL(vec),
                                              WASM_I32V(count))),
        WASM_SET_GLOBAL(1, WASM_SIMD_SHIFT_OP(
                               opcode, WASM_GET_LOCAL(vec),
                               WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO))),
        WASM_ONE);
    r.builder().WriteMemory(&memory[0], count);
    FOR_INT16_INPUTS(x) {
      r.Call(x);
      int16_t want = expected_op(x, count);
      for (int lane = 0; lane < 8; lane++) {
        CHECK_EQ(want, ReadLittleEndianValue<int16_t>(&out_imm[lane]));
        CHECK_EQ(want, ReadLittleEndianValue<int16_t>(&out_mem[lane]));
      }
    }
  }
}
// i16x8 shifts: left, arithmetic right, and logical right.
WASM_SIMD_TEST(I16x8Shl) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8Shl,
                      LogicalShiftLeft);
}
WASM_SIMD_TEST(I16x8ShrS) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrS,
                      ArithmeticShiftRight);
}
WASM_SIMD_TEST(I16x8ShrU) {
  RunI16x8ShiftOpTest(execution_tier, lower_simd, kExprI16x8ShrU,
                      LogicalShiftRight);
}
// Splats the test value across all 16 lanes, applies the unary |opcode|, and
// checks every lane of the result against the scalar |expected_op|.
void RunI8x16UnOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                      WasmOpcode opcode, Int8UnOp expected_op) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Output global the result vector is written into.
  int8_t* output = r.builder().AddGlobal<int8_t>(kWasmS128);
  byte param = 0;
  byte vec = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(vec, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param))),
        WASM_SET_GLOBAL(0, WASM_SIMD_UNOP(opcode, WASM_GET_LOCAL(vec))),
        WASM_ONE);
  FOR_INT8_INPUTS(x) {
    r.Call(x);
    int8_t want = expected_op(x);
    for (int lane = 0; lane < 16; lane++) {
      CHECK_EQ(want, ReadLittleEndianValue<int8_t>(&output[lane]));
    }
  }
}
// i8x16 unary ops: wrapping negation and absolute value.
WASM_SIMD_TEST(I8x16Neg) {
  RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Neg,
                   base::NegateWithWraparound);
}
WASM_SIMD_TEST(I8x16Abs) {
  RunI8x16UnOpTest(execution_tier, lower_simd, kExprI8x16Abs, Abs);
}
#if V8_TARGET_ARCH_ARM64
// TODO(v8:11002) Prototype i8x16.popcnt.
WASM_SIMD_TEST_NO_LOWERING(I8x16Popcnt) {
  // Post-MVP opcode: the flag must be enabled for the scope of this test.
  FLAG_SCOPE(wasm_simd_post_mvp);
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Global to hold output.
  int8_t* g = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test value, perform unop, and write the result.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_UNOP(kExprI8x16Popcnt, WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_UINT8_INPUTS(x) {
    r.Call(x);
    // A byte's population count is at most 8, so comparing an unsigned value
    // against the int8_t lane contents is safe here.
    unsigned expected = base::bits::CountPopulation(x);
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected, ReadLittleEndianValue<int8_t>(&g[i]));
    }
  }
}
#endif // V8_TARGET_ARCH_ARM64
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_SIMD_TEST(I8x16ConvertI16x8) {
  WasmRunner<int32_t, int32_t> r(execution_tier, lower_simd);
  // Create output vectors to hold signed (g0) and unsigned (g1) results.
  int8_t* g0 = r.builder().AddGlobal<int8_t>(kWasmS128);
  int8_t* g1 = r.builder().AddGlobal<int8_t>(kWasmS128);
  // Build fn to splat test value, perform conversions, and write the results.
  // Both operands of the narrowing binop are the same splatted vector.
  byte value = 0;
  byte temp1 = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(temp1, WASM_SIMD_I16x8_SPLAT(WASM_GET_LOCAL(value))),
        WASM_SET_GLOBAL(
            0, WASM_SIMD_BINOP(kExprI8x16SConvertI16x8, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_SET_GLOBAL(
            1, WASM_SIMD_BINOP(kExprI8x16UConvertI16x8, WASM_GET_LOCAL(temp1),
                               WASM_GET_LOCAL(temp1))),
        WASM_ONE);
  FOR_INT16_INPUTS(x) {
    r.Call(x);
    // Packing saturates to the destination lane's value range.
    int8_t expected_signed = Saturate<int8_t>(x);
    int8_t expected_unsigned = Saturate<uint8_t>(x);
    for (int i = 0; i < 16; i++) {
      CHECK_EQ(expected_signed, ReadLittleEndianValue<int8_t>(&g0[i]));
      CHECK_EQ(expected_unsigned, ReadLittleEndianValue<int8_t>(&g1[i]));
    }
  }
}
// Splats two scalar inputs across all 16 lanes, applies the binary |opcode|,
// and verifies every result lane against the scalar |expected_op|. T selects
// the signed vs. unsigned lane interpretation used by the reference.
template <typename T = int8_t, typename OpType = T (*)(T, T)>
void RunI8x16BinOpTest(TestExecutionTier execution_tier, LowerSimd lower_simd,
                       WasmOpcode opcode, OpType expected_op) {
  WasmRunner<int32_t, T, T> r(execution_tier, lower_simd);
  // Output global the result vector is written into.
  T* output = r.builder().template AddGlobal<T>(kWasmS128);
  byte param0 = 0, param1 = 1;
  byte lhs = r.AllocateLocal(kWasmS128);
  byte rhs = r.AllocateLocal(kWasmS128);
  BUILD(r, WASM_SET_LOCAL(lhs, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param0))),
        WASM_SET_LOCAL(rhs, WASM_SIMD_I8x16_SPLAT(WASM_GET_LOCAL(param1))),
        WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(opcode, WASM_GET_LOCAL(lhs),
                                           WASM_GET_LOCAL(rhs))),
        WASM_ONE);
  for (T x : compiler::ValueHelper::GetVector<T>()) {
    for (T y : compiler::ValueHelper::GetVector<T>()) {
      r.Call(x, y);
      T want = expected_op(x, y);
      for (int lane = 0; lane < 16; lane++) {
        CHECK_EQ(want, ReadLittleEndianValue<T>(&output[lane]));
      }
    }
  }
}
// i8x16 lane-wise binary ops: wrapping and saturating arithmetic, min/max,
// and comparisons, each checked against a scalar reference implementation via
// RunI8x16BinOpTest (instantiated with uint8_t for unsigned semantics).
WASM_SIMD_TEST(I8x16Add) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Add,
                    base::AddWithWraparound);
}
WASM_SIMD_TEST(I8x16AddSatS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatS,
                    SaturateAdd<int8_t>);
}
WASM_SIMD_TEST(I8x16Sub) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Sub,
                    base::SubWithWraparound);
}
WASM_SIMD_TEST(I8x16SubSatS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatS,
                    SaturateSub<int8_t>);
}
WASM_SIMD_TEST(I8x16MinS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinS, Minimum);
}
WASM_SIMD_TEST(I8x16MaxS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxS, Maximum);
}
WASM_SIMD_TEST(I8x16AddSatU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16AddSatU,
                    SaturateAdd<uint8_t>);
}
WASM_SIMD_TEST(I8x16SubSatU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16SubSatU,
                    SaturateSub<uint8_t>);
}
WASM_SIMD_TEST(I8x16MinU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MinU,
                    UnsignedMinimum);
}
WASM_SIMD_TEST(I8x16MaxU) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16MaxU,
                    UnsignedMaximum);
}
WASM_SIMD_TEST(I8x16Eq) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Eq, Equal);
}
WASM_SIMD_TEST(I8x16Ne) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16Ne, NotEqual);
}
WASM_SIMD_TEST(I8x16GtS) {
  RunI8x16BinOpTest(execution_tier, lower_simd, kExprI8x16GtS, Greater);
}
WASM_SIMD_TEST(I8x16GeS) {