// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cmath>
#include <limits>
#include "src/v8.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/decoder-arm64-inl.h"
#include "src/arm64/disasm-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/arm64/utils-arm64.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
namespace v8 {
namespace internal {
// Test infrastructure.
//
// Tests are functions which accept no parameters and have no return values.
// The testing code should not perform an explicit return once completed. For
// example, to test the mov immediate instruction, a very simple test would be:
//
// TEST(mov_x0_one) {
//   SETUP();
//
//   START();
//   __ mov(x0, Operand(1));
//   END();
//
//   RUN();
//
//   CHECK_EQUAL_64(1, x0);
//
//   TEARDOWN();
// }
//
// Within a START ... END block all registers but sp can be modified. sp has to
// be explicitly saved/restored. The END() macro replaces the function return
// so it may appear multiple times in a test if the test has multiple exit
// points.
//
// Once the test has been run all integer and floating point registers as well
// as flags are accessible through a RegisterDump instance, see
// utils-arm64.cc for more info on RegisterDump.
//
// We provide some helper checks to handle common cases:
//
//   CHECK_EQUAL_32(int32_t, int32_t)
//   CHECK_EQUAL_FP32(float, float)
//   CHECK_EQUAL_32(int32_t, W register)
//   CHECK_EQUAL_FP32(float, S register)
//   CHECK_EQUAL_64(int64_t, int64_t)
//   CHECK_EQUAL_FP64(double, double)
//   CHECK_EQUAL_64(int64_t, X register)
//   CHECK_EQUAL_64(X register, X register)
//   CHECK_EQUAL_FP64(double, D register)
//
// e.g. CHECK_EQUAL_FP64(0.5, d30);
//
// If more advanced computation is required before the check, access the
// RegisterDump named core directly:
//
//   CHECK_EQUAL_64(0x1234, core.xreg(0) & 0xFFFF);
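//
// The processor flags can be checked in the same way, e.g.:
//
//   CHECK_EQUAL_NZCV(NFlag);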
#if 0 // TODO(all): enable.
static v8::Persistent<v8::Context> env;
static void InitializeVM() {
  if (env.IsEmpty()) {
    env = v8::Context::New();
  }
}
#endif
#define __ masm.
#define BUF_SIZE 8192
#define SETUP() SETUP_SIZE(BUF_SIZE)
#define INIT_V8() \
  CcTest::InitializeVM();
#ifdef USE_SIMULATOR
// Run tests with the simulator.
#define SETUP_SIZE(buf_size)                                   \
  Isolate* isolate = CcTest::i_isolate();                      \
  HandleScope scope(isolate);                                  \
  CHECK_NOT_NULL(isolate);                                     \
  byte* buf = new byte[buf_size];                              \
  MacroAssembler masm(isolate, buf, buf_size,                  \
                      v8::internal::CodeObjectRequired::kYes); \
  Decoder<DispatchingDecoderVisitor>* decoder =                \
      new Decoder<DispatchingDecoderVisitor>();                \
  Simulator simulator(decoder);                                \
  PrintDisassembler* pdis = nullptr;                           \
  RegisterDump core;

/* if (CcTest::trace_sim()) {
     pdis = new PrintDisassembler(stdout);
     decoder->PrependVisitor(pdis);
   }
*/
// Reset the assembler and simulator, so that instructions can be generated,
// but don't actually emit any code. This can be used by tests that need to
// emit instructions at the start of the buffer. Note that START_AFTER_RESET
// must be called before any callee-saved register is modified, and before an
// END is encountered.
//
// Most tests should call START, rather than call RESET directly.
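//
// For example (see TEST(branch_at_start) below), a branch can be placed at
// the very start of the buffer:
//
//   RESET();
//   __ B(&target);
//   START_AFTER_RESET();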
#define RESET() \
  __ Reset();   \
  simulator.ResetState();

#define START_AFTER_RESET()      \
  __ PushCalleeSavedRegisters(); \
  __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);

#define START() \
  RESET();      \
  START_AFTER_RESET();

#define RUN() \
  simulator.RunFrom(reinterpret_cast<Instruction*>(buf))

#define END()                                               \
  __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL); \
  core.Dump(&masm);                                         \
  __ PopCalleeSavedRegisters();                             \
  __ Ret();                                                 \
  __ GetCode(masm.isolate(), nullptr);

#define TEARDOWN() \
  delete pdis;     \
  delete[] buf;
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
#define SETUP_SIZE(buf_size)                                       \
  Isolate* isolate = CcTest::i_isolate();                          \
  HandleScope scope(isolate);                                      \
  CHECK_NOT_NULL(isolate);                                         \
  size_t allocated;                                                \
  byte* buf = AllocateAssemblerBuffer(&allocated, buf_size);       \
  MacroAssembler masm(isolate, buf, static_cast<int>(allocated),   \
                      v8::internal::CodeObjectRequired::kYes);     \
  RegisterDump core;

#define RESET()                                                  \
  __ Reset();                                                    \
  /* Reset the machine state (like simulator.ResetState()). */   \
  __ Msr(NZCV, xzr);                                             \
  __ Msr(FPCR, xzr);

#define START_AFTER_RESET() \
  __ PushCalleeSavedRegisters();

#define START() \
  RESET();      \
  START_AFTER_RESET();

#define RUN()                                                       \
  MakeAssemblerBufferExecutable(buf, allocated);                    \
  Assembler::FlushICache(isolate, buf, masm.SizeOfGeneratedCode()); \
  {                                                                 \
    void (*test_function)(void);                                    \
    memcpy(&test_function, &buf, sizeof(buf));                      \
    test_function();                                                \
  }

#define END()                   \
  core.Dump(&masm);             \
  __ PopCalleeSavedRegisters(); \
  __ Ret();                     \
  __ GetCode(masm.isolate(), nullptr);

#define TEARDOWN() CHECK(v8::internal::FreePages(buf, allocated));
#endif // ifdef USE_SIMULATOR.
#define CHECK_EQUAL_NZCV(expected) \
  CHECK(EqualNzcv(expected, core.flags_nzcv()))

#define CHECK_EQUAL_REGISTERS(expected) \
  CHECK(EqualRegisters(&expected, &core))

#define CHECK_EQUAL_32(expected, result) \
  CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))

#define CHECK_EQUAL_FP32(expected, result) \
  CHECK(EqualFP32(expected, &core, result))

#define CHECK_EQUAL_64(expected, result) \
  CHECK(Equal64(expected, &core, result))

#define CHECK_EQUAL_FP64(expected, result) \
  CHECK(EqualFP64(expected, &core, result))

// Expected values for 128-bit comparisons are passed as two 64-bit values,
// where expected_h (high) is <127:64> and expected_l (low) is <63:0>.
#define CHECK_EQUAL_128(expected_h, expected_l, result) \
  CHECK(Equal128(expected_h, expected_l, &core, result))
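// e.g. to check that q0 holds 0x0123456789ABCDEF in its high half and
// 0xFEDCBA9876543210 in its low half:
//
//   CHECK_EQUAL_128(0x0123456789ABCDEFUL, 0xFEDCBA9876543210UL, q0);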
#ifdef DEBUG
#define CHECK_CONSTANT_POOL_SIZE(expected) \
  CHECK_EQ(expected, __ GetConstantPoolEntriesSizeForTesting())
#else
#define CHECK_CONSTANT_POOL_SIZE(expected) ((void)0)
#endif
TEST(stack_ops) {
INIT_V8();
SETUP();
START();
// Save csp.
__ Mov(x29, csp);
// Set the csp to a known value.
__ Mov(x16, 0x1000);
__ Mov(csp, x16);
__ Mov(x0, csp);
// Add immediate to the csp, and move the result to a normal register.
__ Add(csp, csp, Operand(0x50));
__ Mov(x1, csp);
// Add extended to the csp, and move the result to a normal register.
__ Mov(x17, 0xFFF);
__ Add(csp, csp, Operand(x17, SXTB));
__ Mov(x2, csp);
// Write csp using a logical instruction, and move the result to a normal
// register.
__ Orr(csp, xzr, Operand(0x1FFF));
__ Mov(x3, csp);
// Write wcsp using a logical instruction.
__ Orr(wcsp, wzr, Operand(0xFFFFFFF8L));
__ Mov(x4, csp);
// Write csp, and read back wcsp.
__ Orr(csp, xzr, Operand(0xFFFFFFF8L));
__ Mov(w5, wcsp);
// Restore csp.
__ Mov(csp, x29);
END();
RUN();
CHECK_EQUAL_64(0x1000, x0);
CHECK_EQUAL_64(0x1050, x1);
CHECK_EQUAL_64(0x104F, x2);
CHECK_EQUAL_64(0x1FFF, x3);
CHECK_EQUAL_64(0xFFFFFFF8, x4);
CHECK_EQUAL_64(0xFFFFFFF8, x5);
TEARDOWN();
}
TEST(mvn) {
INIT_V8();
SETUP();
START();
__ Mvn(w0, 0xFFF);
__ Mvn(x1, 0xFFF);
__ Mvn(w2, Operand(w0, LSL, 1));
__ Mvn(x3, Operand(x1, LSL, 2));
__ Mvn(w4, Operand(w0, LSR, 3));
__ Mvn(x5, Operand(x1, LSR, 4));
__ Mvn(w6, Operand(w0, ASR, 11));
__ Mvn(x7, Operand(x1, ASR, 12));
__ Mvn(w8, Operand(w0, ROR, 13));
__ Mvn(x9, Operand(x1, ROR, 14));
__ Mvn(w10, Operand(w2, UXTB));
__ Mvn(x11, Operand(x2, SXTB, 1));
__ Mvn(w12, Operand(w2, UXTH, 2));
__ Mvn(x13, Operand(x2, SXTH, 3));
__ Mvn(x14, Operand(w2, UXTW, 4));
__ Mvn(x15, Operand(w2, SXTW, 4));
END();
RUN();
CHECK_EQUAL_64(0xFFFFF000, x0);
CHECK_EQUAL_64(0xFFFFFFFFFFFFF000UL, x1);
CHECK_EQUAL_64(0x00001FFF, x2);
CHECK_EQUAL_64(0x0000000000003FFFUL, x3);
CHECK_EQUAL_64(0xE00001FF, x4);
CHECK_EQUAL_64(0xF0000000000000FFUL, x5);
CHECK_EQUAL_64(0x00000001, x6);
CHECK_EQUAL_64(0x0, x7);
CHECK_EQUAL_64(0x7FF80000, x8);
CHECK_EQUAL_64(0x3FFC000000000000UL, x9);
CHECK_EQUAL_64(0xFFFFFF00, x10);
CHECK_EQUAL_64(0x0000000000000001UL, x11);
CHECK_EQUAL_64(0xFFFF8003, x12);
CHECK_EQUAL_64(0xFFFFFFFFFFFF0007UL, x13);
CHECK_EQUAL_64(0xFFFFFFFFFFFE000FUL, x14);
CHECK_EQUAL_64(0xFFFFFFFFFFFE000FUL, x15);
TEARDOWN();
}
TEST(mov) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFFFFFFFFFFFFFFFL);
__ Mov(x1, 0xFFFFFFFFFFFFFFFFL);
__ Mov(x2, 0xFFFFFFFFFFFFFFFFL);
__ Mov(x3, 0xFFFFFFFFFFFFFFFFL);
__ Mov(x0, 0x0123456789ABCDEFL);
__ movz(x1, 0xABCDL << 16);
__ movk(x2, 0xABCDL << 32);
__ movn(x3, 0xABCDL << 48);
__ Mov(x4, 0x0123456789ABCDEFL);
__ Mov(x5, x4);
__ Mov(w6, -1);
// Test that moves back to the same register have the desired effect. This
// is a no-op for X registers, and a truncation for W registers.
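// For example, with x8 = 0x0123456789ABCDEF, writing w8 below keeps only the
// low 32 bits, so w8 reads back as 0x89ABCDEF.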
__ Mov(x7, 0x0123456789ABCDEFL);
__ Mov(x7, x7);
__ Mov(x8, 0x0123456789ABCDEFL);
__ Mov(w8, w8);
__ Mov(x9, 0x0123456789ABCDEFL);
__ Mov(x9, Operand(x9));
__ Mov(x10, 0x0123456789ABCDEFL);
__ Mov(w10, Operand(w10));
__ Mov(w11, 0xFFF);
__ Mov(x12, 0xFFF);
__ Mov(w13, Operand(w11, LSL, 1));
__ Mov(x14, Operand(x12, LSL, 2));
__ Mov(w15, Operand(w11, LSR, 3));
__ Mov(x18, Operand(x12, LSR, 4));
__ Mov(w19, Operand(w11, ASR, 11));
__ Mov(x20, Operand(x12, ASR, 12));
__ Mov(w21, Operand(w11, ROR, 13));
__ Mov(x22, Operand(x12, ROR, 14));
__ Mov(w23, Operand(w13, UXTB));
__ Mov(x24, Operand(x13, SXTB, 1));
__ Mov(w25, Operand(w13, UXTH, 2));
__ Mov(x26, Operand(x13, SXTH, 3));
__ Mov(x27, Operand(w13, UXTW, 4));
END();
RUN();
CHECK_EQUAL_64(0x0123456789ABCDEFL, x0);
CHECK_EQUAL_64(0x00000000ABCD0000L, x1);
CHECK_EQUAL_64(0xFFFFABCDFFFFFFFFL, x2);
CHECK_EQUAL_64(0x5432FFFFFFFFFFFFL, x3);
CHECK_EQUAL_64(x4, x5);
CHECK_EQUAL_32(-1, w6);
CHECK_EQUAL_64(0x0123456789ABCDEFL, x7);
CHECK_EQUAL_32(0x89ABCDEFL, w8);
CHECK_EQUAL_64(0x0123456789ABCDEFL, x9);
CHECK_EQUAL_32(0x89ABCDEFL, w10);
CHECK_EQUAL_64(0x00000FFF, x11);
CHECK_EQUAL_64(0x0000000000000FFFUL, x12);
CHECK_EQUAL_64(0x00001FFE, x13);
CHECK_EQUAL_64(0x0000000000003FFCUL, x14);
CHECK_EQUAL_64(0x000001FF, x15);
CHECK_EQUAL_64(0x00000000000000FFUL, x18);
CHECK_EQUAL_64(0x00000001, x19);
CHECK_EQUAL_64(0x0, x20);
CHECK_EQUAL_64(0x7FF80000, x21);
CHECK_EQUAL_64(0x3FFC000000000000UL, x22);
CHECK_EQUAL_64(0x000000FE, x23);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFCUL, x24);
CHECK_EQUAL_64(0x00007FF8, x25);
CHECK_EQUAL_64(0x000000000000FFF0UL, x26);
CHECK_EQUAL_64(0x000000000001FFE0UL, x27);
TEARDOWN();
}
TEST(mov_imm_w) {
INIT_V8();
SETUP();
START();
__ Mov(w0, 0xFFFFFFFFL);
__ Mov(w1, 0xFFFF1234L);
__ Mov(w2, 0x1234FFFFL);
__ Mov(w3, 0x00000000L);
__ Mov(w4, 0x00001234L);
__ Mov(w5, 0x12340000L);
__ Mov(w6, 0x12345678L);
__ Mov(w7, (int32_t)0x80000000);
__ Mov(w8, (int32_t)0xFFFF0000);
__ Mov(w9, kWMinInt);
END();
RUN();
CHECK_EQUAL_64(0xFFFFFFFFL, x0);
CHECK_EQUAL_64(0xFFFF1234L, x1);
CHECK_EQUAL_64(0x1234FFFFL, x2);
CHECK_EQUAL_64(0x00000000L, x3);
CHECK_EQUAL_64(0x00001234L, x4);
CHECK_EQUAL_64(0x12340000L, x5);
CHECK_EQUAL_64(0x12345678L, x6);
CHECK_EQUAL_64(0x80000000L, x7);
CHECK_EQUAL_64(0xFFFF0000L, x8);
CHECK_EQUAL_32(kWMinInt, w9);
TEARDOWN();
}
TEST(mov_imm_x) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFFFFFFFFFFFFFFFL);
__ Mov(x1, 0xFFFFFFFFFFFF1234L);
__ Mov(x2, 0xFFFFFFFF12345678L);
__ Mov(x3, 0xFFFF1234FFFF5678L);
__ Mov(x4, 0x1234FFFFFFFF5678L);
__ Mov(x5, 0x1234FFFF5678FFFFL);
__ Mov(x6, 0x12345678FFFFFFFFL);
__ Mov(x7, 0x1234FFFFFFFFFFFFL);
__ Mov(x8, 0x123456789ABCFFFFL);
__ Mov(x9, 0x12345678FFFF9ABCL);
__ Mov(x10, 0x1234FFFF56789ABCL);
__ Mov(x11, 0xFFFF123456789ABCL);
__ Mov(x12, 0x0000000000000000L);
__ Mov(x13, 0x0000000000001234L);
__ Mov(x14, 0x0000000012345678L);
__ Mov(x15, 0x0000123400005678L);
__ Mov(x18, 0x1234000000005678L);
__ Mov(x19, 0x1234000056780000L);
__ Mov(x20, 0x1234567800000000L);
__ Mov(x21, 0x1234000000000000L);
__ Mov(x22, 0x123456789ABC0000L);
__ Mov(x23, 0x1234567800009ABCL);
__ Mov(x24, 0x1234000056789ABCL);
__ Mov(x25, 0x0000123456789ABCL);
__ Mov(x26, 0x123456789ABCDEF0L);
__ Mov(x27, 0xFFFF000000000001L);
__ Mov(x28, 0x8000FFFF00000000L);
END();
RUN();
CHECK_EQUAL_64(0xFFFFFFFFFFFF1234L, x1);
CHECK_EQUAL_64(0xFFFFFFFF12345678L, x2);
CHECK_EQUAL_64(0xFFFF1234FFFF5678L, x3);
CHECK_EQUAL_64(0x1234FFFFFFFF5678L, x4);
CHECK_EQUAL_64(0x1234FFFF5678FFFFL, x5);
CHECK_EQUAL_64(0x12345678FFFFFFFFL, x6);
CHECK_EQUAL_64(0x1234FFFFFFFFFFFFL, x7);
CHECK_EQUAL_64(0x123456789ABCFFFFL, x8);
CHECK_EQUAL_64(0x12345678FFFF9ABCL, x9);
CHECK_EQUAL_64(0x1234FFFF56789ABCL, x10);
CHECK_EQUAL_64(0xFFFF123456789ABCL, x11);
CHECK_EQUAL_64(0x0000000000000000L, x12);
CHECK_EQUAL_64(0x0000000000001234L, x13);
CHECK_EQUAL_64(0x0000000012345678L, x14);
CHECK_EQUAL_64(0x0000123400005678L, x15);
CHECK_EQUAL_64(0x1234000000005678L, x18);
CHECK_EQUAL_64(0x1234000056780000L, x19);
CHECK_EQUAL_64(0x1234567800000000L, x20);
CHECK_EQUAL_64(0x1234000000000000L, x21);
CHECK_EQUAL_64(0x123456789ABC0000L, x22);
CHECK_EQUAL_64(0x1234567800009ABCL, x23);
CHECK_EQUAL_64(0x1234000056789ABCL, x24);
CHECK_EQUAL_64(0x0000123456789ABCL, x25);
CHECK_EQUAL_64(0x123456789ABCDEF0L, x26);
CHECK_EQUAL_64(0xFFFF000000000001L, x27);
CHECK_EQUAL_64(0x8000FFFF00000000L, x28);
TEARDOWN();
}
TEST(orr) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xF0F0);
__ Mov(x1, 0xF00000FF);
__ Orr(x2, x0, Operand(x1));
__ Orr(w3, w0, Operand(w1, LSL, 28));
__ Orr(x4, x0, Operand(x1, LSL, 32));
__ Orr(x5, x0, Operand(x1, LSR, 4));
__ Orr(w6, w0, Operand(w1, ASR, 4));
__ Orr(x7, x0, Operand(x1, ASR, 4));
__ Orr(w8, w0, Operand(w1, ROR, 12));
__ Orr(x9, x0, Operand(x1, ROR, 12));
__ Orr(w10, w0, Operand(0xF));
__ Orr(x11, x0, Operand(0xF0000000F0000000L));
END();
RUN();
CHECK_EQUAL_64(0xF000F0FF, x2);
CHECK_EQUAL_64(0xF000F0F0, x3);
CHECK_EQUAL_64(0xF00000FF0000F0F0L, x4);
CHECK_EQUAL_64(0x0F00F0FF, x5);
CHECK_EQUAL_64(0xFF00F0FF, x6);
CHECK_EQUAL_64(0x0F00F0FF, x7);
CHECK_EQUAL_64(0x0FFFF0F0, x8);
CHECK_EQUAL_64(0x0FF00000000FF0F0L, x9);
CHECK_EQUAL_64(0xF0FF, x10);
CHECK_EQUAL_64(0xF0000000F000F0F0L, x11);
TEARDOWN();
}
TEST(orr_extend) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 1);
__ Mov(x1, 0x8000000080008080UL);
__ Orr(w6, w0, Operand(w1, UXTB));
__ Orr(x7, x0, Operand(x1, UXTH, 1));
__ Orr(w8, w0, Operand(w1, UXTW, 2));
__ Orr(x9, x0, Operand(x1, UXTX, 3));
__ Orr(w10, w0, Operand(w1, SXTB));
__ Orr(x11, x0, Operand(x1, SXTH, 1));
__ Orr(x12, x0, Operand(x1, SXTW, 2));
__ Orr(x13, x0, Operand(x1, SXTX, 3));
END();
RUN();
CHECK_EQUAL_64(0x00000081, x6);
CHECK_EQUAL_64(0x00010101, x7);
CHECK_EQUAL_64(0x00020201, x8);
CHECK_EQUAL_64(0x0000000400040401UL, x9);
CHECK_EQUAL_64(0x00000000FFFFFF81UL, x10);
CHECK_EQUAL_64(0xFFFFFFFFFFFF0101UL, x11);
CHECK_EQUAL_64(0xFFFFFFFE00020201UL, x12);
CHECK_EQUAL_64(0x0000000400040401UL, x13);
TEARDOWN();
}
TEST(bitwise_wide_imm) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0);
__ Mov(x1, 0xF0F0F0F0F0F0F0F0UL);
__ Orr(x10, x0, Operand(0x1234567890ABCDEFUL));
__ Orr(w11, w1, Operand(0x90ABCDEF));
__ Orr(w12, w0, kWMinInt);
__ Eor(w13, w0, kWMinInt);
END();
RUN();
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(0xF0F0F0F0F0F0F0F0UL, x1);
CHECK_EQUAL_64(0x1234567890ABCDEFUL, x10);
CHECK_EQUAL_64(0xF0FBFDFFUL, x11);
CHECK_EQUAL_32(kWMinInt, w12);
CHECK_EQUAL_32(kWMinInt, w13);
TEARDOWN();
}
TEST(orn) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xF0F0);
__ Mov(x1, 0xF00000FF);
__ Orn(x2, x0, Operand(x1));
__ Orn(w3, w0, Operand(w1, LSL, 4));
__ Orn(x4, x0, Operand(x1, LSL, 4));
__ Orn(x5, x0, Operand(x1, LSR, 1));
__ Orn(w6, w0, Operand(w1, ASR, 1));
__ Orn(x7, x0, Operand(x1, ASR, 1));
__ Orn(w8, w0, Operand(w1, ROR, 16));
__ Orn(x9, x0, Operand(x1, ROR, 16));
__ Orn(w10, w0, Operand(0xFFFF));
__ Orn(x11, x0, Operand(0xFFFF0000FFFFL));
END();
RUN();
CHECK_EQUAL_64(0xFFFFFFFF0FFFFFF0L, x2);
CHECK_EQUAL_64(0xFFFFF0FF, x3);
CHECK_EQUAL_64(0xFFFFFFF0FFFFF0FFL, x4);
CHECK_EQUAL_64(0xFFFFFFFF87FFFFF0L, x5);
CHECK_EQUAL_64(0x07FFFFF0, x6);
CHECK_EQUAL_64(0xFFFFFFFF87FFFFF0L, x7);
CHECK_EQUAL_64(0xFF00FFFF, x8);
CHECK_EQUAL_64(0xFF00FFFFFFFFFFFFL, x9);
CHECK_EQUAL_64(0xFFFFF0F0, x10);
CHECK_EQUAL_64(0xFFFF0000FFFFF0F0L, x11);
TEARDOWN();
}
TEST(orn_extend) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 1);
__ Mov(x1, 0x8000000080008081UL);
__ Orn(w6, w0, Operand(w1, UXTB));
__ Orn(x7, x0, Operand(x1, UXTH, 1));
__ Orn(w8, w0, Operand(w1, UXTW, 2));
__ Orn(x9, x0, Operand(x1, UXTX, 3));
__ Orn(w10, w0, Operand(w1, SXTB));
__ Orn(x11, x0, Operand(x1, SXTH, 1));
__ Orn(x12, x0, Operand(x1, SXTW, 2));
__ Orn(x13, x0, Operand(x1, SXTX, 3));
END();
RUN();
CHECK_EQUAL_64(0xFFFFFF7F, x6);
CHECK_EQUAL_64(0xFFFFFFFFFFFEFEFDUL, x7);
CHECK_EQUAL_64(0xFFFDFDFB, x8);
CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x9);
CHECK_EQUAL_64(0x0000007F, x10);
CHECK_EQUAL_64(0x0000FEFD, x11);
CHECK_EQUAL_64(0x00000001FFFDFDFBUL, x12);
CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x13);
TEARDOWN();
}
TEST(and_) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFF0);
__ Mov(x1, 0xF00000FF);
__ And(x2, x0, Operand(x1));
__ And(w3, w0, Operand(w1, LSL, 4));
__ And(x4, x0, Operand(x1, LSL, 4));
__ And(x5, x0, Operand(x1, LSR, 1));
__ And(w6, w0, Operand(w1, ASR, 20));
__ And(x7, x0, Operand(x1, ASR, 20));
__ And(w8, w0, Operand(w1, ROR, 28));
__ And(x9, x0, Operand(x1, ROR, 28));
__ And(w10, w0, Operand(0xFF00));
__ And(x11, x0, Operand(0xFF));
END();
RUN();
CHECK_EQUAL_64(0x000000F0, x2);
CHECK_EQUAL_64(0x00000FF0, x3);
CHECK_EQUAL_64(0x00000FF0, x4);
CHECK_EQUAL_64(0x00000070, x5);
CHECK_EQUAL_64(0x0000FF00, x6);
CHECK_EQUAL_64(0x00000F00, x7);
CHECK_EQUAL_64(0x00000FF0, x8);
CHECK_EQUAL_64(0x00000000, x9);
CHECK_EQUAL_64(0x0000FF00, x10);
CHECK_EQUAL_64(0x000000F0, x11);
TEARDOWN();
}
TEST(and_extend) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x1, 0x8000000080008081UL);
__ And(w6, w0, Operand(w1, UXTB));
__ And(x7, x0, Operand(x1, UXTH, 1));
__ And(w8, w0, Operand(w1, UXTW, 2));
__ And(x9, x0, Operand(x1, UXTX, 3));
__ And(w10, w0, Operand(w1, SXTB));
__ And(x11, x0, Operand(x1, SXTH, 1));
__ And(x12, x0, Operand(x1, SXTW, 2));
__ And(x13, x0, Operand(x1, SXTX, 3));
END();
RUN();
CHECK_EQUAL_64(0x00000081, x6);
CHECK_EQUAL_64(0x00010102, x7);
CHECK_EQUAL_64(0x00020204, x8);
CHECK_EQUAL_64(0x0000000400040408UL, x9);
CHECK_EQUAL_64(0xFFFFFF81, x10);
CHECK_EQUAL_64(0xFFFFFFFFFFFF0102UL, x11);
CHECK_EQUAL_64(0xFFFFFFFE00020204UL, x12);
CHECK_EQUAL_64(0x0000000400040408UL, x13);
TEARDOWN();
}
TEST(ands) {
INIT_V8();
SETUP();
START();
__ Mov(x1, 0xF00000FF);
__ Ands(w0, w1, Operand(w1));
END();
RUN();
CHECK_EQUAL_NZCV(NFlag);
CHECK_EQUAL_64(0xF00000FF, x0);
START();
__ Mov(x0, 0xFFF0);
__ Mov(x1, 0xF00000FF);
__ Ands(w0, w0, Operand(w1, LSR, 4));
END();
RUN();
CHECK_EQUAL_NZCV(ZFlag);
CHECK_EQUAL_64(0x00000000, x0);
START();
__ Mov(x0, 0x8000000000000000L);
__ Mov(x1, 0x00000001);
__ Ands(x0, x0, Operand(x1, ROR, 1));
END();
RUN();
CHECK_EQUAL_NZCV(NFlag);
CHECK_EQUAL_64(0x8000000000000000L, x0);
START();
__ Mov(x0, 0xFFF0);
__ Ands(w0, w0, Operand(0xF));
END();
RUN();
CHECK_EQUAL_NZCV(ZFlag);
CHECK_EQUAL_64(0x00000000, x0);
START();
__ Mov(x0, 0xFF000000);
__ Ands(w0, w0, Operand(0x80000000));
END();
RUN();
CHECK_EQUAL_NZCV(NFlag);
CHECK_EQUAL_64(0x80000000, x0);
TEARDOWN();
}
TEST(bic) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFF0);
__ Mov(x1, 0xF00000FF);
__ Bic(x2, x0, Operand(x1));
__ Bic(w3, w0, Operand(w1, LSL, 4));
__ Bic(x4, x0, Operand(x1, LSL, 4));
__ Bic(x5, x0, Operand(x1, LSR, 1));
__ Bic(w6, w0, Operand(w1, ASR, 20));
__ Bic(x7, x0, Operand(x1, ASR, 20));
__ Bic(w8, w0, Operand(w1, ROR, 28));
__ Bic(x9, x0, Operand(x1, ROR, 24));
__ Bic(x10, x0, Operand(0x1F));
__ Bic(x11, x0, Operand(0x100));
// Test bic into csp when the constant cannot be encoded in the immediate
// field.
// Use x20 to preserve csp. We check the result via x21 because the
// test infrastructure requires that csp be restored to its original value.
__ Mov(x20, csp);
__ Mov(x0, 0xFFFFFF);
__ Bic(csp, x0, Operand(0xABCDEF));
__ Mov(x21, csp);
__ Mov(csp, x20);
END();
RUN();
CHECK_EQUAL_64(0x0000FF00, x2);
CHECK_EQUAL_64(0x0000F000, x3);
CHECK_EQUAL_64(0x0000F000, x4);
CHECK_EQUAL_64(0x0000FF80, x5);
CHECK_EQUAL_64(0x000000F0, x6);
CHECK_EQUAL_64(0x0000F0F0, x7);
CHECK_EQUAL_64(0x0000F000, x8);
CHECK_EQUAL_64(0x0000FF00, x9);
CHECK_EQUAL_64(0x0000FFE0, x10);
CHECK_EQUAL_64(0x0000FEF0, x11);
CHECK_EQUAL_64(0x543210, x21);
TEARDOWN();
}
TEST(bic_extend) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x1, 0x8000000080008081UL);
__ Bic(w6, w0, Operand(w1, UXTB));
__ Bic(x7, x0, Operand(x1, UXTH, 1));
__ Bic(w8, w0, Operand(w1, UXTW, 2));
__ Bic(x9, x0, Operand(x1, UXTX, 3));
__ Bic(w10, w0, Operand(w1, SXTB));
__ Bic(x11, x0, Operand(x1, SXTH, 1));
__ Bic(x12, x0, Operand(x1, SXTW, 2));
__ Bic(x13, x0, Operand(x1, SXTX, 3));
END();
RUN();
CHECK_EQUAL_64(0xFFFFFF7E, x6);
CHECK_EQUAL_64(0xFFFFFFFFFFFEFEFDUL, x7);
CHECK_EQUAL_64(0xFFFDFDFB, x8);
CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x9);
CHECK_EQUAL_64(0x0000007E, x10);
CHECK_EQUAL_64(0x0000FEFD, x11);
CHECK_EQUAL_64(0x00000001FFFDFDFBUL, x12);
CHECK_EQUAL_64(0xFFFFFFFBFFFBFBF7UL, x13);
TEARDOWN();
}
TEST(bics) {
INIT_V8();
SETUP();
START();
__ Mov(x1, 0xFFFF);
__ Bics(w0, w1, Operand(w1));
END();
RUN();
CHECK_EQUAL_NZCV(ZFlag);
CHECK_EQUAL_64(0x00000000, x0);
START();
__ Mov(x0, 0xFFFFFFFF);
__ Bics(w0, w0, Operand(w0, LSR, 1));
END();
RUN();
CHECK_EQUAL_NZCV(NFlag);
CHECK_EQUAL_64(0x80000000, x0);
START();
__ Mov(x0, 0x8000000000000000L);
__ Mov(x1, 0x00000001);
__ Bics(x0, x0, Operand(x1, ROR, 1));
END();
RUN();
CHECK_EQUAL_NZCV(ZFlag);
CHECK_EQUAL_64(0x00000000, x0);
START();
__ Mov(x0, 0xFFFFFFFFFFFFFFFFL);
__ Bics(x0, x0, Operand(0x7FFFFFFFFFFFFFFFL));
END();
RUN();
CHECK_EQUAL_NZCV(NFlag);
CHECK_EQUAL_64(0x8000000000000000L, x0);
START();
__ Mov(w0, 0xFFFF0000);
__ Bics(w0, w0, Operand(0xFFFFFFF0));
END();
RUN();
CHECK_EQUAL_NZCV(ZFlag);
CHECK_EQUAL_64(0x00000000, x0);
TEARDOWN();
}
TEST(eor) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFF0);
__ Mov(x1, 0xF00000FF);
__ Eor(x2, x0, Operand(x1));
__ Eor(w3, w0, Operand(w1, LSL, 4));
__ Eor(x4, x0, Operand(x1, LSL, 4));
__ Eor(x5, x0, Operand(x1, LSR, 1));
__ Eor(w6, w0, Operand(w1, ASR, 20));
__ Eor(x7, x0, Operand(x1, ASR, 20));
__ Eor(w8, w0, Operand(w1, ROR, 28));
__ Eor(x9, x0, Operand(x1, ROR, 28));
__ Eor(w10, w0, Operand(0xFF00FF00));
__ Eor(x11, x0, Operand(0xFF00FF00FF00FF00L));
END();
RUN();
CHECK_EQUAL_64(0xF000FF0F, x2);
CHECK_EQUAL_64(0x0000F000, x3);
CHECK_EQUAL_64(0x0000000F0000F000L, x4);
CHECK_EQUAL_64(0x7800FF8F, x5);
CHECK_EQUAL_64(0xFFFF00F0, x6);
CHECK_EQUAL_64(0x0000F0F0, x7);
CHECK_EQUAL_64(0x0000F00F, x8);
CHECK_EQUAL_64(0x00000FF00000FFFFL, x9);
CHECK_EQUAL_64(0xFF0000F0, x10);
CHECK_EQUAL_64(0xFF00FF00FF0000F0L, x11);
TEARDOWN();
}
TEST(eor_extend) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0x1111111111111111UL);
__ Mov(x1, 0x8000000080008081UL);
__ Eor(w6, w0, Operand(w1, UXTB));
__ Eor(x7, x0, Operand(x1, UXTH, 1));
__ Eor(w8, w0, Operand(w1, UXTW, 2));
__ Eor(x9, x0, Operand(x1, UXTX, 3));
__ Eor(w10, w0, Operand(w1, SXTB));
__ Eor(x11, x0, Operand(x1, SXTH, 1));
__ Eor(x12, x0, Operand(x1, SXTW, 2));
__ Eor(x13, x0, Operand(x1, SXTX, 3));
END();
RUN();
CHECK_EQUAL_64(0x11111190, x6);
CHECK_EQUAL_64(0x1111111111101013UL, x7);
CHECK_EQUAL_64(0x11131315, x8);
CHECK_EQUAL_64(0x1111111511151519UL, x9);
CHECK_EQUAL_64(0xEEEEEE90, x10);
CHECK_EQUAL_64(0xEEEEEEEEEEEE1013UL, x11);
CHECK_EQUAL_64(0xEEEEEEEF11131315UL, x12);
CHECK_EQUAL_64(0x1111111511151519UL, x13);
TEARDOWN();
}
TEST(eon) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0xFFF0);
__ Mov(x1, 0xF00000FF);
__ Eon(x2, x0, Operand(x1));
__ Eon(w3, w0, Operand(w1, LSL, 4));
__ Eon(x4, x0, Operand(x1, LSL, 4));
__ Eon(x5, x0, Operand(x1, LSR, 1));
__ Eon(w6, w0, Operand(w1, ASR, 20));
__ Eon(x7, x0, Operand(x1, ASR, 20));
__ Eon(w8, w0, Operand(w1, ROR, 28));
__ Eon(x9, x0, Operand(x1, ROR, 28));
__ Eon(w10, w0, Operand(0x03C003C0));
__ Eon(x11, x0, Operand(0x0000100000001000L));
END();
RUN();
CHECK_EQUAL_64(0xFFFFFFFF0FFF00F0L, x2);
CHECK_EQUAL_64(0xFFFF0FFF, x3);
CHECK_EQUAL_64(0xFFFFFFF0FFFF0FFFL, x4);
CHECK_EQUAL_64(0xFFFFFFFF87FF0070L, x5);
CHECK_EQUAL_64(0x0000FF0F, x6);
CHECK_EQUAL_64(0xFFFFFFFFFFFF0F0FL, x7);
CHECK_EQUAL_64(0xFFFF0FF0, x8);
CHECK_EQUAL_64(0xFFFFF00FFFFF0000L, x9);
CHECK_EQUAL_64(0xFC3F03CF, x10);
CHECK_EQUAL_64(0xFFFFEFFFFFFF100FL, x11);
TEARDOWN();
}
TEST(eon_extend) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0x1111111111111111UL);
__ Mov(x1, 0x8000000080008081UL);
__ Eon(w6, w0, Operand(w1, UXTB));
__ Eon(x7, x0, Operand(x1, UXTH, 1));
__ Eon(w8, w0, Operand(w1, UXTW, 2));
__ Eon(x9, x0, Operand(x1, UXTX, 3));
__ Eon(w10, w0, Operand(w1, SXTB));
__ Eon(x11, x0, Operand(x1, SXTH, 1));
__ Eon(x12, x0, Operand(x1, SXTW, 2));
__ Eon(x13, x0, Operand(x1, SXTX, 3));
END();
RUN();
CHECK_EQUAL_64(0xEEEEEE6F, x6);
CHECK_EQUAL_64(0xEEEEEEEEEEEFEFECUL, x7);
CHECK_EQUAL_64(0xEEECECEA, x8);
CHECK_EQUAL_64(0xEEEEEEEAEEEAEAE6UL, x9);
CHECK_EQUAL_64(0x1111116F, x10);
CHECK_EQUAL_64(0x111111111111EFECUL, x11);
CHECK_EQUAL_64(0x11111110EEECECEAUL, x12);
CHECK_EQUAL_64(0xEEEEEEEAEEEAEAE6UL, x13);
TEARDOWN();
}
TEST(mul) {
INIT_V8();
SETUP();
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
__ Mov(x18, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mul(w0, w16, w16);
__ Mul(w1, w16, w17);
__ Mul(w2, w17, w18);
__ Mul(w3, w18, w19);
__ Mul(x4, x16, x16);
__ Mul(x5, x17, x18);
__ Mul(x6, x18, x19);
__ Mul(x7, x19, x19);
__ Smull(x8, w17, w18);
__ Smull(x9, w18, w18);
__ Smull(x10, w19, w19);
__ Mneg(w11, w16, w16);
__ Mneg(w12, w16, w17);
__ Mneg(w13, w17, w18);
__ Mneg(w14, w18, w19);
__ Mneg(x20, x16, x16);
__ Mneg(x21, x17, x18);
__ Mneg(x22, x18, x19);
__ Mneg(x23, x19, x19);
END();
RUN();
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(0xFFFFFFFF, x2);
CHECK_EQUAL_64(1, x3);
CHECK_EQUAL_64(0, x4);
CHECK_EQUAL_64(0xFFFFFFFF, x5);
CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x6);
CHECK_EQUAL_64(1, x7);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(1, x10);
CHECK_EQUAL_64(0, x11);
CHECK_EQUAL_64(0, x12);
CHECK_EQUAL_64(1, x13);
CHECK_EQUAL_64(0xFFFFFFFF, x14);
CHECK_EQUAL_64(0, x20);
CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x21);
CHECK_EQUAL_64(0xFFFFFFFF, x22);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x23);
TEARDOWN();
}
static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
  SETUP();
  START();
  __ Mov(w0, a);
  __ Mov(w1, b);
  __ Smull(x2, w0, w1);
  END();
  RUN();
  CHECK_EQUAL_64(expected, x2);
  TEARDOWN();
}
TEST(smull) {
INIT_V8();
SmullHelper(0, 0, 0);
SmullHelper(1, 1, 1);
SmullHelper(-1, -1, 1);
SmullHelper(1, -1, -1);
SmullHelper(0xFFFFFFFF80000000, 0x80000000, 1);
SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
}
TEST(madd) {
INIT_V8();
SETUP();
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
__ Mov(x18, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Madd(w0, w16, w16, w16);
__ Madd(w1, w16, w16, w17);
__ Madd(w2, w16, w16, w18);
__ Madd(w3, w16, w16, w19);
__ Madd(w4, w16, w17, w17);
__ Madd(w5, w17, w17, w18);
__ Madd(w6, w17, w17, w19);
__ Madd(w7, w17, w18, w16);
__ Madd(w8, w17, w18, w18);
__ Madd(w9, w18, w18, w17);
__ Madd(w10, w18, w19, w18);
__ Madd(w11, w19, w19, w19);
__ Madd(x12, x16, x16, x16);
__ Madd(x13, x16, x16, x17);
__ Madd(x14, x16, x16, x18);
__ Madd(x15, x16, x16, x19);
__ Madd(x20, x16, x17, x17);
__ Madd(x21, x17, x17, x18);
__ Madd(x22, x17, x17, x19);
__ Madd(x23, x17, x18, x16);
__ Madd(x24, x17, x18, x18);
__ Madd(x25, x18, x18, x17);
__ Madd(x26, x18, x19, x18);
__ Madd(x27, x19, x19, x19);
END();
RUN();
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(0xFFFFFFFF, x2);
CHECK_EQUAL_64(0xFFFFFFFF, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(0, x5);
CHECK_EQUAL_64(0, x6);
CHECK_EQUAL_64(0xFFFFFFFF, x7);
CHECK_EQUAL_64(0xFFFFFFFE, x8);
CHECK_EQUAL_64(2, x9);
CHECK_EQUAL_64(0, x10);
CHECK_EQUAL_64(0, x11);
CHECK_EQUAL_64(0, x12);
CHECK_EQUAL_64(1, x13);
CHECK_EQUAL_64(0xFFFFFFFF, x14);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFF, x15);
CHECK_EQUAL_64(1, x20);
CHECK_EQUAL_64(0x100000000UL, x21);
CHECK_EQUAL_64(0, x22);
CHECK_EQUAL_64(0xFFFFFFFF, x23);
CHECK_EQUAL_64(0x1FFFFFFFE, x24);
CHECK_EQUAL_64(0xFFFFFFFE00000002UL, x25);
CHECK_EQUAL_64(0, x26);
CHECK_EQUAL_64(0, x27);
TEARDOWN();
}
TEST(msub) {
INIT_V8();
SETUP();
START();
__ Mov(x16, 0);
__ Mov(x17, 1);
__ Mov(x18, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Msub(w0, w16, w16, w16);
__ Msub(w1, w16, w16, w17);
__ Msub(w2, w16, w16, w18);
__ Msub(w3, w16, w16, w19);
__ Msub(w4, w16, w17, w17);
__ Msub(w5, w17, w17, w18);
__ Msub(w6, w17, w17, w19);
__ Msub(w7, w17, w18, w16);
__ Msub(w8, w17, w18, w18);
__ Msub(w9, w18, w18, w17);
__ Msub(w10, w18, w19, w18);
__ Msub(w11, w19, w19, w19);
__ Msub(x12, x16, x16, x16);
__ Msub(x13, x16, x16, x17);
__ Msub(x14, x16, x16, x18);
__ Msub(x15, x16, x16, x19);
__ Msub(x20, x16, x17, x17);
__ Msub(x21, x17, x17, x18);
__ Msub(x22, x17, x17, x19);
__ Msub(x23, x17, x18, x16);
__ Msub(x24, x17, x18, x18);
__ Msub(x25, x18, x18, x17);
__ Msub(x26, x18, x19, x18);
__ Msub(x27, x19, x19, x19);
END();
RUN();
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(1, x1);
CHECK_EQUAL_64(0xFFFFFFFF, x2);
CHECK_EQUAL_64(0xFFFFFFFF, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(0xFFFFFFFE, x5);
CHECK_EQUAL_64(0xFFFFFFFE, x6);
CHECK_EQUAL_64(1, x7);
CHECK_EQUAL_64(0, x8);
CHECK_EQUAL_64(0, x9);
CHECK_EQUAL_64(0xFFFFFFFE, x10);
CHECK_EQUAL_64(0xFFFFFFFE, x11);
CHECK_EQUAL_64(0, x12);
CHECK_EQUAL_64(1, x13);
CHECK_EQUAL_64(0xFFFFFFFF, x14);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x15);
CHECK_EQUAL_64(1, x20);
CHECK_EQUAL_64(0xFFFFFFFEUL, x21);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x22);
CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x23);
CHECK_EQUAL_64(0, x24);
CHECK_EQUAL_64(0x200000000UL, x25);
CHECK_EQUAL_64(0x1FFFFFFFEUL, x26);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x27);
TEARDOWN();
}
TEST(smulh) {
INIT_V8();
SETUP();
START();
__ Mov(x20, 0);
__ Mov(x21, 1);
__ Mov(x22, 0x0000000100000000L);
__ Mov(x23, 0x12345678);
__ Mov(x24, 0x0123456789ABCDEFL);
__ Mov(x25, 0x0000000200000000L);
__ Mov(x26, 0x8000000000000000UL);
__ Mov(x27, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x28, 0x5555555555555555UL);
__ Mov(x29, 0xAAAAAAAAAAAAAAAAUL);
__ Smulh(x0, x20, x24);
__ Smulh(x1, x21, x24);
__ Smulh(x2, x22, x23);
__ Smulh(x3, x22, x24);
__ Smulh(x4, x24, x25);
__ Smulh(x5, x23, x27);
__ Smulh(x6, x26, x26);
__ Smulh(x7, x26, x27);
__ Smulh(x8, x27, x27);
__ Smulh(x9, x28, x28);
__ Smulh(x10, x28, x29);
__ Smulh(x11, x29, x29);
END();
RUN();
CHECK_EQUAL_64(0, x0);
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(0, x2);
CHECK_EQUAL_64(0x01234567, x3);
CHECK_EQUAL_64(0x02468ACF, x4);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x5);
CHECK_EQUAL_64(0x4000000000000000UL, x6);
CHECK_EQUAL_64(0, x7);
CHECK_EQUAL_64(0, x8);
CHECK_EQUAL_64(0x1C71C71C71C71C71UL, x9);
CHECK_EQUAL_64(0xE38E38E38E38E38EUL, x10);
CHECK_EQUAL_64(0x1C71C71C71C71C72UL, x11);
TEARDOWN();
}
TEST(smaddl_umaddl) {
INIT_V8();
SETUP();
START();
__ Mov(x17, 1);
__ Mov(x18, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
__ Smaddl(x9, w17, w18, x20);
__ Smaddl(x10, w18, w18, x20);
__ Smaddl(x11, w19, w19, x20);
__ Smaddl(x12, w19, w19, x21);
__ Umaddl(x13, w17, w18, x20);
__ Umaddl(x14, w18, w18, x20);
__ Umaddl(x15, w19, w19, x20);
__ Umaddl(x22, w19, w19, x21);
END();
RUN();
CHECK_EQUAL_64(3, x9);
CHECK_EQUAL_64(5, x10);
CHECK_EQUAL_64(5, x11);
CHECK_EQUAL_64(0x200000001UL, x12);
CHECK_EQUAL_64(0x100000003UL, x13);
CHECK_EQUAL_64(0xFFFFFFFE00000005UL, x14);
CHECK_EQUAL_64(0xFFFFFFFE00000005UL, x15);
CHECK_EQUAL_64(0x1, x22);
TEARDOWN();
}
TEST(smsubl_umsubl) {
INIT_V8();
SETUP();
START();
__ Mov(x17, 1);
__ Mov(x18, 0xFFFFFFFF);
__ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x20, 4);
__ Mov(x21, 0x200000000UL);
__ Smsubl(x9, w17, w18, x20);
__ Smsubl(x10, w18, w18, x20);
__ Smsubl(x11, w19, w19, x20);
__ Smsubl(x12, w19, w19, x21);
__ Umsubl(x13, w17, w18, x20);
__ Umsubl(x14, w18, w18, x20);
__ Umsubl(x15, w19, w19, x20);
__ Umsubl(x22, w19, w19, x21);
END();
RUN();
CHECK_EQUAL_64(5, x9);
CHECK_EQUAL_64(3, x10);
CHECK_EQUAL_64(3, x11);
CHECK_EQUAL_64(0x1FFFFFFFFUL, x12);
CHECK_EQUAL_64(0xFFFFFFFF00000005UL, x13);
CHECK_EQUAL_64(0x200000003UL, x14);
CHECK_EQUAL_64(0x200000003UL, x15);
CHECK_EQUAL_64(0x3FFFFFFFFUL, x22);
TEARDOWN();
}
TEST(div) {
INIT_V8();
SETUP();
START();
__ Mov(x16, 1);
__ Mov(x17, 0xFFFFFFFF);
__ Mov(x18, 0xFFFFFFFFFFFFFFFFUL);
__ Mov(x19, 0x80000000);
__ Mov(x20, 0x8000000000000000UL);
__ Mov(x21, 2);
__ Udiv(w0, w16, w16);
__ Udiv(w1, w17, w16);
__ Sdiv(w2, w16, w16);
__ Sdiv(w3, w16, w17);
__ Sdiv(w4, w17, w18);
__ Udiv(x5, x16, x16);
__ Udiv(x6, x17, x18);
__ Sdiv(x7, x16, x16);
__ Sdiv(x8, x16, x17);
__ Sdiv(x9, x17, x18);
__ Udiv(w10, w19, w21);
__ Sdiv(w11, w19, w21);
__ Udiv(x12, x19, x21);
__ Sdiv(x13, x19, x21);
__ Udiv(x14, x20, x21);
__ Sdiv(x15, x20, x21);
__ Udiv(w22, w19, w17);
__ Sdiv(w23, w19, w17);
__ Udiv(x24, x20, x18);
__ Sdiv(x25, x20, x18);
__ Udiv(x26, x16, x21);
__ Sdiv(x27, x16, x21);
__ Udiv(x28, x18, x21);
__ Sdiv(x29, x18, x21);
__ Mov(x17, 0);
__ Udiv(w18, w16, w17);
__ Sdiv(w19, w16, w17);
__ Udiv(x20, x16, x17);
__ Sdiv(x21, x16, x17);
END();
RUN();
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(0xFFFFFFFF, x1);
CHECK_EQUAL_64(1, x2);
CHECK_EQUAL_64(0xFFFFFFFF, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(1, x5);
CHECK_EQUAL_64(0, x6);
CHECK_EQUAL_64(1, x7);
CHECK_EQUAL_64(0, x8);
CHECK_EQUAL_64(0xFFFFFFFF00000001UL, x9);
CHECK_EQUAL_64(0x40000000, x10);
CHECK_EQUAL_64(0xC0000000, x11);
CHECK_EQUAL_64(0x40000000, x12);
CHECK_EQUAL_64(0x40000000, x13);
CHECK_EQUAL_64(0x4000000000000000UL, x14);
CHECK_EQUAL_64(0xC000000000000000UL, x15);
CHECK_EQUAL_64(0, x22);
CHECK_EQUAL_64(0x80000000, x23);
CHECK_EQUAL_64(0, x24);
CHECK_EQUAL_64(0x8000000000000000UL, x25);
CHECK_EQUAL_64(0, x26);
CHECK_EQUAL_64(0, x27);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x28);
CHECK_EQUAL_64(0, x29);
CHECK_EQUAL_64(0, x18);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0, x20);
CHECK_EQUAL_64(0, x21);
TEARDOWN();
}
TEST(rbit_rev) {
INIT_V8();
SETUP();
START();
__ Mov(x24, 0xFEDCBA9876543210UL);
__ Rbit(w0, w24);
__ Rbit(x1, x24);
__ Rev16(w2, w24);
__ Rev16(x3, x24);
__ Rev(w4, w24);
__ Rev32(x5, x24);
__ Rev(x6, x24);
END();
RUN();
CHECK_EQUAL_64(0x084C2A6E, x0);
CHECK_EQUAL_64(0x084C2A6E195D3B7FUL, x1);
CHECK_EQUAL_64(0x54761032, x2);
CHECK_EQUAL_64(0xDCFE98BA54761032UL, x3);
CHECK_EQUAL_64(0x10325476, x4);
CHECK_EQUAL_64(0x98BADCFE10325476UL, x5);
CHECK_EQUAL_64(0x1032547698BADCFEUL, x6);
TEARDOWN();
}
TEST(clz_cls) {
INIT_V8();
SETUP();
START();
__ Mov(x24, 0x0008000000800000UL);
__ Mov(x25, 0xFF800000FFF80000UL);
__ Mov(x26, 0);
__ Clz(w0, w24);
__ Clz(x1, x24);
__ Clz(w2, w25);
__ Clz(x3, x25);
__ Clz(w4, w26);
__ Clz(x5, x26);
__ Cls(w6, w24);
__ Cls(x7, x24);
__ Cls(w8, w25);
__ Cls(x9, x25);
__ Cls(w10, w26);
__ Cls(x11, x26);
END();
RUN();
CHECK_EQUAL_64(8, x0);
CHECK_EQUAL_64(12, x1);
CHECK_EQUAL_64(0, x2);
CHECK_EQUAL_64(0, x3);
CHECK_EQUAL_64(32, x4);
CHECK_EQUAL_64(64, x5);
CHECK_EQUAL_64(7, x6);
CHECK_EQUAL_64(11, x7);
CHECK_EQUAL_64(12, x8);
CHECK_EQUAL_64(8, x9);
CHECK_EQUAL_64(31, x10);
CHECK_EQUAL_64(63, x11);
TEARDOWN();
}
TEST(label) {
INIT_V8();
SETUP();
Label label_1, label_2, label_3, label_4;
START();
__ Mov(x0, 0x1);
__ Mov(x1, 0x0);
__ Mov(x22, lr); // Save lr.
__ B(&label_1);
__ B(&label_1);
__ B(&label_1); // Multiple branches to the same label.
__ Mov(x0, 0x0);
__ Bind(&label_2);
__ B(&label_3); // Forward branch.
__ Mov(x0, 0x0);
__ Bind(&label_1);
__ B(&label_2); // Backward branch.
__ Mov(x0, 0x0);
__ Bind(&label_3);
__ Bl(&label_4);
END();
__ Bind(&label_4);
__ Mov(x1, 0x1);
__ Mov(lr, x22);
END();
RUN();
CHECK_EQUAL_64(0x1, x0);
CHECK_EQUAL_64(0x1, x1);
TEARDOWN();
}
TEST(branch_at_start) {
INIT_V8();
SETUP();
Label good, exit;
// Test that branches can exist at the start of the buffer. (This is a
// boundary condition in the label-handling code.) To achieve this, we have
// to work around the code generated by START.
RESET();
__ B(&good);
START_AFTER_RESET();
__ Mov(x0, 0x0);
END();
__ Bind(&exit);
START_AFTER_RESET();
__ Mov(x0, 0x1);
END();
__ Bind(&good);
__ B(&exit);
END();
RUN();
CHECK_EQUAL_64(0x1, x0);
TEARDOWN();
}
TEST(adr) {
INIT_V8();
SETUP();
Label label_1, label_2, label_3, label_4;
START();
__ Mov(x0, 0x0); // Becomes non-zero to indicate failure.
__ Adr(x1, &label_3); // Becomes zero to indicate success.
__ Adr(x2, &label_1); // Multiple forward references to the same label.
__ Adr(x3, &label_1);
__ Adr(x4, &label_1);
__ Bind(&label_2);
__ Eor(x5, x2, Operand(x3)); // Ensure that x2, x3 and x4 are identical.
__ Eor(x6, x2, Operand(x4));
__ Orr(x0, x0, Operand(x5));
__ Orr(x0, x0, Operand(x6));
__ Br(x2); // label_1, label_3
__ Bind(&label_3);
__ Adr(x2, &label_3); // Self-reference (offset 0).
__ Eor(x1, x1, Operand(x2));
__ Adr(x2, &label_4); // Simple forward reference.
__ Br(x2); // label_4
__ Bind(&label_1);
__ Adr(x2, &label_3); // Multiple reverse references to the same label.
__ Adr(x3, &label_3);
__ Adr(x4, &label_3);
__ Adr(x5, &label_2); // Simple reverse reference.
__ Br(x5); // label_2
__ Bind(&label_4);
END();
RUN();
CHECK_EQUAL_64(0x0, x0);
CHECK_EQUAL_64(0x0, x1);
TEARDOWN();
}
TEST(adr_far) {
INIT_V8();
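// Adr encodes a signed, ImmPCRelRangeBitwidth-bit PC-relative offset, so the
// reachable range on either side of the instruction is half the total span.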
int max_range = 1 << (Instruction::ImmPCRelRangeBitwidth - 1);
SETUP_SIZE(max_range + 1000 * kInstructionSize);
Label done, fail;
Label test_near, near_forward, near_backward;
Label test_far, far_forward, far_backward;
START();
__ Mov(x0, 0x0);
__ Bind(&test_near);
__ Adr(x10, &near_forward, MacroAssembler::kAdrFar);
__ Br(x10);
__ B(&fail);
__ Bind(&near_backward);
__ Orr(x0, x0, 1 << 1);
__ B(&test_far);
__ Bind(&near_forward);
__ Orr(x0, x0, 1 << 0);
__ Adr(x10, &near_backward, MacroAssembler::kAdrFar);
__ Br(x10);
__ Bind(&test_far);
__ Adr(x10, &far_forward, MacroAssembler::kAdrFar);
__ Br(x10);
__ B(&fail);
__ Bind(&far_backward);
__ Orr(x0, x0, 1 << 3);
__ B(&done);
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
  if (i % 100 == 0) {
    // If we do land in this code, we do not want to execute so many nops
    // before reaching the end of the test (especially if tracing is
    // activated).
    __ b(&fail);
  } else {
    __ nop();
  }
}
__ Bind(&far_forward);
__ Orr(x0, x0, 1 << 2);
__ Adr(x10, &far_backward, MacroAssembler::kAdrFar);
__ Br(x10);
__ B(&done);
__ Bind(&fail);
__ Orr(x0, x0, 1 << 4);
__ Bind(&done);
END();
RUN();
CHECK_EQUAL_64(0xF, x0);
TEARDOWN();
}
TEST(branch_cond) {
INIT_V8();
SETUP();
Label wrong;
START();
__ Mov(x0, 0x1);
__ Mov(x1, 0x1);
__ Mov(x2, 0x8000000000000000L);
// For each 'cmp' instruction below, we branch to 'wrong' on every condition
// code that the comparison must not set; only the final branch, to the 'ok'
// label that follows, should be taken.
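// For example, Cmp(x1, 0) with x1 == 1 sets C and clears N, Z and V, so the
// eq, lo, mi, vs, ls, lt and le branches below must all fall through.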
__ Cmp(x1, 0);
__ B(&wrong, eq);
__ B(&wrong, lo);
__ B(&wrong, mi);
__ B(&wrong, vs);
__ B(&wrong, ls);
__ B(&wrong, lt);
__ B(&wrong, le);
Label ok_1;
__ B(&ok_1, ne);
__ Mov(x0, 0x0);
__ Bind(&ok_1);
__ Cmp(x1, 1);
__ B(&wrong, ne);
__ B(&wrong, lo);
__ B(&wrong, mi);
__ B(&wrong, vs);
__ B(&wrong, hi);
__ B(&wrong, lt);
__ B(&wrong, gt);
Label ok_2;
__ B(&ok_2, pl);
__ Mov(x0, 0x0);
__ Bind(&ok_2);
__ Cmp(x1, 2);
__ B(&wrong, eq);
__ B(&wrong, hs);
__ B(&wrong, pl);
__ B(&wrong, vs);
__ B(&wrong, hi);
__ B(&wrong, ge);
__ B(&wrong, gt);
Label ok_3;
__ B(&ok_3, vc);
__ Mov(x0, 0x0);
__ Bind(&ok_3);
__ Cmp(x2, 1);
__ B(&wrong, eq);
__ B(&wrong, lo);
__ B(&wrong, mi);
__ B(&wrong, vc);
__ B(&wrong, ls);
__ B(&wrong, ge);
__ B(&wrong, gt);
Label ok_4;
__ B(&ok_4, le);
__ Mov(x0, 0x0);
__ Bind(&ok_4);
Label ok_5;
__ b(&ok_5, al);
__ Mov(x0, 0x0);
__ Bind(&ok_5);
Label ok_6;
__ b(&ok_6, nv);
__ Mov(x0, 0x0);
__ Bind(&ok_6);
END();
__ Bind(&wrong);
__ Mov(x0, 0x0);
END();
RUN();
CHECK_EQUAL_64(0x1, x0);
TEARDOWN();
}
TEST(branch_to_reg) {
INIT_V8();
SETUP();
// Test br.
Label fn1, after_fn1;
START();
__ Mov(x29, lr);
__ Mov(x1, 0);
__ B(&after_fn1);
__ Bind(&fn1);
__ Mov(x0, lr);
__ Mov(x1, 42);
__ Br(x0);
__ Bind(&after_fn1);
__ Bl(&fn1);
// Test blr.
Label fn2, after_fn2;
__ Mov(x2, 0);
__ B(&after_fn2);
__ Bind(&fn2);
__ Mov(x0, lr);
__ Mov(x2, 84);
__ Blr(x0);
__ Bind(&after_fn2);
__ Bl(&fn2);
__ Mov(x3, lr);
__ Mov(lr, x29);
END();
RUN();
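// Inside fn2, x0 captured lr, which pointed at the Mov(x3, lr) following the
// final Bl(&fn2). The Blr then set lr to the address of that Bl, which is
// what x3 holds, so x0 is exactly one instruction past x3.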
CHECK_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
CHECK_EQUAL_64(42, x1);
CHECK_EQUAL_64(84, x2);
TEARDOWN();
}
TEST(compare_branch) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0);
__ Mov(x1, 0);
__ Mov(x2, 0);
__ Mov(x3, 0);
__ Mov(x4, 0);
__ Mov(x5, 0);
__ Mov(x16, 0);
__ Mov(x17, 42);
Label zt, zt_end;
__ Cbz(w16, &zt);
__ B(&zt_end);
__ Bind(&zt);
__ Mov(x0, 1);
__ Bind(&zt_end);
Label zf, zf_end;
__ Cbz(x17, &zf);
__ B(&zf_end);
__ Bind(&zf);
__ Mov(x1, 1);
__ Bind(&zf_end);
Label nzt, nzt_end;
__ Cbnz(w17, &nzt);
__ B(&nzt_end);
__ Bind(&nzt);
__ Mov(x2, 1);
__ Bind(&nzt_end);
Label nzf, nzf_end;
__ Cbnz(x16, &nzf);
__ B(&nzf_end);
__ Bind(&nzf);
__ Mov(x3, 1);
__ Bind(&nzf_end);
__ Mov(x18, 0xFFFFFFFF00000000UL);
Label a, a_end;
__ Cbz(w18, &a);
__ B(&a_end);
__ Bind(&a);
__ Mov(x4, 1);
__ Bind(&a_end);
Label b, b_end;
__ Cbnz(w18, &b);
__ B(&b_end);
__ Bind(&b);
__ Mov(x5, 1);
__ Bind(&b_end);
END();
RUN();
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(1, x2);
CHECK_EQUAL_64(0, x3);
CHECK_EQUAL_64(1, x4);
CHECK_EQUAL_64(0, x5);
TEARDOWN();
}
TEST(test_branch) {
INIT_V8();
SETUP();
START();
__ Mov(x0, 0);
__ Mov(x1, 0);
__ Mov(x2, 0);
__ Mov(x3, 0);
__ Mov(x16, 0xAAAAAAAAAAAAAAAAUL);
Label bz, bz_end;
__ Tbz(w16, 0, &bz);
__ B(&bz_end);
__ Bind(&bz);
__ Mov(x0, 1);
__ Bind(&bz_end);
Label bo, bo_end;
__ Tbz(x16, 63, &bo);
__ B(&bo_end);
__ Bind(&bo);
__ Mov(x1, 1);
__ Bind(&bo_end);
Label nbz, nbz_end;
__ Tbnz(x16, 61, &nbz);
__ B(&nbz_end);
__ Bind(&nbz);
__ Mov(x2, 1);
__ Bind(&nbz_end);
Label nbo, nbo_end;
__ Tbnz(w16, 2, &nbo);
__ B(&nbo_end);
__ Bind(&nbo);
__ Mov(x3, 1);
__ Bind(&nbo_end);
END();
RUN();
CHECK_EQUAL_64(1, x0);
CHECK_EQUAL_64(0, x1);
CHECK_EQUAL_64(1, x2);
CHECK_EQUAL_64(0, x3);
TEARDOWN();
}
TEST(far_branch_backward) {
INIT_V8();
// Test that the MacroAssembler correctly resolves backward branches to labels
// that are outside the immediate range of branch instructions.
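// Of the three branch types, tbz/tbnz has the shortest range (+/-32KB);
// cbz/cbnz and conditional b both reach +/-1MB.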
int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));
SETUP_SIZE(max_range + 1000 * kInstructionSize);
START();
Label done, fail;
Label test_tbz, test_cbz, test_bcond;
Label success_tbz, success_cbz, success_bcond;
__ Mov(x0, 0);
__ Mov(x1, 1);
__ Mov(x10, 0);
__ B(&test_tbz);
__ Bind(&success_tbz);
__ Orr(x0, x0, 1 << 0);
__ B(&test_cbz);
__ Bind(&success_cbz);
__ Orr(x0, x0, 1 << 1);
__ B(&test_bcond);
__ Bind(&success_bcond);
__ Orr(x0, x0, 1 << 2);
__ B(&done);
// Generate enough code to overflow the immediate range of the three types of
// branches below.
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
  if (i % 100 == 0) {
    // If we do land in this code, we do not want to execute so many nops
    // before reaching the end of the test (especially if tracing is
    // activated).
    __ B(&fail);
  } else {
    __ Nop();
  }
}
__ B(&fail);
__ Bind(&test_tbz);
__ Tbz(x10, 7, &success_tbz);
__ Bind(&test_cbz);
__ Cbz(x10, &success_cbz);
__ Bind(&test_bcond);
__ Cmp(x10, 0);
__ B(eq, &success_bcond);
// For each out-of-range branch instruction, at least two instructions should
// have been generated.
CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
__ Bind(&fail);
__ Mov(x1, 0);
__ Bind(&done);
END();
RUN();
CHECK_EQUAL_64(0x7, x0);
CHECK_EQUAL_64(0x1, x1);
TEARDOWN();
}
TEST(far_branch_simple_veneer) {
INIT_V8();
// Test that the MacroAssembler correctly emits veneers for forward branches
// to labels that are outside the immediate range of branch instructions.
int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));
SETUP_SIZE(max_range + 1000 * kInstructionSize);
START();
Label done, fail;
Label test_tbz, test_cbz, test_bcond;
Label success_tbz, success_cbz, success_bcond;
__ Mov(x0, 0);
__ Mov(x1, 1);
__ Mov(x10, 0);
__ Bind(&test_tbz);
__ Tbz(x10, 7, &success_tbz);
__ Bind(&test_cbz);
__ Cbz(x10, &success_cbz);
__ Bind(&test_bcond);
__ Cmp(x10, 0);
__ B(eq, &success_bcond);
// Generate enough code to overflow the immediate range of the three types of
// branches below.
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
  if (i % 100 == 0) {
    // If we do land in this code, we do not want to execute so many nops
    // before reaching the end of the test (especially if tracing is
    // activated). Also, the branches give the MacroAssembler the opportunity
    // to emit the veneers.
    __ B(&fail);
  } else {
    __ Nop();
  }
}
__ B(&fail);
__ Bind(&success_tbz);
__ Orr(x0, x0, 1 << 0);
__ B(&test_cbz);
__ Bind(&success_cbz);
__ Orr(x0, x0, 1 << 1);
__ B(&test_bcond);
__ Bind(&success_bcond);
__ Orr(x0, x0, 1 << 2);
__ B(&done);
__ Bind(&fail);
__ Mov(x1, 0);
__ Bind(&done);
END();
RUN();
CHECK_EQUAL_64(0x7, x0);
CHECK_EQUAL_64(0x1, x1);
TEARDOWN();
}
TEST(far_branch_veneer_link_chain) {
INIT_V8();
// Test that the MacroAssembler correctly emits veneers for forward branches
// that target out-of-range labels and are part of multiple instructions
// jumping to that label.
//
// We test the three situations with the different types of instruction:
// (1)- When the branch is at the start of the chain with tbz.
// (2)- When the branch is in the middle of the chain with cbz.
// (3)- When the branch is at the end of the chain with bcond.
int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));
SETUP_SIZE(max_range + 1000 * kInstructionSize);
START();
Label skip, fail, done;
Label test_tbz, test_cbz, test_bcond;
Label success_tbz, success_cbz, success_bcond;
__ Mov(x0, 0);
__ Mov(x1, 1);
__ Mov(x10, 0);
__ B(&skip);
// Branches at the start of the chain for situations (2) and (3).
__ B(&success_cbz);
__ B(&success_bcond);
__ Nop();
__ B(&success_bcond);
__ B(&success_cbz);
__ Bind(&skip);
__ Bind(&test_tbz);
__ Tbz(x10, 7, &success_tbz);
__ Bind(&test_cbz);
__ Cbz(x10, &success_cbz);
__ Bind(&test_bcond);
__ Cmp(x10, 0);
__ B(eq, &success_bcond);
skip.Unuse();
__ B(&skip);
// Branches at the end of the chain for situations (1) and (2).
__ B(&success_cbz);
__ B(&success_tbz);
__ Nop();
__ B(&success_tbz);
__ B(&success_cbz);
__ Bind(&skip);
// Generate enough code to overflow the immediate range of the three types of
// branches below.
for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
  if (i % 100 == 0) {
    // If we do land in this code, we do not want to execute so many nops
    // before reaching the end of the test (especially if tracing is
    // activated). Also, the branches give the MacroAssembler the opportunity
    // to emit the veneers.
    __ B(&fail);
  } else {
    __ Nop();
  }
}
__ B(&fail);
__ Bind(&success_tbz);
__ Orr(x0, x0, 1 << 0);
__ B(&test_cbz);
__ Bind(&success_cbz);
__ Orr(x0, x0, 1 << 1);
__ B(&test_bcond);
__ Bind(&success_bcond);
__ Orr(x0, x0, 1 << 2);
__ B(&done);
__ Bind(&fail);
__ Mov(x1, 0);
__ Bind(&done);
END();
RUN();
CHECK_EQUAL_64(0x7, x0);
CHECK_EQUAL_64(0x1, x1);
TEARDOWN();
}
TEST(far_branch_veneer_broken_link_chain) {
INIT_V8();
// Check that the MacroAssembler correctly handles the situation when removing
// a branch from the link chain of a label and the two links on each side of
// the removed branch cannot be linked together (out of range).
//
// We test with tbz because it has a small range.
int max_range = Instruction::ImmBranchRange(TestBranchType);
int inter_range = max_range / 2 + max_range / 10;
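// Each code block below is a bit more than half the tbz range, so two
// adjacent links in the chain can reach each other, but links two blocks
// apart cannot.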
SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
START();
Label skip, fail, done;
Label test_1, test_2, test_3;
Label far_target;
__ Mov(x0, 0); // Indicates the origin of the branch.
__ Mov(x1, 1);
__ Mov(x10, 0);
// First instruction in the label chain.
__ Bind(&test_1);
__ Mov(x0, 1);
__ B(&far_target);
for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
  if (i % 100 == 0) {
    // Do not allow generating veneers. They should not be needed.
    __ b(&fail);
  } else {
    __ Nop();
  }
}
// Will need a veneer to reach the target.
__ Bind(&test_2);
__ Mov(x0, 2);
__ Tbz(x10, 7, &far_target);
for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
  if (i % 100 == 0) {
    // Do not allow generating veneers. They should not be needed.
    __ b(&fail);
  } else {
    __ Nop();
  }
}
// Does not need a veneer to reach the target, but the initial branch
// instruction is out of range.
__ Bind(&test_3);
__ Mov(x0, 3);
__ Tbz(x10, 7, &far_target);
for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
  if (i % 100 == 0) {
    // Allow generating veneers.
    __ B(&fail);
  } else {
    __ Nop();
  }
}
__ B(&fail);
__ Bind(&far_target);
__ Cmp(x0, 1);
__ B(eq, &test_2);
__ Cmp(x0, 2);
__ B(eq, &test_3);
__ B(&done);
__ Bind(&fail);
__ Mov(x1, 0);
__ Bind(&done);
END();
RUN();
CHECK_EQUAL_64(0x3, x0);
CHECK_EQUAL_64(0x1, x1);
TEARDOWN();
}
TEST(branch_type) {
INIT_V8();
SETUP();
Label fail, done;
START();
__ Mov(x0, 0x0);
__ Mov(x10, 0x7);
__ Mov(x11, 0x0);
// Test non-taken branches.
__ Cmp(x10, 0x7);
__ B(&fail, ne);
__ B(&fail, never);
__ B(&fail, reg_zero, x10);
__ B(&fail, reg_not_zero, x11);
__ B(&fail, reg_bit_clear, x10, 0);
__ B(&fail, reg_bit_set, x10, 3);
// Test taken branches.
Label l1, l2, l3, l4, l5;
__ Cmp(x10, 0x7);
__ B(&l1, eq);
__ B(&fail);
__ Bind(&l1);
__ B(&l2, always);
__ B(&fail);
__ Bind(&l2);
__ B(&l3, reg_not_zero, x10);
__ B(&fail);
__ Bind(&l3);
__ B(&l4, reg_bit_clear, x10, 15);
__ B(&fail);
__ Bind(&l4);
__ B(&l5, reg_bit_set, x10, 1);
__ B(&fail);
__ Bind(&l5);
__ B(&done);
__ Bind(&fail);
__ Mov(x0, 0x1);
__ Bind(&done);
END();
RUN();
CHECK_EQUAL_64(0x0, x0);
TEARDOWN();
}
TEST(ldr_str_offset) {
INIT_V8();
SETUP();
uint64_t src[2] = {0xFEDCBA9876543210UL, 0x0123456789ABCDEFUL};
uint64_t dst[5] = {0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
START();
__ Mov(x17, src_base);
__ Mov(x18, dst_base);
__ Ldr(w0, MemOperand(x17));
__ Str(w0, MemOperand(x18));
__ Ldr(w1, MemOperand(x17, 4));
__ Str(w1, MemOperand(x18, 12));
__ Ldr(x2, MemOperand(x17, 8));
__ Str(x2, MemOperand(x18, 16));
__ Ldrb(w3, MemOperand(x17, 1));
__ Strb(w3, MemOperand(x18, 25));
__ Ldrh(w4, MemOperand(x17, 2));
__ Strh(w4, MemOperand(x18, 33));
END();
RUN();
CHECK_EQUAL_64(0x76543210, x0);
CHECK_EQUAL_64(0x76543210, dst[0]);
CHECK_EQUAL_64(0xFEDCBA98, x1);
CHECK_EQUAL_64(0xFEDCBA9800000000UL, dst[1]);
CHECK_EQUAL_64(0x0123456789ABCDEFUL, x2);
CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[2]);
CHECK_EQUAL_64(0x32, x3);
CHECK_EQUAL_64(0x3200, dst[3]);
CHECK_EQUAL_64(0x7654, x4);
CHECK_EQUAL_64(0x765400, dst[4]);
CHECK_EQUAL_64(src_base, x17);
CHECK_EQUAL_64(dst_base, x18);
TEARDOWN();
}
TEST(ldr_str_wide) {
INIT_V8();
SETUP();
uint32_t src[8192];
uint32_t dst[8192];
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
memset(src, 0xAA, 8192 * sizeof(src[0]));
memset(dst, 0xAA, 8192 * sizeof(dst[0]));
src[0] = 0;
src[6144] = 6144;
src[8191] = 8191;
START();
__ Mov(x22, src_base);
__ Mov(x23, dst_base);
__ Mov(x24, src_base);
__ Mov(x25, dst_base);
__ Mov(x26, src_base);
__ Mov(x27, dst_base);
__ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
__ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
__ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
__ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
__ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
__ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
END();
RUN();
CHECK_EQUAL_32(8191, w0);
CHECK_EQUAL_32(8191, dst[8191]);
CHECK_EQUAL_64(src_base, x22);
CHECK_EQUAL_64(dst_base, x23);
CHECK_EQUAL_32(0, w1);
CHECK_EQUAL_32(0, dst[0]);
CHECK_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
CHECK_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
CHECK_EQUAL_32(6144, w2);
CHECK_EQUAL_32(6144, dst[6144]);
CHECK_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
CHECK_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
TEARDOWN();
}
TEST(ldr_str_preindex) {
INIT_V8();
SETUP();
uint64_t src[2] = {0xFEDCBA9876543210UL, 0x0123456789ABCDEFUL};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
START();
__ Mov(x17, src_base);
__ Mov(x18, dst_base);
__ Mov(x19, src_base);
__ Mov(x20, dst_base);
__ Mov(x21, src_base + 16);
__ Mov(x22, dst_base + 40);
__ Mov(x23, src_base);
__ Mov(x24, dst_base);
__ Mov(x25, src_base);
__ Mov(x26, dst_base);
__ Ldr(w0, MemOperand(x17, 4, PreIndex));
__ Str(w0, MemOperand(x18, 12, PreIndex));
__ Ldr(x1, MemOperand(x19, 8, PreIndex));
__ Str(x1, MemOperand(x20, 16, PreIndex));
__ Ldr(w2, MemOperand(x21, -4, PreIndex));
__ Str(w2, MemOperand(x22, -4, PreIndex));
__ Ldrb(w3, MemOperand(x23, 1, PreIndex));
__ Strb(w3, MemOperand(x24, 25, PreIndex));
__ Ldrh(w4, MemOperand(x25, 3, PreIndex));
__ Strh(w4, MemOperand(x26, 41, PreIndex));
END();
RUN();
CHECK_EQUAL_64(0xFEDCBA98, x0);
CHECK_EQUAL_64(0xFEDCBA9800000000UL, dst[1]);
CHECK_EQUAL_64(0x0123456789ABCDEFUL, x1);
CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[2]);
CHECK_EQUAL_64(0x01234567, x2);
CHECK_EQUAL_64(0x0123456700000000UL, dst[4]);
CHECK_EQUAL_64(0x32, x3);
CHECK_EQUAL_64(0x3200, dst[3]);
CHECK_EQUAL_64(0x9876, x4);
CHECK_EQUAL_64(0x987600, dst[5]);
CHECK_EQUAL_64(src_base + 4, x17);
CHECK_EQUAL_64(dst_base + 12, x18);
CHECK_EQUAL_64(src_base + 8, x19);
CHECK_EQUAL_64(dst_base + 16, x20);
CHECK_EQUAL_64(src_base + 12, x21);
CHECK_EQUAL_64(dst_base + 36, x22);
CHECK_EQUAL_64(src_base + 1, x23);
CHECK_EQUAL_64(dst_base + 25, x24);
CHECK_EQUAL_64(src_base + 3, x25);
CHECK_EQUAL_64(dst_base + 41, x26);
TEARDOWN();
}
TEST(ldr_str_postindex) {
INIT_V8();
SETUP();
uint64_t src[2] = {0xFEDCBA9876543210UL, 0x0123456789ABCDEFUL};
uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
START();
__ Mov(x17, src_base + 4);
__ Mov(x18, dst_base + 12);
__ Mov(x19, src_base + 8);
__ Mov(x20, dst_base + 16);
__ Mov(x21, src_base + 8);
__ Mov(x22, dst_base + 32);
__ Mov(x23, src_base + 1);
__ Mov(x24, dst_base + 25);
__ Mov(x25, src_base + 3);
__ Mov(x26, dst_base + 41);
  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
  __ Str(w0, MemOperand(x18, 12, PostIndex));
  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
  __ Str(x1, MemOperand(x20, 16, PostIndex));
  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
  __ Str(x2, MemOperand(x22, -32, PostIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
  __ Strb(w3, MemOperand(x24, 5, PostIndex));
  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
  __ Strh(w4, MemOperand(x26, -41, PostIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0xFEDCBA98, x0);
  CHECK_EQUAL_64(0xFEDCBA9800000000UL, dst[1]);
  CHECK_EQUAL_64(0x0123456789ABCDEFUL, x1);
  CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[2]);
  CHECK_EQUAL_64(0x0123456789ABCDEFUL, x2);
  CHECK_EQUAL_64(0x0123456789ABCDEFUL, dst[4]);
  CHECK_EQUAL_64(0x32, x3);
  CHECK_EQUAL_64(0x3200, dst[3]);
  CHECK_EQUAL_64(0x9876, x4);
  CHECK_EQUAL_64(0x987600, dst[5]);
  CHECK_EQUAL_64(src_base + 8, x17);
  CHECK_EQUAL_64(dst_base + 24, x18);
  CHECK_EQUAL_64(src_base + 16, x19);
  CHECK_EQUAL_64(dst_base + 32, x20);
  CHECK_EQUAL_64(src_base, x21);
  CHECK_EQUAL_64(dst_base, x22);
  CHECK_EQUAL_64(src_base + 2, x23);
  CHECK_EQUAL_64(dst_base + 30, x24);
  CHECK_EQUAL_64(src_base, x25);
  CHECK_EQUAL_64(dst_base, x26);

  TEARDOWN();
}


TEST(load_signed) {
  INIT_V8();
  SETUP();

  uint32_t src[2] = {0x80008080, 0x7FFF7F7F};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
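  // Ldrsb/Ldrsh/Ldrsw sign-extend the loaded byte, half-word or word into the
  // destination register. The first byte of src is 0x80 (little-endian), so a
  // signed byte load into a W register must produce 0xFFFFFF80.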
  __ Ldrsb(w0, MemOperand(x24));
  __ Ldrsb(w1, MemOperand(x24, 4));
  __ Ldrsh(w2, MemOperand(x24));
  __ Ldrsh(w3, MemOperand(x24, 4));
  __ Ldrsb(x4, MemOperand(x24));
  __ Ldrsb(x5, MemOperand(x24, 4));
  __ Ldrsh(x6, MemOperand(x24));
  __ Ldrsh(x7, MemOperand(x24, 4));
  __ Ldrsw(x8, MemOperand(x24));
  __ Ldrsw(x9, MemOperand(x24, 4));
  END();

  RUN();

  CHECK_EQUAL_64(0xFFFFFF80, x0);
  CHECK_EQUAL_64(0x0000007F, x1);
  CHECK_EQUAL_64(0xFFFF8080, x2);
  CHECK_EQUAL_64(0x00007F7F, x3);
  CHECK_EQUAL_64(0xFFFFFFFFFFFFFF80UL, x4);
  CHECK_EQUAL_64(0x000000000000007FUL, x5);
  CHECK_EQUAL_64(0xFFFFFFFFFFFF8080UL, x6);
  CHECK_EQUAL_64(0x0000000000007F7FUL, x7);
  CHECK_EQUAL_64(0xFFFFFFFF80008080UL, x8);
  CHECK_EQUAL_64(0x000000007FFF7F7FUL, x9);

  TEARDOWN();
}


TEST(load_store_regoffset) {
  INIT_V8();
  SETUP();

  uint32_t src[3] = {1, 2, 3};
  uint32_t dst[4] = {0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 3 * sizeof(src[0]));
  __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
  __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
  __ Mov(x24, 0);
  __ Mov(x25, 4);
  __ Mov(x26, -4);
  __ Mov(x27, 0xFFFFFFFC);  // 32-bit -4.
  __ Mov(x28, 0xFFFFFFFE);  // 32-bit -2.
  __ Mov(x29, 0xFFFFFFFF);  // 32-bit -1.
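  // The register-offset form computes the address as base + offset, where the
  // offset register can optionally be sign-extended from 32 bits (SXTW) and
  // shifted. For example, `MemOperand(x18, x28, SXTW, 2)` addresses roughly
  // x18 + (sign_extend_32(w28) << 2), i.e. x18 - 8 for w28 == 0xFFFFFFFE.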
  __ Ldr(w0, MemOperand(x16, x24));
  __ Ldr(x1, MemOperand(x16, x25));
  __ Ldr(w2, MemOperand(x18, x26));
  __ Ldr(w3, MemOperand(x18, x27, SXTW));
  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
  __ Str(w0, MemOperand(x17, x24));
  __ Str(x1, MemOperand(x17, x25));
  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
  END();

  RUN();

  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(0x0000000300000002UL, x1);
  CHECK_EQUAL_64(3, x2);
  CHECK_EQUAL_64(3, x3);
  CHECK_EQUAL_64(2, x4);
  CHECK_EQUAL_32(1, dst[0]);
  CHECK_EQUAL_32(2, dst[1]);
  CHECK_EQUAL_32(3, dst[2]);
  CHECK_EQUAL_32(3, dst[3]);

  TEARDOWN();
}


TEST(load_store_float) {
  INIT_V8();
  SETUP();

  float src[3] = {1.0, 2.0, 3.0};
  float dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
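  // The same immediate-offset, post-index and pre-index modes apply to FP
  // loads and stores; here each S-register transfer moves one 32-bit float.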
  __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(s2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  CHECK_EQUAL_FP32(2.0, s0);
  CHECK_EQUAL_FP32(2.0, dst[0]);
  CHECK_EQUAL_FP32(1.0, s1);
  CHECK_EQUAL_FP32(1.0, dst[2]);
  CHECK_EQUAL_FP32(3.0, s2);
  CHECK_EQUAL_FP32(3.0, dst[1]);
  CHECK_EQUAL_64(src_base, x17);
  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
  CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  CHECK_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(load_store_double) {
  INIT_V8();
  SETUP();

  double src[3] = {1.0, 2.0, 3.0};
  double dst[3] = {0.0, 0.0, 0.0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
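  // Identical pattern to load_store_float, but with D registers moving 64-bit
  // doubles.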
  __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(d2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  CHECK_EQUAL_FP64(2.0, d0);
  CHECK_EQUAL_FP64(2.0, dst[0]);
  CHECK_EQUAL_FP64(1.0, d1);
  CHECK_EQUAL_FP64(1.0, dst[2]);
  CHECK_EQUAL_FP64(3.0, d2);
  CHECK_EQUAL_FP64(3.0, dst[1]);
  CHECK_EQUAL_64(src_base, x17);
  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
  CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  CHECK_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(load_store_b) {
  INIT_V8();
  SETUP();

  uint8_t src[3] = {0x12, 0x23, 0x34};
  uint8_t dst[3] = {0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
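  // A B-register load moves a single byte into the lowest lane of the vector
  // register and clears the remaining 120 bits, hence the CHECK_EQUAL_128
  // comparisons against the full Q registers below.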
  __ Ldr(b0, MemOperand(x17, sizeof(src[0])));
  __ Str(b0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(b1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(b1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(b2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(b2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  CHECK_EQUAL_128(0, 0x23, q0);
  CHECK_EQUAL_64(0x23, dst[0]);
  CHECK_EQUAL_128(0, 0x12, q1);
  CHECK_EQUAL_64(0x12, dst[2]);
  CHECK_EQUAL_128(0, 0x34, q2);
  CHECK_EQUAL_64(0x34, dst[1]);
  CHECK_EQUAL_64(src_base, x17);
  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
  CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  CHECK_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(load_store_h) {
  INIT_V8();
  SETUP();

  uint16_t src[3] = {0x1234, 0x2345, 0x3456};
  uint16_t dst[3] = {0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
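  // As for load_store_b, but H-register transfers move a 16-bit half-word and
  // clear the upper 112 bits of the vector register.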
  __ Ldr(h0, MemOperand(x17, sizeof(src[0])));
  __ Str(h0, MemOperand(x18, sizeof(dst[0]), PostIndex));
  __ Ldr(h1, MemOperand(x19, sizeof(src[0]), PostIndex));
  __ Str(h1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
  __ Ldr(h2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
  __ Str(h2, MemOperand(x22, sizeof(dst[0])));
  END();

  RUN();

  CHECK_EQUAL_128(0, 0x2345, q0);
  CHECK_EQUAL_64(0x2345, dst[0]);
  CHECK_EQUAL_128(0, 0x1234, q1);
  CHECK_EQUAL_64(0x1234, dst[2]);
  CHECK_EQUAL_128(0, 0x3456, q2);
  CHECK_EQUAL_64(0x3456, dst[1]);
  CHECK_EQUAL_64(src_base, x17);
  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
  CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
  CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
  CHECK_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(load_store_q) {
  INIT_V8();
  SETUP();

  uint8_t src[48] = {0x10, 0x32, 0x54, 0x76, 0x98, 0xBA, 0xDC, 0xFE, 0x01, 0x23,
                     0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x21, 0x43, 0x65, 0x87,
                     0xA9, 0xCB, 0xED, 0x0F, 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC,
                     0xDE, 0xF0, 0x24, 0x46, 0x68, 0x8A, 0xAC, 0xCE, 0xE0, 0x02,
                     0x42, 0x64, 0x86, 0xA8, 0xCA, 0xEC, 0x0E, 0x20};
  uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base);
  __ Mov(x20, dst_base);
  __ Mov(x21, src_base);
  __ Mov(x22, dst_base);
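  // Q-register transfers move the full 128 bits, so each Str below writes two
  // adjacent uint64_t elements of dst at once.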
  __ Ldr(q0, MemOperand(x17, 16));
  __ Str(q0, MemOperand(x18, 16, PostIndex));
  __ Ldr(q1, MemOperand(x19, 16, PostIndex));
  __ Str(q1, MemOperand(x20, 32, PreIndex));
  __ Ldr(q2, MemOperand(x21, 32, PreIndex));
  __ Str(q2, MemOperand(x22, 16));
  END();

  RUN();

  CHECK_EQUAL_128(0xF0DEBC9A78563412, 0x0FEDCBA987654321, q0);
  CHECK_EQUAL_64(0x0FEDCBA987654321, dst[0]);
  CHECK_EQUAL_64(0xF0DEBC9A78563412, dst[1]);
  CHECK_EQUAL_128(0xEFCDAB8967452301, 0xFEDCBA9876543210, q1);
  CHECK_EQUAL_64(0xFEDCBA9876543210, dst[4]);
  CHECK_EQUAL_64(0xEFCDAB8967452301, dst[5]);
  CHECK_EQUAL_128(0x200EECCAA8866442, 0x02E0CEAC8A684624, q2);
  CHECK_EQUAL_64(0x02E0CEAC8A684624, dst[2]);
  CHECK_EQUAL_64(0x200EECCAA8866442, dst[3]);
  CHECK_EQUAL_64(src_base, x17);
  CHECK_EQUAL_64(dst_base + 16, x18);
  CHECK_EQUAL_64(src_base + 16, x19);
  CHECK_EQUAL_64(dst_base + 32, x20);
  CHECK_EQUAL_64(src_base + 32, x21);
  CHECK_EQUAL_64(dst_base, x22);

  TEARDOWN();
}


TEST(neon_ld1_d) {
  INIT_V8();
  SETUP();

  uint8_t src[32 + 5];
  for (unsigned i = 0; i < sizeof(src); i++) {
    src[i] = i;
  }
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x17, src_base);
  __ Ldr(q2, MemOperand(x17));  // Initialise top 64-bits of Q register.